Diffstat (limited to 'include')
242 files changed, 5952 insertions, 1184 deletions
diff --git a/include/asm-alpha/a.out.h b/include/asm-alpha/a.out.h
index d97daf42753d..e43cf61649a9 100644
--- a/include/asm-alpha/a.out.h
+++ b/include/asm-alpha/a.out.h
@@ -101,6 +101,8 @@ struct exec
 #define STACK_TOP \
   (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
 
+#define STACK_TOP_MAX	0x00120000000UL
+
 #endif
 
 #endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-alpha/fb.h b/include/asm-alpha/fb.h
new file mode 100644
index 000000000000..fa9bbb96b2b3
--- /dev/null
+++ b/include/asm-alpha/fb.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/device.h>
+
+/* Caching is off in the I/O space quadrant by design. */
+#define fb_pgprotect(...)	do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index d2bed3cb33ff..bae7f05716d4 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -17,7 +17,8 @@
 extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vmaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 extern void copy_page(void * _to, void * _from);
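The arch hook gains a leading GFP argument so generic code can ask for a movable highpage. As a minimal sketch of how the generic side is expected to drive it (the wrapper name and placement follow include/linux/highmem.h of this kernel generation and are an assumption here, not part of this hunk):

	/* Sketch: generic wrapper passing __GFP_MOVABLE through movableflags. */
	static inline struct page *
	alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					   unsigned long vaddr)
	{
		return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
	}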
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
index 39e492c3bfa3..fa13716a11c3 100644
--- a/include/asm-alpha/termios.h
+++ b/include/asm-alpha/termios.h
@@ -81,7 +81,7 @@ struct termio {
 
 #define user_termio_to_kernel_termios(a_termios, u_termio) \
 ({ \
-	struct termios *k_termios = (a_termios); \
+	struct ktermios *k_termios = (a_termios); \
 	struct termio k_termio; \
 	int canon, ret; \
 \
@@ -113,7 +113,7 @@ struct termio {
  */
 #define kernel_termios_to_user_termio(u_termio, a_termios) \
 ({ \
-	struct termios *k_termios = (a_termios); \
+	struct ktermios *k_termios = (a_termios); \
 	struct termio k_termio; \
 	int canon; \
 \
diff --git a/include/asm-arm/a.out.h b/include/asm-arm/a.out.h
index 3e5fe64c4394..d7165e86df25 100644
--- a/include/asm-arm/a.out.h
+++ b/include/asm-arm/a.out.h
@@ -30,6 +30,7 @@ struct exec
 #ifdef __KERNEL__
 #define STACK_TOP	((current->personality == PER_LINUX_32BIT) ? \
 			 TASK_SIZE : TASK_SIZE_26)
+#define STACK_TOP_MAX	TASK_SIZE
 #endif
 
 #ifndef LIBRARY_START_TEXT
diff --git a/include/asm-arm/fb.h b/include/asm-arm/fb.h
new file mode 100644
index 000000000000..d92e99cd8c8a
--- /dev/null
+++ b/include/asm-arm/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index cb4c2c9d000a..d2e8171d1d4e 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -83,14 +83,14 @@
  * means that a write to a clean page will cause a permission fault, and
  * the Linux MM layer will mark the page dirty via handle_pte_fault().
  * For the hardware to notice the permission change, the TLB entry must
- * be flushed, and ptep_establish() does that for us.
+ * be flushed, and ptep_set_access_flags() does that for us.
  *
  * The "accessed" or "young" bit is emulated by a similar method; we only
  * allow accesses to the page if the "young" bit is set.  Accesses to the
  * page will cause a fault, and handle_pte_fault() will set the young bit
  * for us as long as the page is marked present in the corresponding Linux
- * PTE entry.  Again, ptep_establish() will ensure that the TLB is up to
- * date.
+ * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
  *
  * However, when the "young" bit is cleared, we deny access to the page
  * by clearing the hardware PTE.  Currently Linux does not flush the TLB
diff --git a/include/asm-arm26/a.out.h b/include/asm-arm26/a.out.h
index 9b2702c42c87..7167f54ae3fc 100644
--- a/include/asm-arm26/a.out.h
+++ b/include/asm-arm26/a.out.h
@@ -29,6 +29,7 @@ struct exec
 
 #ifdef __KERNEL__
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 #endif
 
 #ifndef LIBRARY_START_TEXT
diff --git a/include/asm-arm26/fb.h b/include/asm-arm26/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-arm26/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...)	do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-avr32/a.out.h b/include/asm-avr32/a.out.h
index 50bf6e31a143..9f398ab28ed0 100644
--- a/include/asm-avr32/a.out.h
+++ b/include/asm-avr32/a.out.h
@@ -20,6 +20,7 @@ struct exec
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-avr32/arch-at32ap/board.h b/include/asm-avr32/arch-at32ap/board.h
index 974480438849..0215965dc586 100644
--- a/include/asm-avr32/arch-at32ap/board.h
+++ b/include/asm-avr32/arch-at32ap/board.h
@@ -36,4 +36,18 @@ struct platform_device *
 at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
 		     unsigned long fbmem_start, unsigned long fbmem_len);
 
+/* depending on what's hooked up, not all SSC pins will be used */
+#define	ATMEL_SSC_TK		0x01
+#define	ATMEL_SSC_TF		0x02
+#define	ATMEL_SSC_TD		0x04
+#define	ATMEL_SSC_TX		(ATMEL_SSC_TK | ATMEL_SSC_TF | ATMEL_SSC_TD)
+
+#define	ATMEL_SSC_RK		0x10
+#define	ATMEL_SSC_RF		0x20
+#define	ATMEL_SSC_RD		0x40
+#define	ATMEL_SSC_RX		(ATMEL_SSC_RK | ATMEL_SSC_RF | ATMEL_SSC_RD)
+
+struct platform_device *
+at32_add_device_ssc(unsigned int id, unsigned int flags);
+
 #endif /* __ASM_ARCH_BOARD_H */
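The new flags let a board file describe which SSC pins are actually wired up when it registers the controller. A short usage sketch, assuming a hypothetical board setup function (the function name and SSC id are made up):

	/* Sketch: request SSC0 with the full transmit and receive pin sets. */
	static int __init demo_board_setup(void)
	{
		at32_add_device_ssc(0, ATMEL_SSC_TX | ATMEL_SSC_RX);
		return 0;
	}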
diff --git a/include/asm-avr32/arch-at32ap/sm.h b/include/asm-avr32/arch-at32ap/sm.h
deleted file mode 100644
index 265a9ead20bf..000000000000
--- a/include/asm-avr32/arch-at32ap/sm.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * AT32 System Manager interface.
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_AT32_SM_H__
-#define __ASM_AVR32_AT32_SM_H__
-
-struct irq_chip;
-struct platform_device;
-
-struct at32_sm {
-	spinlock_t lock;
-	void __iomem *regs;
-	struct irq_chip *eim_chip;
-	unsigned int eim_first_irq;
-	struct platform_device *pdev;
-};
-
-extern struct platform_device at32_sm_device;
-extern struct at32_sm system_manager;
-
-#endif /* __ASM_AVR32_AT32_SM_H__ */
diff --git a/include/asm-avr32/atomic.h b/include/asm-avr32/atomic.h
index b9c2548a52f3..7ef3862a73d0 100644
--- a/include/asm-avr32/atomic.h
+++ b/include/asm-avr32/atomic.h
@@ -101,7 +101,7 @@ static inline int atomic_sub_unless(atomic_t *v, int a, int u)
 		"	mov	%1, 1\n"
 		"1:"
 		: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
-		: "m"(v->counter), "rKs21"(a), "rKs21"(u)
+		: "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result)
 		: "cc", "memory");
 
 	return result;
@@ -137,7 +137,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 		"	mov	%1, 1\n"
 		"1:"
 		: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
-		: "m"(v->counter), "r"(a), "ir"(u)
+		: "m"(v->counter), "r"(a), "ir"(u), "1"(result)
 		: "cc", "memory");
 }
 
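The added "1"(result) operand is a matching constraint: it tells gcc that the asm reads whatever value is in output operand %1 (result) on entry, which matters here because the asm only conditionally overwrites it. With "=&r" alone the register is declared write-only and gcc may leave it uninitialized. A standalone sketch of the same idiom (not kernel code; the two-operand AVR32 "add Rd, Rs" form is assumed):

	/* Sketch: "0"(res) ties the input to output operand 0, so the asm's
	 * read-modify-write of res is visible to the compiler. */
	static inline int demo_add(int x, int y)
	{
		int res = x;
		asm("add	%0, %1" : "=r"(res) : "r"(y), "0"(res));
		return res;
	}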
diff --git a/include/asm-avr32/fb.h b/include/asm-avr32/fb.h
new file mode 100644
index 000000000000..41baf84ad402
--- /dev/null
+++ b/include/asm-avr32/fb.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
+				      & ~_PAGE_CACHABLE)
+				     | (_PAGE_BUFFER | _PAGE_DIRTY));
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-avr32/unaligned.h b/include/asm-avr32/unaligned.h
index 3042723fcbfd..36f5fd430543 100644
--- a/include/asm-avr32/unaligned.h
+++ b/include/asm-avr32/unaligned.h
@@ -7,19 +7,10 @@
  * words, but halfwords must be halfword-aligned, and doublewords must
  * be word-aligned.
  *
- * TODO: Make all this CPU-specific and optimize.
+ * However, swapped word loads must be word-aligned so we can't
+ * optimize word loads in general.
  */
 
-#include <linux/string.h>
-
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
-
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr) \
-  ({ __typeof__(*(ptr)) __tmp = (val); \
-     memmove((ptr), &__tmp, sizeof(*(ptr))); \
-     (void)0; })
+#include <asm-generic/unaligned.h>
 
 #endif /* __ASM_AVR32_UNALIGNED_H */
diff --git a/include/asm-blackfin/fb.h b/include/asm-blackfin/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-blackfin/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...)	do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-cris/a.out.h b/include/asm-cris/a.out.h
index 770734ce54a6..919b34a084f8 100644
--- a/include/asm-cris/a.out.h
+++ b/include/asm-cris/a.out.h
@@ -8,6 +8,7 @@
 
 /* grabbed from the intel stuff */
 #define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 
 struct exec
diff --git a/include/asm-cris/fb.h b/include/asm-cris/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-cris/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...)	do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h
index 9f13c32552bf..0648e3153f81 100644
--- a/include/asm-cris/page.h
+++ b/include/asm-cris/page.h
@@ -20,7 +20,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-frv/fb.h b/include/asm-frv/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-frv/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...)	do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-frv/mem-layout.h b/include/asm-frv/mem-layout.h
index a025dd4514e7..aaf2a773d9d3 100644
--- a/include/asm-frv/mem-layout.h
+++ b/include/asm-frv/mem-layout.h
@@ -60,6 +60,7 @@
  */
 #define BRK_BASE			__UL(2 * 1024 * 1024 + PAGE_SIZE)
 #define STACK_TOP			__UL(2 * 1024 * 1024)
+#define STACK_TOP_MAX			STACK_TOP
 
 /* userspace process size */
 #ifdef CONFIG_MMU
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index adde69985255..147e995bec24 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -388,13 +388,6 @@ static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pt
 static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte &= ~_PAGE_WP; return pte; }
 
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	int i = test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
-	asm volatile("dcf %M0" :: "U"(*ptep));
-	return i;
-}
-
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
@@ -504,7 +497,6 @@ static inline int pte_file(pte_t pte)
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7f30cce52857..344e3091af24 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -28,7 +28,7 @@ struct bug_entry {
 #endif
 
 #ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
 #endif
 
 #ifndef HAVE_ARCH_WARN_ON
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d984a9041436..d85172e9ed45 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name \
+    ____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
 	extern int simple_identifier_##var(void); \
@@ -34,6 +39,9 @@ do { \
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    DEFINE_PER_CPU(type, name)
+
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
 #define __raw_get_cpu_var(var)			per_cpu__##var
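DEFINE_PER_CPU_SHARED_ALIGNED puts the variable in the new .data.percpu.shared_aligned section and pads it to a cache line, so per-CPU data that is also read by other CPUs does not false-share with neighbouring per-CPU variables. A usage sketch with made-up names:

	/* Sketch: a per-CPU counter block that remote CPUs occasionally sum. */
	struct demo_stats {
		unsigned long packets;
		unsigned long drops;
	};
	DEFINE_PER_CPU_SHARED_ALIGNED(struct demo_stats, demo_stats);

	static void demo_account_packet(void)
	{
		__get_cpu_var(demo_stats).packets++;	/* local CPU, fast path */
	}

	static unsigned long demo_total_packets(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)	/* remote reads via per_cpu() */
			sum += per_cpu(demo_stats, cpu).packets;
		return sum;
	}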
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7d7bcf990e99..f605e8d0eed3 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -3,25 +3,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- *  - flush the old one
- *  - update the page tables
- *  - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading, and the pte lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
-	flush_tlb_page(__vma, __address); \
-} while (0)
-#endif
-
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
  * Largely same as above, but only sets the access flags (dirty,
@@ -68,31 +49,6 @@ do { \
 })
 #endif
 
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
-({ \
-	pte_t __pte = *__ptep; \
-	int r = 1; \
-	if (!pte_dirty(__pte)) \
-		r = 0; \
-	else \
-		set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
-			   pte_mkclean(__pte)); \
-	r; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
-	int __dirty; \
-	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
-	if (__dirty) \
-		flush_tlb_page(__vma, __address); \
-	__dirty; \
-})
-#endif
-
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear(__mm, __address, __ptep) \
 ({ \
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 09ec447fe2af..16a466e50681 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -18,7 +18,8 @@
 #define get_unaligned(ptr) \
 	__get_unaligned((ptr), sizeof(*(ptr)))
 #define put_unaligned(x,ptr) \
-	__put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))
+	((void)sizeof(*(ptr)=(x)),\
+	__put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
 
 /*
  * This function doesn't actually exist.  The idea is that when
@@ -95,21 +96,21 @@ static inline void __ustw(__u16 val, __u16 *addr)
 	default: \
 		bad_unaligned_access_length(); \
 	}; \
-	(__typeof__(*(ptr)))val; \
+	(__force __typeof__(*(ptr)))val; \
 })
 
 #define __put_unaligned(val, ptr, size) \
-do { \
+({ \
 	void *__gu_p = ptr; \
 	switch (size) { \
 	case 1: \
-		*(__u8 *)__gu_p = val; \
+		*(__u8 *)__gu_p = (__force __u8)val; \
 		break; \
 	case 2: \
-		__ustw(val, __gu_p); \
+		__ustw((__force __u16)val, __gu_p); \
 		break; \
 	case 4: \
-		__ustl(val, __gu_p); \
+		__ustl((__force __u32)val, __gu_p); \
 		break; \
 	case 8: \
 		__ustq(val, __gu_p); \
@@ -117,6 +118,7 @@ do { \
 	default: \
 		bad_unaligned_access_length(); \
 	}; \
-} while(0)
+	(void)0; \
+})
 
 #endif /* _ASM_GENERIC_UNALIGNED_H */
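put_unaligned() now evaluates sizeof(*(ptr)=(x)) purely as a compile-time type check (nothing inside sizeof is executed), and the __force casts keep sparse quiet when the pointer target is an endian-annotated type. A usage sketch, assuming a made-up on-the-wire layout with a 32-bit little-endian field at byte offset 3:

	/* Sketch: accessing a misaligned __le32 field in a byte buffer. */
	static u32 demo_get_len(const u8 *buf)
	{
		return le32_to_cpu(get_unaligned((const __le32 *)(buf + 3)));
	}

	static void demo_set_len(u8 *buf, u32 len)
	{
		put_unaligned(cpu_to_le32(len), (__le32 *)(buf + 3));
	}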
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 84155eb67f1d..0240e0506a07 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -224,7 +224,11 @@
 	}
 
 #define NOTES \
-	.notes : { *(.note.*) } :note
+	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
+		VMLINUX_SYMBOL(__start_notes) = .; \
+		*(.note.*) \
+		VMLINUX_SYMBOL(__stop_notes) = .; \
+	}
 
 #define INITCALLS \
 	*(.initcall0.init) \
@@ -245,3 +249,11 @@
 	*(.initcall7.init) \
 	*(.initcall7s.init)
 
+#define PERCPU(align) \
+	. = ALIGN(align); \
+	__per_cpu_start = .; \
+	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+		*(.data.percpu) \
+		*(.data.percpu.shared_aligned) \
+	} \
+	__per_cpu_end = .;
diff --git a/include/asm-h8300/a.out.h b/include/asm-h8300/a.out.h
index 3c70939f9f00..aa5d22778235 100644
--- a/include/asm-h8300/a.out.h
+++ b/include/asm-h8300/a.out.h
@@ -20,6 +20,7 @@ struct exec
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-h8300/fb.h b/include/asm-h8300/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-h8300/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...)	do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h
index 3b4f2903f91d..c8cc81a3aca5 100644
--- a/include/asm-h8300/page.h
+++ b/include/asm-h8300/page.h
@@ -22,7 +22,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
index ab17bb8e5465..851a60f8258c 100644
--- a/include/asm-i386/a.out.h
+++ b/include/asm-i386/a.out.h
@@ -20,6 +20,7 @@ struct exec
 #ifdef __KERNEL__
 
 #define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
 
 #endif
 
diff --git a/include/asm-i386/fb.h b/include/asm-i386/fb.h
new file mode 100644
index 000000000000..d1c6297d4a61
--- /dev/null
+++ b/include/asm-i386/fb.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+extern int fb_is_primary_device(struct fb_info *info);
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	if (boot_cpu_data.x86 > 3)
+		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 9e15ce0006eb..36f310632c49 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -41,6 +41,7 @@ extern int irqbalance_disable(char *str);
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+unsigned int do_IRQ(struct pt_regs *regs);
 void init_IRQ(void);
 void __init native_init_IRQ(void);
 
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 8774d06689da..06f7303c30ca 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -42,7 +42,6 @@ typedef u8 kprobe_opcode_t;
 	? (MAX_STACK_SIZE) \
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
-#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define ARCH_SUPPORTS_KRETPROBES
 #define ARCH_INACTIVE_KPROBE_COUNT 0
 #define flush_insn_slot(p)	do { } while (0)
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
index 7f161e760be6..a90c7a60109f 100644
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_IRQ_VECTORS_LIMITS_H
 #define _ASM_IRQ_VECTORS_LIMITS_H
 
-#ifdef CONFIG_X86_IO_APIC
+#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
 #define NR_IRQS 224
 # if (224 >= 32 * NR_CPUS)
 # define NR_IRQ_VECTORS NR_IRQS
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 8198d1cca1f3..7eb0b0b1fb3c 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -32,6 +32,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #endif
 }
 
+void leave_mm(unsigned long cpu);
+
 static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 818ac8bf01e2..99cf5d3692a9 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -34,7 +34,8 @@
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /*
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 7f846a7d6bcc..7df88be2dd9e 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -52,6 +52,8 @@ struct paravirt_ops
 	/* Basic arch-specific setup */
 	void (*arch_setup)(void);
 	char *(*memory_setup)(void);
+	void (*post_allocator_init)(void);
+
 	void (*init_IRQ)(void);
 	void (*time_init)(void);
 
@@ -116,7 +118,7 @@ struct paravirt_ops
 
 	u64 (*read_tsc)(void);
 	u64 (*read_pmc)(void);
-	u64 (*get_scheduled_cycles)(void);
+	unsigned long long (*sched_clock)(void);
 	unsigned long (*get_cpu_khz)(void);
 
 	/* Segment descriptor handling */
@@ -173,7 +175,7 @@ struct paravirt_ops
 				 unsigned long va);
 
 	/* Hooks for allocating/releasing pagetable pages */
-	void (*alloc_pt)(u32 pfn);
+	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
 	void (*alloc_pd)(u32 pfn);
 	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
 	void (*release_pt)(u32 pfn);
@@ -260,6 +262,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len)
 unsigned paravirt_patch_insns(void *site, unsigned len,
 			      const char *start, const char *end);
 
+int paravirt_disable_iospace(void);
 
 /*
  * This generates an indirect call based on the operation type number.
@@ -563,7 +566,10 @@ static inline u64 paravirt_read_tsc(void)
 
 #define rdtscll(val) (val = paravirt_read_tsc())
 
-#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
+static inline unsigned long long paravirt_sched_clock(void)
+{
+	return PVOP_CALL0(unsigned long long, sched_clock);
+}
 #define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
 
 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
@@ -669,6 +675,12 @@ static inline void setup_secondary_clock(void)
 }
 #endif
 
+static inline void paravirt_post_allocator_init(void)
+{
+	if (paravirt_ops.post_allocator_init)
+		(*paravirt_ops.post_allocator_init)();
+}
+
 static inline void paravirt_pagetable_setup_start(pgd_t *base)
 {
 	if (paravirt_ops.pagetable_setup_start)
@@ -725,9 +737,9 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(unsigned pfn)
+static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL1(alloc_pt, pfn);
+	PVOP_VCALL2(alloc_pt, mm, pfn);
 }
 static inline void paravirt_release_pt(unsigned pfn)
 {
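The raw get_scheduled_cycles() hook (which returned TSC cycles) is replaced by a sched_clock hook that returns nanoseconds directly, wrapped by paravirt_sched_clock() above. A sketch of how a backend might fill it in; the function name is made up and the body only illustrates the intent (real backends such as the native and VMI code use their own counters):

	/* Sketch: nanosecond-resolution sched_clock for paravirt_ops. */
	static unsigned long long demo_sched_clock(void)
	{
		unsigned long long cycles;

		rdtscll(cycles);		/* or a hypervisor-provided counter */
		return cycles_2_ns(cycles);	/* helper added to asm/timer.h below */
	}

	/* during early setup: */
	paravirt_ops.sched_clock = demo_sched_clock;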
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index f54830b5d5ac..a7ebd436f3cc 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -54,6 +54,11 @@ extern unsigned long __per_cpu_offset[];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name \
+    ____cacheline_aligned_in_smp
+
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index d07b7afc2692..f2fc33ceb9f2 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -7,7 +7,7 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd(pfn) do { } while (0)
 #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
@@ -17,13 +17,13 @@
 
 #define pmd_populate_kernel(mm, pmd, pte) \
 do { \
-	paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \
+	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \
 	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
 } while (0)
 
 #define pmd_populate(mm, pmd, pte) \
 do { \
-	paravirt_alloc_pt(page_to_pfn(pte)); \
+	paravirt_alloc_pt(mm, page_to_pfn(pte)); \
 	set_pmd(pmd, __pmd(_PAGE_TABLE + \
 		((unsigned long long)page_to_pfn(pte) << \
 			(unsigned long long) PAGE_SHIFT))); \
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 01734e05e63b..c7fefa6b12fd 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -289,17 +289,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 	__changed; \
 })
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
-	int __ret = 0; \
-	if (pte_dirty(*(ptep))) \
-		__ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
-						&(ptep)->pte_low); \
-	if (__ret) \
-		pte_update((vma)->vm_mm, addr, ptep); \
-	__ret; \
-})
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define ptep_test_and_clear_young(vma, addr, ptep) ({ \
 	int __ret = 0; \
@@ -311,27 +300,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 	__ret; \
 })
 
-/*
- * Rules for using ptep_establish: the pte MUST be a user pte, and
- * must be a present->present transition.
- */
-#define __HAVE_ARCH_PTEP_ESTABLISH
-#define ptep_establish(vma, address, ptep, pteval) \
-do { \
-	set_pte_present((vma)->vm_mm, address, ptep, pteval); \
-	flush_tlb_page(vma, address); \
-} while (0)
-
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(vma, address, ptep) \
-({ \
-	int __dirty; \
-	__dirty = ptep_test_and_clear_dirty((vma), (address), (ptep)); \
-	if (__dirty) \
-		flush_tlb_page(vma, address); \
-	__dirty; \
-})
-
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 #define ptep_clear_flush_young(vma, address, ptep) \
 ({ \
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 0d5bff9dc4a5..7862fe858a9e 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -81,6 +81,10 @@ void __init add_memory_region(unsigned long long start,
 
 extern unsigned long init_pg_tables_end;
 
+#ifndef CONFIG_PARAVIRT
+#define paravirt_post_allocator_init() do {} while (0)
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 0c7132787062..1f73bde165b1 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -43,9 +43,12 @@ extern u8 x86_cpu_to_apicid[];
 
 #define cpu_physical_id(cpu)	x86_cpu_to_apicid[cpu]
 
+extern void set_cpu_sibling_map(int cpu);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
+extern void remove_siblinginfo(int cpu);
 #endif
 
 struct smp_ops
@@ -129,6 +132,8 @@ extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern unsigned int num_processors;
 
+void __cpuinit smp_store_cpu_info(int id);
+
 #endif /* !__ASSEMBLY__ */
 
 #else /* CONFIG_SMP */
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 153770e25faa..51a713e33a9e 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -15,8 +15,38 @@ extern int no_sync_cmos_clock;
 extern int recalibrate_cpu_khz(void);
 
 #ifndef CONFIG_PARAVIRT
-#define get_scheduled_cycles(val) rdtscll(val)
 #define calculate_cpu_khz() native_calculate_cpu_khz()
 #endif
 
+/* Accellerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ *		ns = cycles / (freq / ns_per_sec)
+ *		ns = cycles * (ns_per_sec / freq)
+ *		ns = cycles * (10^9 / (cpu_khz * 10^3))
+ *		ns = cycles * (10^6 / cpu_khz)
+ *
+ *	Then we use scaling math (suggested by george@mvista.com) to get:
+ *		ns = cycles * (10^6 * SC / cpu_khz) / SC
+ *		ns = cycles * cyc2ns_scale / SC
+ *
+ *	And since SC is a constant power of two, we can convert the div
+ *  into a shift.
+ *
+ *  We can use khz divisor instead of mhz to keep a better percision, since
+ *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *  (mathieu.desnoyers@polymtl.ca)
+ *
+ *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+extern unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+
 #endif
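To make the scaling concrete: with CYC2NS_SCALE_FACTOR = 10, a 2 GHz CPU (cpu_khz = 2,000,000) gets cyc2ns_scale = (10^6 << 10) / 2,000,000 = 512, so cycles_2_ns(1000) = (1000 * 512) >> 10 = 500 ns, i.e. 0.5 ns per cycle as expected. The setter itself lives in arch code (tsc.c); the sketch below is only meant to show the arithmetic, not the exact kernel function:

	/* Sketch: deriving the scale from the calibrated CPU frequency. */
	static void demo_set_cyc2ns_scale(unsigned long khz)
	{
		cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / khz;
	}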
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 62c091ffcccc..a4d806610b7f 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -63,6 +63,7 @@ extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
 extern void init_tsc_clocksource(void);
+int check_tsc_unstable(void);
 
 /*
  * Boot-time check whether the TSCs are synchronized across
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index e84ace1ec8bf..9b15545eb9b5 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,10 +329,11 @@
 #define __NR_signalfd		321
 #define __NR_timerfd		322
 #define __NR_eventfd		323
+#define __NR_fallocate		324
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 324
+#define NR_syscalls 325
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
index 213930b995cb..478188130328 100644
--- a/include/asm-i386/vmi_time.h
+++ b/include/asm-i386/vmi_time.h
@@ -49,7 +49,7 @@ extern struct vmi_timer_ops {
 extern void __init vmi_time_init(void);
 extern unsigned long vmi_get_wallclock(void);
 extern int vmi_set_wallclock(unsigned long now);
-extern unsigned long long vmi_get_sched_cycles(void);
+extern unsigned long long vmi_sched_clock(void);
 extern unsigned long vmi_cpu_khz(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-i386/xen/hypercall.h b/include/asm-i386/xen/hypercall.h
new file mode 100644
index 000000000000..bc0ee7d961ca
--- /dev/null
+++ b/include/asm-i386/xen/hypercall.h
@@ -0,0 +1,413 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_H__
+#define __HYPERCALL_H__
+
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/physdev.h>
+
+extern struct { char _entry[32]; } hypercall_page[];
+
+#define _hypercall0(type, name) \
+({ \
+	long __res; \
+	asm volatile ( \
+		"call %[call]" \
+		: "=a" (__res) \
+		: [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+		: "memory" ); \
+	(type)__res; \
+})
+
+#define _hypercall1(type, name, a1) \
+({ \
+	long __res, __ign1; \
+	asm volatile ( \
+		"call %[call]" \
+		: "=a" (__res), "=b" (__ign1) \
+		: "1" ((long)(a1)), \
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+		: "memory" ); \
+	(type)__res; \
+})
+
+#define _hypercall2(type, name, a1, a2) \
+({ \
+	long __res, __ign1, __ign2; \
+	asm volatile ( \
+		"call %[call]" \
+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
+		: "1" ((long)(a1)), "2" ((long)(a2)), \
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+		: "memory" ); \
+	(type)__res; \
+})
+
+#define _hypercall3(type, name, a1, a2, a3) \
+({ \
+	long __res, __ign1, __ign2, __ign3; \
+	asm volatile ( \
+		"call %[call]" \
+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+		  "=d" (__ign3) \
+		: "1" ((long)(a1)), "2" ((long)(a2)), \
+		  "3" ((long)(a3)), \
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+		: "memory" ); \
+	(type)__res; \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4) \
+({ \
+	long __res, __ign1, __ign2, __ign3, __ign4; \
+	asm volatile ( \
+		"call %[call]" \
+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+		  "=d" (__ign3), "=S" (__ign4) \
+		: "1" ((long)(a1)), "2" ((long)(a2)), \
+		  "3" ((long)(a3)), "4" ((long)(a4)), \
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+		: "memory" ); \
+	(type)__res; \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
+({ \
+	long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
+	asm volatile ( \
+		"call %[call]" \
+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+		  "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
+		: "1" ((long)(a1)), "2" ((long)(a2)), \
+		  "3" ((long)(a3)), "4" ((long)(a4)), \
+		  "5" ((long)(a5)), \
+		  [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+		: "memory" ); \
+	(type)__res; \
+})
+
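The _hypercallN macros encode the 32-bit Xen hypercall ABI: the call goes through the per-hypercall 32-byte slot in hypercall_page, arguments 1-5 travel in %ebx, %ecx, %edx, %esi and %edi (the "=b".."=D" dummy outputs mark those registers as clobbered, and the matching "1".."5" inputs load them), and the result comes back in %eax. As a usage sketch of the wrappers that follow (CONSOLEIO_write comes from xen/interface/xen.h; the helper name is made up):

	/* Sketch: write a string to the Xen console via console_io. */
	static void demo_xen_puts(const char *msg)
	{
		HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg),
				      (char *)msg);
	}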
+static inline int
+HYPERVISOR_set_trap_table(struct trap_info *table)
+{
+	return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(struct mmu_update *req, int count,
+		      int *success_count, domid_t domid)
+{
+	return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
+		     int *success_count, domid_t domid)
+{
+	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
+{
+	return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
+{
+	return _hypercall2(int, stack_switch, ss, esp);
+}
+
+static inline int
+HYPERVISOR_set_callbacks(unsigned long event_selector,
+			 unsigned long event_address,
+			 unsigned long failsafe_selector,
+			 unsigned long failsafe_address)
+{
+	return _hypercall4(int, set_callbacks,
+			   event_selector, event_address,
+			   failsafe_selector, failsafe_address);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(int set)
+{
+	return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(int cmd, unsigned long arg)
+{
+	return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(u64 timeout)
+{
+	unsigned long timeout_hi = (unsigned long)(timeout>>32);
+	unsigned long timeout_lo = (unsigned long)timeout;
+	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(int reg, unsigned long value)
+{
+	return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(int reg)
+{
+	return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(u64 ma, u64 desc)
+{
+	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+}
+
+static inline int
+HYPERVISOR_memory_op(unsigned int cmd, void *arg)
+{
+	return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(void *call_list, int nr_calls)
+{
+	return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
+			     unsigned long flags)
+{
+	unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+	pte_hi = new_val.pte_high;
+#endif
+	return _hypercall4(int, update_va_mapping, va,
+			   new_val.pte_low, pte_hi, flags);
+}
+
+static inline int
+HYPERVISOR_event_channel_op(int cmd, void *arg)
+{
+	int rc = _hypercall2(int, event_channel_op, cmd, arg);
+	if (unlikely(rc == -ENOSYS)) {
+		struct evtchn_op op;
+		op.cmd = cmd;
+		memcpy(&op.u, arg, sizeof(op.u));
+		rc = _hypercall1(int, event_channel_op_compat, &op);
+		memcpy(arg, &op.u, sizeof(op.u));
+	}
+	return rc;
+}
+
+static inline int
+HYPERVISOR_xen_version(int cmd, void *arg)
+{
+	return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(int cmd, int count, char *str)
+{
+	return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+HYPERVISOR_physdev_op(int cmd, void *arg)
+{
+	int rc = _hypercall2(int, physdev_op, cmd, arg);
+	if (unlikely(rc == -ENOSYS)) {
+		struct physdev_op op;
+		op.cmd = cmd;
+		memcpy(&op.u, arg, sizeof(op.u));
+		rc = _hypercall1(int, physdev_op_compat, &op);
+		memcpy(arg, &op.u, sizeof(op.u));
+	}
+	return rc;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
+{
+	return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
+					 unsigned long flags, domid_t domid)
+{
+	unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+	pte_hi = new_val.pte_high;
+#endif
+	return _hypercall5(int, update_va_mapping_otherdomain, va,
+			   new_val.pte_low, pte_hi, flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
+{
+	return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
+{
+	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_suspend(unsigned long srec)
+{
+	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+			   SHUTDOWN_suspend, srec);
+}
+
+static inline int
+HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
+{
+	return _hypercall2(int, nmi_op, op, arg);
+}
+
+static inline void
+MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
313 | pte_t new_val, unsigned long flags) | ||
314 | { | ||
315 | mcl->op = __HYPERVISOR_update_va_mapping; | ||
316 | mcl->args[0] = va; | ||
317 | #ifdef CONFIG_X86_PAE | ||
318 | mcl->args[1] = new_val.pte_low; | ||
319 | mcl->args[2] = new_val.pte_high; | ||
320 | #else | ||
321 | mcl->args[1] = new_val.pte_low; | ||
322 | mcl->args[2] = 0; | ||
323 | #endif | ||
324 | mcl->args[3] = flags; | ||
325 | } | ||
326 | |||
327 | static inline void | ||
328 | MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd, | ||
329 | void *uop, unsigned int count) | ||
330 | { | ||
331 | mcl->op = __HYPERVISOR_grant_table_op; | ||
332 | mcl->args[0] = cmd; | ||
333 | mcl->args[1] = (unsigned long)uop; | ||
334 | mcl->args[2] = count; | ||
335 | } | ||
336 | |||
337 | static inline void | ||
338 | MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va, | ||
339 | pte_t new_val, unsigned long flags, | ||
340 | domid_t domid) | ||
341 | { | ||
342 | mcl->op = __HYPERVISOR_update_va_mapping_otherdomain; | ||
343 | mcl->args[0] = va; | ||
344 | #ifdef CONFIG_X86_PAE | ||
345 | mcl->args[1] = new_val.pte_low; | ||
346 | mcl->args[2] = new_val.pte_high; | ||
347 | #else | ||
348 | mcl->args[1] = new_val.pte_low; | ||
349 | mcl->args[2] = 0; | ||
350 | #endif | ||
351 | mcl->args[3] = flags; | ||
352 | mcl->args[4] = domid; | ||
353 | } | ||
354 | |||
355 | static inline void | ||
356 | MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, | ||
357 | struct desc_struct desc) | ||
358 | { | ||
359 | mcl->op = __HYPERVISOR_update_descriptor; | ||
360 | mcl->args[0] = maddr; | ||
361 | mcl->args[1] = maddr >> 32; | ||
362 | mcl->args[2] = desc.a; | ||
363 | mcl->args[3] = desc.b; | ||
364 | } | ||
365 | |||
366 | static inline void | ||
367 | MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg) | ||
368 | { | ||
369 | mcl->op = __HYPERVISOR_memory_op; | ||
370 | mcl->args[0] = cmd; | ||
371 | mcl->args[1] = (unsigned long)arg; | ||
372 | } | ||
373 | |||
374 | static inline void | ||
375 | MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, | ||
376 | int count, int *success_count, domid_t domid) | ||
377 | { | ||
378 | mcl->op = __HYPERVISOR_mmu_update; | ||
379 | mcl->args[0] = (unsigned long)req; | ||
380 | mcl->args[1] = count; | ||
381 | mcl->args[2] = (unsigned long)success_count; | ||
382 | mcl->args[3] = domid; | ||
383 | } | ||
384 | |||
385 | static inline void | ||
386 | MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count, | ||
387 | int *success_count, domid_t domid) | ||
388 | { | ||
389 | mcl->op = __HYPERVISOR_mmuext_op; | ||
390 | mcl->args[0] = (unsigned long)op; | ||
391 | mcl->args[1] = count; | ||
392 | mcl->args[2] = (unsigned long)success_count; | ||
393 | mcl->args[3] = domid; | ||
394 | } | ||
395 | |||
396 | static inline void | ||
397 | MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries) | ||
398 | { | ||
399 | mcl->op = __HYPERVISOR_set_gdt; | ||
400 | mcl->args[0] = (unsigned long)frames; | ||
401 | mcl->args[1] = entries; | ||
402 | } | ||
403 | |||
404 | static inline void | ||
405 | MULTI_stack_switch(struct multicall_entry *mcl, | ||
406 | unsigned long ss, unsigned long esp) | ||
407 | { | ||
408 | mcl->op = __HYPERVISOR_stack_switch; | ||
409 | mcl->args[0] = ss; | ||
410 | mcl->args[1] = esp; | ||
411 | } | ||
412 | |||
413 | #endif /* __HYPERCALL_H__ */ | ||
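As an illustration of how the MULTI_* constructors above pair with HYPERVISOR_multicall(), here is a minimal sketch (not part of the patch; the function name and its callers are hypothetical, and UVMF_INVLPG is assumed to come from xen/interface/xen.h): two PTE updates are queued in one multicall_entry array so the guest traps into Xen only once.

	static int update_two_mappings(unsigned long va1, pte_t pte1,
				       unsigned long va2, pte_t pte2)
	{
		struct multicall_entry mcl[2];

		/* Queue both updates; each helper only fills in op/args. */
		MULTI_update_va_mapping(&mcl[0], va1, pte1, UVMF_INVLPG);
		MULTI_update_va_mapping(&mcl[1], va2, pte2, UVMF_INVLPG);

		/* One hypercall issues the whole batch; the per-entry
		 * status is returned in mcl[i].result afterwards. */
		return HYPERVISOR_multicall(mcl, 2);
	}
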
diff --git a/include/asm-i386/xen/hypervisor.h b/include/asm-i386/xen/hypervisor.h new file mode 100644 index 000000000000..8e15dd28c91f --- /dev/null +++ b/include/asm-i386/xen/hypervisor.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /****************************************************************************** | ||
2 | * hypervisor.h | ||
3 | * | ||
4 | * Linux-specific hypervisor handling. | ||
5 | * | ||
6 | * Copyright (c) 2002-2004, K A Fraser | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef __HYPERVISOR_H__ | ||
34 | #define __HYPERVISOR_H__ | ||
35 | |||
36 | #include <linux/types.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/version.h> | ||
39 | |||
40 | #include <xen/interface/xen.h> | ||
41 | #include <xen/interface/version.h> | ||
42 | |||
43 | #include <asm/ptrace.h> | ||
44 | #include <asm/page.h> | ||
45 | #include <asm/desc.h> | ||
46 | #if defined(__i386__) | ||
47 | # ifdef CONFIG_X86_PAE | ||
48 | # include <asm-generic/pgtable-nopud.h> | ||
49 | # else | ||
50 | # include <asm-generic/pgtable-nopmd.h> | ||
51 | # endif | ||
52 | #endif | ||
53 | #include <asm/xen/hypercall.h> | ||
54 | |||
55 | /* arch/i386/kernel/setup.c */ | ||
56 | extern struct shared_info *HYPERVISOR_shared_info; | ||
57 | extern struct start_info *xen_start_info; | ||
58 | #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) | ||
59 | |||
60 | /* arch/i386/mach-xen/evtchn.c */ | ||
61 | /* Force a proper event-channel callback from Xen. */ | ||
62 | extern void force_evtchn_callback(void); | ||
63 | |||
64 | /* Turn jiffies into Xen system time. */ | ||
65 | u64 jiffies_to_st(unsigned long jiffies); | ||
66 | |||
67 | |||
68 | #define MULTI_UVMFLAGS_INDEX 3 | ||
69 | #define MULTI_UVMDOMID_INDEX 4 | ||
70 | |||
71 | #define is_running_on_xen() (xen_start_info ? 1 : 0) | ||
72 | |||
73 | #endif /* __HYPERVISOR_H__ */ | ||
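A minimal usage sketch for the two predicates above (the init function is hypothetical, and <linux/init.h>/<linux/errno.h> are assumed to be included as well): paravirtualized code typically bails out early when xen_start_info has not been filled in by the Xen boot path.

	static int __init example_xen_init(void)
	{
		if (!is_running_on_xen())
			return -ENODEV;		/* booted on bare metal */

		if (is_initial_xendomain())
			printk(KERN_INFO "example: running as dom0\n");

		return 0;
	}
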
diff --git a/include/asm-i386/xen/interface.h b/include/asm-i386/xen/interface.h new file mode 100644 index 000000000000..165c3968e138 --- /dev/null +++ b/include/asm-i386/xen/interface.h | |||
@@ -0,0 +1,188 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch-x86_32.h | ||
3 | * | ||
4 | * Guest OS interface to x86 32-bit Xen. | ||
5 | * | ||
6 | * Copyright (c) 2004, K A Fraser | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_ARCH_X86_32_H__ | ||
10 | #define __XEN_PUBLIC_ARCH_X86_32_H__ | ||
11 | |||
12 | #ifdef __XEN__ | ||
13 | #define __DEFINE_GUEST_HANDLE(name, type) \ | ||
14 | typedef struct { type *p; } __guest_handle_ ## name | ||
15 | #else | ||
16 | #define __DEFINE_GUEST_HANDLE(name, type) \ | ||
17 | typedef type * __guest_handle_ ## name | ||
18 | #endif | ||
19 | |||
20 | #define DEFINE_GUEST_HANDLE_STRUCT(name) \ | ||
21 | __DEFINE_GUEST_HANDLE(name, struct name) | ||
22 | #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) | ||
23 | #define GUEST_HANDLE(name) __guest_handle_ ## name | ||
24 | |||
25 | #ifndef __ASSEMBLY__ | ||
26 | /* Guest handles for primitive C types. */ | ||
27 | __DEFINE_GUEST_HANDLE(uchar, unsigned char); | ||
28 | __DEFINE_GUEST_HANDLE(uint, unsigned int); | ||
29 | __DEFINE_GUEST_HANDLE(ulong, unsigned long); | ||
30 | DEFINE_GUEST_HANDLE(char); | ||
31 | DEFINE_GUEST_HANDLE(int); | ||
32 | DEFINE_GUEST_HANDLE(long); | ||
33 | DEFINE_GUEST_HANDLE(void); | ||
34 | #endif | ||
35 | |||
36 | /* | ||
37 | * SEGMENT DESCRIPTOR TABLES | ||
38 | */ | ||
39 | /* | ||
40 | * A number of GDT entries are reserved by Xen. These are not situated at the | ||
41 | * start of the GDT because some stupid OSes export hard-coded selector values | ||
42 | * in their ABI. These hard-coded values are always near the start of the GDT, | ||
43 | * so Xen places itself out of the way, at the far end of the GDT. | ||
44 | */ | ||
45 | #define FIRST_RESERVED_GDT_PAGE 14 | ||
46 | #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) | ||
47 | #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) | ||
48 | |||
49 | /* | ||
50 | * These flat segments are in the Xen-private section of every GDT. Since these | ||
51 | * are also present in the initial GDT, many OSes will be able to avoid | ||
52 | * installing their own GDT. | ||
53 | */ | ||
54 | #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ | ||
55 | #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ | ||
56 | #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ | ||
57 | #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ | ||
58 | #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ | ||
59 | #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ | ||
60 | |||
61 | #define FLAT_KERNEL_CS FLAT_RING1_CS | ||
62 | #define FLAT_KERNEL_DS FLAT_RING1_DS | ||
63 | #define FLAT_KERNEL_SS FLAT_RING1_SS | ||
64 | #define FLAT_USER_CS FLAT_RING3_CS | ||
65 | #define FLAT_USER_DS FLAT_RING3_DS | ||
66 | #define FLAT_USER_SS FLAT_RING3_SS | ||
67 | |||
68 | /* And the trap vector is... */ | ||
69 | #define TRAP_INSTR "int $0x82" | ||
70 | |||
71 | /* | ||
72 | * Virtual addresses beyond this are not modifiable by guest OSes. The | ||
73 | * machine->physical mapping table starts at this address, read-only. | ||
74 | */ | ||
75 | #ifdef CONFIG_X86_PAE | ||
76 | #define __HYPERVISOR_VIRT_START 0xF5800000 | ||
77 | #else | ||
78 | #define __HYPERVISOR_VIRT_START 0xFC000000 | ||
79 | #endif | ||
80 | |||
81 | #ifndef HYPERVISOR_VIRT_START | ||
82 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) | ||
83 | #endif | ||
84 | |||
85 | #ifndef machine_to_phys_mapping | ||
86 | #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) | ||
87 | #endif | ||
88 | |||
89 | /* Maximum number of virtual CPUs in multi-processor guests. */ | ||
90 | #define MAX_VIRT_CPUS 32 | ||
91 | |||
92 | #ifndef __ASSEMBLY__ | ||
93 | |||
94 | /* | ||
95 | * Send an array of these to HYPERVISOR_set_trap_table() | ||
96 | */ | ||
97 | #define TI_GET_DPL(_ti) ((_ti)->flags & 3) | ||
98 | #define TI_GET_IF(_ti) ((_ti)->flags & 4) | ||
99 | #define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl)) | ||
100 | #define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2)) | ||
101 | |||
102 | struct trap_info { | ||
103 | uint8_t vector; /* exception vector */ | ||
104 | uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ | ||
105 | uint16_t cs; /* code selector */ | ||
106 | unsigned long address; /* code offset */ | ||
107 | }; | ||
108 | DEFINE_GUEST_HANDLE_STRUCT(trap_info); | ||
109 | |||
110 | struct cpu_user_regs { | ||
111 | uint32_t ebx; | ||
112 | uint32_t ecx; | ||
113 | uint32_t edx; | ||
114 | uint32_t esi; | ||
115 | uint32_t edi; | ||
116 | uint32_t ebp; | ||
117 | uint32_t eax; | ||
118 | uint16_t error_code; /* private */ | ||
119 | uint16_t entry_vector; /* private */ | ||
120 | uint32_t eip; | ||
121 | uint16_t cs; | ||
122 | uint8_t saved_upcall_mask; | ||
123 | uint8_t _pad0; | ||
124 | uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ | ||
125 | uint32_t esp; | ||
126 | uint16_t ss, _pad1; | ||
127 | uint16_t es, _pad2; | ||
128 | uint16_t ds, _pad3; | ||
129 | uint16_t fs, _pad4; | ||
130 | uint16_t gs, _pad5; | ||
131 | }; | ||
132 | DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs); | ||
133 | |||
134 | typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ | ||
135 | |||
136 | /* | ||
137 | * The following is all CPU context. Note that the fpu_ctxt block is filled | ||
138 | * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. | ||
139 | */ | ||
140 | struct vcpu_guest_context { | ||
141 | /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */ | ||
142 | struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ | ||
143 | #define VGCF_I387_VALID (1<<0) | ||
144 | #define VGCF_HVM_GUEST (1<<1) | ||
145 | #define VGCF_IN_KERNEL (1<<2) | ||
146 | unsigned long flags; /* VGCF_* flags */ | ||
147 | struct cpu_user_regs user_regs; /* User-level CPU registers */ | ||
148 | struct trap_info trap_ctxt[256]; /* Virtual IDT */ | ||
149 | unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ | ||
150 | unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ | ||
151 | unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ | ||
152 | unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ | ||
153 | unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ | ||
154 | unsigned long event_callback_cs; /* CS:EIP of event callback */ | ||
155 | unsigned long event_callback_eip; | ||
156 | unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ | ||
157 | unsigned long failsafe_callback_eip; | ||
158 | unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ | ||
159 | }; | ||
160 | DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); | ||
161 | |||
162 | struct arch_shared_info { | ||
163 | unsigned long max_pfn; /* max pfn that appears in table */ | ||
164 | /* Frame containing list of mfns containing list of mfns containing p2m. */ | ||
165 | unsigned long pfn_to_mfn_frame_list_list; | ||
166 | unsigned long nmi_reason; | ||
167 | }; | ||
168 | |||
169 | struct arch_vcpu_info { | ||
170 | unsigned long cr2; | ||
171 | unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */ | ||
172 | }; | ||
173 | |||
174 | #endif /* !__ASSEMBLY__ */ | ||
175 | |||
176 | /* | ||
177 | * Prefix forces emulation of some non-trapping instructions. | ||
178 | * Currently only CPUID. | ||
179 | */ | ||
180 | #ifdef __ASSEMBLY__ | ||
181 | #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; | ||
182 | #define XEN_CPUID XEN_EMULATE_PREFIX cpuid | ||
183 | #else | ||
184 | #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " | ||
185 | #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" | ||
186 | #endif | ||
187 | |||
188 | #endif | ||
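To make the trap_info and TI_* definitions concrete, here is a sketch of how a guest could populate and register its virtual IDT (the handler symbols and functions are hypothetical; HYPERVISOR_set_trap_table() is the wrapper added in asm-i386/xen/hypercall.h above, and the table is conventionally terminated with an all-zero entry):

	extern void page_fault_handler(void);	/* assumed guest handlers */
	extern void int80_handler(void);

	static struct trap_info example_traps[] = {
		{ 14,   0, FLAT_KERNEL_CS, (unsigned long)page_fault_handler },
		{ 0x80, 0, FLAT_KERNEL_CS, (unsigned long)int80_handler },
		{ }				/* zero terminator */
	};

	static void example_register_traps(void)
	{
		/* int $0x80 must be reachable from ring 3 ... */
		TI_SET_DPL(&example_traps[1], 3);
		/* ... and may be entered with event delivery left enabled. */
		TI_SET_IF(&example_traps[1], 1);

		HYPERVISOR_set_trap_table(example_traps);
	}
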
diff --git a/include/asm-ia64/fb.h b/include/asm-ia64/fb.h new file mode 100644 index 000000000000..89a397cee90a --- /dev/null +++ b/include/asm-ia64/fb.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <linux/efi.h> | ||
7 | #include <asm/page.h> | ||
8 | |||
9 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
10 | unsigned long off) | ||
11 | { | ||
12 | if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) | ||
13 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
14 | else | ||
15 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
16 | } | ||
17 | |||
18 | static inline int fb_is_primary_device(struct fb_info *info) | ||
19 | { | ||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-ia64/ioctls.h b/include/asm-ia64/ioctls.h index 31ee521aeb7a..f41b636a0bf6 100644 --- a/include/asm-ia64/ioctls.h +++ b/include/asm-ia64/ioctls.h | |||
@@ -53,6 +53,10 @@ | |||
53 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | 53 | #define TIOCSBRK 0x5427 /* BSD compatibility */ |
54 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | 54 | #define TIOCCBRK 0x5428 /* BSD compatibility */ |
55 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | 55 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ |
56 | #define TCGETS2 _IOR('T',0x2A, struct termios2) | ||
57 | #define TCSETS2 _IOW('T',0x2B, struct termios2) | ||
58 | #define TCSETSW2 _IOW('T',0x2C, struct termios2) | ||
59 | #define TCSETSF2 _IOW('T',0x2D, struct termios2) | ||
56 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | 60 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ |
57 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 61 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
58 | 62 | ||
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h index 6382e52ec227..067d9dea68f9 100644 --- a/include/asm-ia64/kprobes.h +++ b/include/asm-ia64/kprobes.h | |||
@@ -82,8 +82,6 @@ struct kprobe_ctlblk { | |||
82 | struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ]; | 82 | struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ]; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry | ||
86 | |||
87 | #define ARCH_SUPPORTS_KRETPROBES | 85 | #define ARCH_SUPPORTS_KRETPROBES |
88 | #define ARCH_INACTIVE_KPROBE_COUNT 1 | 86 | #define ARCH_INACTIVE_KPROBE_COUNT 1 |
89 | 87 | ||
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 485759ba9e36..d6345464a2b3 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h | |||
@@ -87,12 +87,13 @@ do { \ | |||
87 | } while (0) | 87 | } while (0) |
88 | 88 | ||
89 | 89 | ||
90 | #define alloc_zeroed_user_highpage(vma, vaddr) \ | 90 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ |
91 | ({ \ | 91 | ({ \ |
92 | struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \ | 92 | struct page *page = alloc_page_vma( \ |
93 | if (page) \ | 93 | GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \ |
94 | flush_dcache_page(page); \ | 94 | if (page) \ |
95 | page; \ | 95 | flush_dcache_page(page); \ |
96 | page; \ | ||
96 | }) | 97 | }) |
97 | 98 | ||
98 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | 99 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
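For context on the new movableflags argument (a sketch only, not the actual generic header text): the arch hook is presumably consumed by a wrapper in linux/highmem.h along these lines, so callers who want the page placed in the movable zone can pass __GFP_MOVABLE without each architecture repeating it.

	#ifdef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
	static inline struct page *
	alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					   unsigned long vaddr)
	{
		return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
	}
	#endif
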
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h index fbe5cf3ab8dc..43a7aac414e0 100644 --- a/include/asm-ia64/percpu.h +++ b/include/asm-ia64/percpu.h | |||
@@ -29,6 +29,16 @@ | |||
29 | __attribute__((__section__(".data.percpu"))) \ | 29 | __attribute__((__section__(".data.percpu"))) \ |
30 | __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name | 30 | __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name |
31 | 31 | ||
32 | #ifdef CONFIG_SMP | ||
33 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
34 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | ||
35 | __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name \ | ||
36 | ____cacheline_aligned_in_smp | ||
37 | #else | ||
38 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
39 | DEFINE_PER_CPU(type, name) | ||
40 | #endif | ||
41 | |||
32 | /* | 42 | /* |
33 | * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an | 43 | * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an |
34 | * external routine, to avoid include-hell. | 44 | * external routine, to avoid include-hell. |
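A brief usage example for the new ia64 DEFINE_PER_CPU_SHARED_ALIGNED variant (the struct and function names are made up; __get_cpu_var() is the ordinary per-CPU accessor of this kernel generation): data written frequently from its owning CPU is placed in .data.percpu.shared_aligned and padded to a cache line, avoiding false sharing with neighbouring per-CPU objects.

	struct example_stats {
		unsigned long events;
	};

	/* Cache-line aligned on SMP, plain DEFINE_PER_CPU otherwise. */
	DEFINE_PER_CPU_SHARED_ALIGNED(struct example_stats, example_stats);

	static void example_account_event(void)
	{
		/* caller is assumed to have preemption disabled */
		__get_cpu_var(example_stats).events++;
	}
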
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index f923d811c421..de6d01e24dd0 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h | |||
@@ -395,22 +395,6 @@ ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t | |||
395 | #endif | 395 | #endif |
396 | } | 396 | } |
397 | 397 | ||
398 | static inline int | ||
399 | ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
400 | { | ||
401 | #ifdef CONFIG_SMP | ||
402 | if (!pte_dirty(*ptep)) | ||
403 | return 0; | ||
404 | return test_and_clear_bit(_PAGE_D_BIT, ptep); | ||
405 | #else | ||
406 | pte_t pte = *ptep; | ||
407 | if (!pte_dirty(pte)) | ||
408 | return 0; | ||
409 | set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte)); | ||
410 | return 1; | ||
411 | #endif | ||
412 | } | ||
413 | |||
414 | static inline pte_t | 398 | static inline pte_t |
415 | ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 399 | ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
416 | { | 400 | { |
@@ -543,8 +527,10 @@ extern void lazy_mmu_prot_update (pte_t pte); | |||
543 | # define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \ | 527 | # define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \ |
544 | ({ \ | 528 | ({ \ |
545 | int __changed = !pte_same(*(__ptep), __entry); \ | 529 | int __changed = !pte_same(*(__ptep), __entry); \ |
546 | if (__changed) \ | 530 | if (__changed) { \ |
547 | ptep_establish(__vma, __addr, __ptep, __entry); \ | 531 | set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \ |
532 | flush_tlb_page(__vma, __addr); \ | ||
533 | } \ | ||
548 | __changed; \ | 534 | __changed; \ |
549 | }) | 535 | }) |
550 | #endif | 536 | #endif |
@@ -588,7 +574,6 @@ extern void lazy_mmu_prot_update (pte_t pte); | |||
588 | #endif | 574 | #endif |
589 | 575 | ||
590 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 576 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
591 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
592 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 577 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
593 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 578 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
594 | #define __HAVE_ARCH_PTE_SAME | 579 | #define __HAVE_ARCH_PTE_SAME |
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index db81ba406cef..6251c76437d2 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h | |||
@@ -295,9 +295,9 @@ struct thread_struct { | |||
295 | regs->ar_bspstore = current->thread.rbs_bot; \ | 295 | regs->ar_bspstore = current->thread.rbs_bot; \ |
296 | regs->ar_fpsr = FPSR_DEFAULT; \ | 296 | regs->ar_fpsr = FPSR_DEFAULT; \ |
297 | regs->loadrs = 0; \ | 297 | regs->loadrs = 0; \ |
298 | regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \ | 298 | regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \ |
299 | regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ | 299 | regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ |
300 | if (unlikely(!current->mm->dumpable)) { \ | 300 | if (unlikely(!get_dumpable(current->mm))) { \ |
301 | /* \ | 301 | /* \ |
302 | * Zap scratch regs to avoid leaking bits between processes with different \ | 302 | * Zap scratch regs to avoid leaking bits between processes with different \ |
303 | * uid/privileges. \ | 303 | * uid/privileges. \ |
diff --git a/include/asm-ia64/termbits.h b/include/asm-ia64/termbits.h index 7fae3109ef47..9f162e0089ad 100644 --- a/include/asm-ia64/termbits.h +++ b/include/asm-ia64/termbits.h | |||
@@ -149,6 +149,7 @@ struct ktermios { | |||
149 | #define HUPCL 0002000 | 149 | #define HUPCL 0002000 |
150 | #define CLOCAL 0004000 | 150 | #define CLOCAL 0004000 |
151 | #define CBAUDEX 0010000 | 151 | #define CBAUDEX 0010000 |
152 | #define BOTHER 0010000 | ||
152 | #define B57600 0010001 | 153 | #define B57600 0010001 |
153 | #define B115200 0010002 | 154 | #define B115200 0010002 |
154 | #define B230400 0010003 | 155 | #define B230400 0010003 |
@@ -164,10 +165,12 @@ struct ktermios { | |||
164 | #define B3000000 0010015 | 165 | #define B3000000 0010015 |
165 | #define B3500000 0010016 | 166 | #define B3500000 0010016 |
166 | #define B4000000 0010017 | 167 | #define B4000000 0010017 |
167 | #define CIBAUD 002003600000 /* input baud rate (not used) */ | 168 | #define CIBAUD 002003600000 /* input baud rate */ |
168 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | 169 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ |
169 | #define CRTSCTS 020000000000 /* flow control */ | 170 | #define CRTSCTS 020000000000 /* flow control */ |
170 | 171 | ||
172 | #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ | ||
173 | |||
171 | /* c_lflag bits */ | 174 | /* c_lflag bits */ |
172 | #define ISIG 0000001 | 175 | #define ISIG 0000001 |
173 | #define ICANON 0000002 | 176 | #define ICANON 0000002 |
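The BOTHER and IBSHIFT additions, together with the TCGETS2/TCSETS2 ioctls added to asm-ia64/ioctls.h above, are what allow arbitrary baud rates. A userspace sketch (function name hypothetical; <asm/termbits.h> supplies struct termios2 and must not be mixed with <termios.h>):

	#include <sys/ioctl.h>
	#include <asm/termbits.h>

	static int set_custom_baud(int fd, int rate)
	{
		struct termios2 tio;

		if (ioctl(fd, TCGETS2, &tio) < 0)
			return -1;

		/* Clear the legacy output and input rate bits ... */
		tio.c_cflag &= ~(CBAUD | (CBAUD << IBSHIFT));
		/* ... and ask for explicit speeds in c_ospeed/c_ispeed. */
		tio.c_cflag |= BOTHER | (BOTHER << IBSHIFT);
		tio.c_ospeed = rate;
		tio.c_ispeed = rate;

		return ioctl(fd, TCSETS2, &tio);
	}
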
diff --git a/include/asm-ia64/termios.h b/include/asm-ia64/termios.h index 08750c2d3607..689d218c0c28 100644 --- a/include/asm-ia64/termios.h +++ b/include/asm-ia64/termios.h | |||
@@ -87,8 +87,10 @@ struct termio { | |||
87 | copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ | 87 | copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ |
88 | }) | 88 | }) |
89 | 89 | ||
90 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) | 90 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) |
91 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) | 91 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) |
92 | #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) | ||
93 | #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) | ||
92 | 94 | ||
93 | # endif /* __KERNEL__ */ | 95 | # endif /* __KERNEL__ */ |
94 | 96 | ||
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 441c9e001776..315f8de950a2 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h | |||
@@ -292,7 +292,7 @@ | |||
292 | #define __NR_sync_file_range 1300 | 292 | #define __NR_sync_file_range 1300 |
293 | #define __NR_tee 1301 | 293 | #define __NR_tee 1301 |
294 | #define __NR_vmsplice 1302 | 294 | #define __NR_vmsplice 1302 |
295 | /* 1303 reserved for move_pages */ | 295 | #define __NR_fallocate 1303 |
296 | #define __NR_getcpu 1304 | 296 | #define __NR_getcpu 1304 |
297 | #define __NR_epoll_pwait 1305 | 297 | #define __NR_epoll_pwait 1305 |
298 | #define __NR_utimensat 1306 | 298 | #define __NR_utimensat 1306 |
diff --git a/include/asm-ia64/ustack.h b/include/asm-ia64/ustack.h index a349467913ea..504167c35b8b 100644 --- a/include/asm-ia64/ustack.h +++ b/include/asm-ia64/ustack.h | |||
@@ -11,6 +11,7 @@ | |||
11 | /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */ | 11 | /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */ |
12 | #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2) | 12 | #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2) |
13 | #define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) | 13 | #define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) |
14 | #define STACK_TOP_MAX STACK_TOP | ||
14 | #endif | 15 | #endif |
15 | 16 | ||
16 | /* Make a default stack size of 2GiB */ | 17 | /* Make a default stack size of 2GiB */ |
diff --git a/include/asm-m32r/a.out.h b/include/asm-m32r/a.out.h index 9a4a5d20160a..6a1b5d42f328 100644 --- a/include/asm-m32r/a.out.h +++ b/include/asm-m32r/a.out.h | |||
@@ -20,6 +20,7 @@ struct exec | |||
20 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
21 | 21 | ||
22 | #define STACK_TOP TASK_SIZE | 22 | #define STACK_TOP TASK_SIZE |
23 | #define STACK_TOP_MAX STACK_TOP | ||
23 | 24 | ||
24 | #endif | 25 | #endif |
25 | 26 | ||
diff --git a/include/asm-m32r/fb.h b/include/asm-m32r/fb.h new file mode 100644 index 000000000000..d92e99cd8c8a --- /dev/null +++ b/include/asm-m32r/fb.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <asm/page.h> | ||
7 | |||
8 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
9 | unsigned long off) | ||
10 | { | ||
11 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
12 | } | ||
13 | |||
14 | static inline int fb_is_primary_device(struct fb_info *info) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h index 6f6ecf7d14a3..04fd183a2c58 100644 --- a/include/asm-m32r/page.h +++ b/include/asm-m32r/page.h | |||
@@ -15,7 +15,8 @@ extern void copy_page(void *to, void *from); | |||
15 | #define clear_user_page(page, vaddr, pg) clear_page(page) | 15 | #define clear_user_page(page, vaddr, pg) clear_page(page) |
16 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | 16 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) |
17 | 17 | ||
18 | #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) | 18 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ |
19 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | ||
19 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | 20 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
20 | 21 | ||
21 | /* | 22 | /* |
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h index 35af58c6b812..92d7266783fd 100644 --- a/include/asm-m32r/pgtable.h +++ b/include/asm-m32r/pgtable.h | |||
@@ -250,11 +250,6 @@ static inline pte_t pte_mkwrite(pte_t pte) | |||
250 | return pte; | 250 | return pte; |
251 | } | 251 | } |
252 | 252 | ||
253 | static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
254 | { | ||
255 | return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); | ||
256 | } | ||
257 | |||
258 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | 253 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) |
259 | { | 254 | { |
260 | return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); | 255 | return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); |
@@ -348,7 +343,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep) | |||
348 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 343 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
349 | 344 | ||
350 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 345 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
351 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
352 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 346 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
353 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 347 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
354 | #define __HAVE_ARCH_PTE_SAME | 348 | #define __HAVE_ARCH_PTE_SAME |
diff --git a/include/asm-m68k/a.out.h b/include/asm-m68k/a.out.h index eda1662773b8..6fc86a221a94 100644 --- a/include/asm-m68k/a.out.h +++ b/include/asm-m68k/a.out.h | |||
@@ -20,6 +20,7 @@ struct exec | |||
20 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
21 | 21 | ||
22 | #define STACK_TOP TASK_SIZE | 22 | #define STACK_TOP TASK_SIZE |
23 | #define STACK_TOP_MAX STACK_TOP | ||
23 | 24 | ||
24 | #endif | 25 | #endif |
25 | 26 | ||
diff --git a/include/asm-m68k/fb.h b/include/asm-m68k/fb.h new file mode 100644 index 000000000000..380b97ae8157 --- /dev/null +++ b/include/asm-m68k/fb.h | |||
@@ -0,0 +1,34 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <asm/page.h> | ||
7 | #include <asm/setup.h> | ||
8 | |||
9 | #ifdef CONFIG_SUN3 | ||
10 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
11 | unsigned long off) | ||
12 | { | ||
13 | pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE; | ||
14 | } | ||
15 | #else | ||
16 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
17 | unsigned long off) | ||
18 | { | ||
19 | if (CPU_IS_020_OR_030) | ||
20 | pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030; | ||
21 | if (CPU_IS_040_OR_060) { | ||
22 | pgprot_val(vma->vm_page_prot) &= _CACHEMASK040; | ||
23 | /* Use no-cache mode, serialized */ | ||
24 | pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S; | ||
25 | } | ||
26 | } | ||
27 | #endif /* CONFIG_SUN3 */ | ||
28 | |||
29 | static inline int fb_is_primary_device(struct fb_info *info) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-m68knommu/fb.h b/include/asm-m68knommu/fb.h new file mode 100644 index 000000000000..c7df38030992 --- /dev/null +++ b/include/asm-m68knommu/fb.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | #include <linux/fb.h> | ||
4 | |||
5 | #define fb_pgprotect(...) do {} while (0) | ||
6 | |||
7 | static inline int fb_is_primary_device(struct fb_info *info) | ||
8 | { | ||
9 | return 0; | ||
10 | } | ||
11 | |||
12 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h index 7b8f874f8429..9373c31ac87d 100644 --- a/include/asm-m68knommu/irq.h +++ b/include/asm-m68knommu/irq.h | |||
@@ -1,7 +1,5 @@ | |||
1 | #ifndef _M68K_IRQ_H_ | 1 | #ifndef _M68KNOMMU_IRQ_H_ |
2 | #define _M68K_IRQ_H_ | 2 | #define _M68KNOMMU_IRQ_H_ |
3 | |||
4 | #include <asm/ptrace.h> | ||
5 | 3 | ||
6 | #ifdef CONFIG_COLDFIRE | 4 | #ifdef CONFIG_COLDFIRE |
7 | /* | 5 | /* |
@@ -17,75 +15,12 @@ | |||
17 | /* | 15 | /* |
18 | * # of m68k interrupts | 16 | * # of m68k interrupts |
19 | */ | 17 | */ |
20 | #define SYS_IRQS 8 | 18 | #define SYS_IRQS 8 |
21 | #define NR_IRQS (24+SYS_IRQS) | 19 | #define NR_IRQS (24 + SYS_IRQS) |
22 | 20 | ||
23 | #endif /* CONFIG_COLDFIRE */ | 21 | #endif /* CONFIG_COLDFIRE */ |
24 | 22 | ||
25 | /* | ||
26 | * Interrupt source definitions | ||
27 | * General interrupt sources are the level 1-7. | ||
28 | * Adding an interrupt service routine for one of these sources | ||
29 | * results in the addition of that routine to a chain of routines. | ||
30 | * Each one is called in succession. Each individual interrupt | ||
31 | * service routine should determine if the device associated with | ||
32 | * that routine requires service. | ||
33 | */ | ||
34 | 23 | ||
35 | #define IRQ1 (1) /* level 1 interrupt */ | ||
36 | #define IRQ2 (2) /* level 2 interrupt */ | ||
37 | #define IRQ3 (3) /* level 3 interrupt */ | ||
38 | #define IRQ4 (4) /* level 4 interrupt */ | ||
39 | #define IRQ5 (5) /* level 5 interrupt */ | ||
40 | #define IRQ6 (6) /* level 6 interrupt */ | ||
41 | #define IRQ7 (7) /* level 7 interrupt (non-maskable) */ | ||
42 | |||
43 | /* | ||
44 | * Machine specific interrupt sources. | ||
45 | * | ||
46 | * Adding an interrupt service routine for a source with this bit | ||
47 | * set indicates a special machine specific interrupt source. | ||
48 | * The machine specific files define these sources. | ||
49 | * | ||
50 | * The IRQ_MACHSPEC bit is now gone - the only thing it did was to | ||
51 | * introduce unnecessary overhead. | ||
52 | * | ||
53 | * All interrupt handling is actually machine specific so it is better | ||
54 | * to use function pointers, as used by the Sparc port, and select the | ||
55 | * interrupt handling functions when initializing the kernel. This way | ||
56 | * we save some unnecessary overhead at run-time. | ||
57 | * 01/11/97 - Jes | ||
58 | */ | ||
59 | |||
60 | extern void (*mach_enable_irq)(unsigned int); | ||
61 | extern void (*mach_disable_irq)(unsigned int); | ||
62 | |||
63 | /* | ||
64 | * various flags for request_irq() - the Amiga now uses the standard | ||
65 | * mechanism like all other architectures - IRQF_DISABLED and | ||
66 | * IRQF_SHARED are your friends. | ||
67 | */ | ||
68 | #define IRQ_FLG_LOCK (0x0001) /* handler is not replaceable */ | ||
69 | #define IRQ_FLG_REPLACE (0x0002) /* replace existing handler */ | ||
70 | #define IRQ_FLG_FAST (0x0004) | ||
71 | #define IRQ_FLG_SLOW (0x0008) | ||
72 | #define IRQ_FLG_STD (0x8000) /* internally used */ | ||
73 | |||
74 | #ifdef CONFIG_M68360 | ||
75 | |||
76 | #define CPM_INTERRUPT IRQ4 | ||
77 | |||
78 | /* see MC68360 User's Manual, p. 7-377 */ | ||
79 | #define CPM_VECTOR_BASE 0x04 /* 3 MSbits of CPM vector */ | ||
80 | |||
81 | #endif /* CONFIG_M68360 */ | ||
82 | |||
83 | /* | ||
84 | * Some drivers want these entry points | ||
85 | */ | ||
86 | #define enable_irq(x) do { } while (0) | ||
87 | #define disable_irq(x) do { } while (0) | ||
88 | #define disable_irq_nosync(x) disable_irq(x) | ||
89 | #define irq_canonicalize(irq) (irq) | 24 | #define irq_canonicalize(irq) (irq) |
90 | 25 | ||
91 | #endif /* _M68K_IRQ_H_ */ | 26 | #endif /* _M68KNOMMU_IRQ_H_ */ |
diff --git a/include/asm-m68knommu/irqnode.h b/include/asm-m68knommu/irqnode.h deleted file mode 100644 index 6132a9858b52..000000000000 --- a/include/asm-m68knommu/irqnode.h +++ /dev/null | |||
@@ -1,36 +0,0 @@ | |||
1 | #ifndef _M68K_IRQNODE_H_ | ||
2 | #define _M68K_IRQNODE_H_ | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | |||
6 | /* | ||
7 | * This structure is used to chain together the ISRs for a particular | ||
8 | * interrupt source (if it supports chaining). | ||
9 | */ | ||
10 | typedef struct irq_node { | ||
11 | irq_handler_t handler; | ||
12 | unsigned long flags; | ||
13 | void *dev_id; | ||
14 | const char *devname; | ||
15 | struct irq_node *next; | ||
16 | } irq_node_t; | ||
17 | |||
18 | /* | ||
19 | * This structure has only 4 elements for speed reasons | ||
20 | */ | ||
21 | struct irq_entry { | ||
22 | irq_handler_t handler; | ||
23 | unsigned long flags; | ||
24 | void *dev_id; | ||
25 | const char *devname; | ||
26 | }; | ||
27 | |||
28 | /* count of spurious interrupts */ | ||
29 | extern volatile unsigned int num_spurious; | ||
30 | |||
31 | /* | ||
32 | * This function returns a new irq_node_t | ||
33 | */ | ||
34 | extern irq_node_t *new_irq_node(void); | ||
35 | |||
36 | #endif /* _M68K_IRQNODE_H_ */ | ||
diff --git a/include/asm-m68knommu/m68360.h b/include/asm-m68knommu/m68360.h index dd11b070884b..eb7d39ef2855 100644 --- a/include/asm-m68knommu/m68360.h +++ b/include/asm-m68knommu/m68360.h | |||
@@ -3,3 +3,11 @@ | |||
3 | #include "m68360_quicc.h" | 3 | #include "m68360_quicc.h" |
4 | #include "m68360_enet.h" | 4 | #include "m68360_enet.h" |
5 | 5 | ||
6 | #ifdef CONFIG_M68360 | ||
7 | |||
8 | #define CPM_INTERRUPT 4 | ||
9 | |||
10 | /* see MC68360 User's Manual, p. 7-377 */ | ||
11 | #define CPM_VECTOR_BASE 0x04 /* 3 MSbits of CPM vector */ | ||
12 | |||
13 | #endif /* CONFIG_M68360 */ | ||
diff --git a/include/asm-m68knommu/page.h b/include/asm-m68knommu/page.h index 2a1b8bdcb29c..9efa0a9851b1 100644 --- a/include/asm-m68knommu/page.h +++ b/include/asm-m68knommu/page.h | |||
@@ -22,7 +22,8 @@ | |||
22 | #define clear_user_page(page, vaddr, pg) clear_page(page) | 22 | #define clear_user_page(page, vaddr, pg) clear_page(page) |
23 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | 23 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) |
24 | 24 | ||
25 | #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) | 25 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ |
26 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | ||
26 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | 27 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
27 | 28 | ||
28 | /* | 29 | /* |
diff --git a/include/asm-m68knommu/pgtable.h b/include/asm-m68knommu/pgtable.h index 9dfbbc24aa71..e1e6a1d2333a 100644 --- a/include/asm-m68knommu/pgtable.h +++ b/include/asm-m68knommu/pgtable.h | |||
@@ -49,7 +49,6 @@ static inline int pte_file(pte_t pte) { return 0; } | |||
49 | * These would be in other places but having them here reduces the diffs. | 49 | * These would be in other places but having them here reduces the diffs. |
50 | */ | 50 | */ |
51 | extern unsigned int kobjsize(const void *objp); | 51 | extern unsigned int kobjsize(const void *objp); |
52 | extern int is_in_rom(unsigned long); | ||
53 | 52 | ||
54 | /* | 53 | /* |
55 | * No page table caches to initialise. | 54 | * No page table caches to initialise. |
diff --git a/include/asm-m68knommu/traps.h b/include/asm-m68knommu/traps.h index f2a81317cc10..d0671e5f8e29 100644 --- a/include/asm-m68knommu/traps.h +++ b/include/asm-m68knommu/traps.h | |||
@@ -16,6 +16,10 @@ | |||
16 | typedef void (*e_vector)(void); | 16 | typedef void (*e_vector)(void); |
17 | 17 | ||
18 | extern e_vector vectors[]; | 18 | extern e_vector vectors[]; |
19 | extern void init_vectors(void); | ||
20 | extern void enable_vector(unsigned int irq); | ||
21 | extern void disable_vector(unsigned int irq); | ||
22 | extern void ack_vector(unsigned int irq); | ||
19 | 23 | ||
20 | #endif | 24 | #endif |
21 | 25 | ||
diff --git a/include/asm-m68knommu/uaccess.h b/include/asm-m68knommu/uaccess.h index 62b29b10bc6d..9ed9169a8849 100644 --- a/include/asm-m68knommu/uaccess.h +++ b/include/asm-m68knommu/uaccess.h | |||
@@ -15,12 +15,15 @@ | |||
15 | 15 | ||
16 | #define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) | 16 | #define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) |
17 | 17 | ||
18 | /* | ||
19 | * It is not enough to just have access_ok check for a real RAM address. | ||
20 | * This would disallow the case of code/ro-data running XIP in flash/rom. | ||
21 | * Ideally we would check the possible flash ranges too, but that is | ||
22 | * currently not so easy. | ||
23 | */ | ||
18 | static inline int _access_ok(unsigned long addr, unsigned long size) | 24 | static inline int _access_ok(unsigned long addr, unsigned long size) |
19 | { | 25 | { |
20 | extern unsigned long memory_start, memory_end; | 26 | return 1; |
21 | |||
22 | return (((addr >= memory_start) && (addr+size < memory_end)) || | ||
23 | (is_in_rom(addr) && is_in_rom(addr+size))); | ||
24 | } | 27 | } |
25 | 28 | ||
26 | /* | 29 | /* |
diff --git a/include/asm-mips/a.out.h b/include/asm-mips/a.out.h index ef33c3f13484..1ad60ba186d0 100644 --- a/include/asm-mips/a.out.h +++ b/include/asm-mips/a.out.h | |||
@@ -40,6 +40,7 @@ struct exec | |||
40 | #ifdef CONFIG_64BIT | 40 | #ifdef CONFIG_64BIT |
41 | #define STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE) | 41 | #define STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE) |
42 | #endif | 42 | #endif |
43 | #define STACK_TOP_MAX TASK_SIZE | ||
43 | 44 | ||
44 | #endif | 45 | #endif |
45 | 46 | ||
diff --git a/include/asm-mips/dec/serial.h b/include/asm-mips/dec/serial.h deleted file mode 100644 index acad75890a05..000000000000 --- a/include/asm-mips/dec/serial.h +++ /dev/null | |||
@@ -1,36 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-mips/dec/serial.h | ||
3 | * | ||
4 | * Definitions common to all DECstation serial devices. | ||
5 | * | ||
6 | * Copyright (C) 2004 Maciej W. Rozycki | ||
7 | * | ||
8 | * Based on bits extracted from drivers/tc/zs.h for which | ||
9 | * the following copyrights apply: | ||
10 | * | ||
11 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
12 | * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) | ||
13 | * Copyright (C) Harald Koerfgen | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | */ | ||
20 | #ifndef __ASM_MIPS_DEC_SERIAL_H | ||
21 | #define __ASM_MIPS_DEC_SERIAL_H | ||
22 | |||
23 | struct dec_serial_hook { | ||
24 | int (*init_channel)(void *handle); | ||
25 | void (*init_info)(void *handle); | ||
26 | void (*rx_char)(unsigned char ch, unsigned char fl); | ||
27 | int (*poll_rx_char)(void *handle); | ||
28 | int (*poll_tx_char)(void *handle, unsigned char ch); | ||
29 | unsigned int cflags; | ||
30 | }; | ||
31 | |||
32 | extern int register_dec_serial_hook(unsigned int channel, | ||
33 | struct dec_serial_hook *hook); | ||
34 | extern int unregister_dec_serial_hook(unsigned int channel); | ||
35 | |||
36 | #endif /* __ASM_MIPS_DEC_SERIAL_H */ | ||
diff --git a/include/asm-mips/fb.h b/include/asm-mips/fb.h new file mode 100644 index 000000000000..bd3f68c9ddfc --- /dev/null +++ b/include/asm-mips/fb.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <asm/page.h> | ||
7 | |||
8 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
9 | unsigned long off) | ||
10 | { | ||
11 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
12 | } | ||
13 | |||
14 | static inline int fb_is_primary_device(struct fb_info *info) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-mips/sibyte/bcm1480_regs.h b/include/asm-mips/sibyte/bcm1480_regs.h index bda391d3af85..2738c1366f66 100644 --- a/include/asm-mips/sibyte/bcm1480_regs.h +++ b/include/asm-mips/sibyte/bcm1480_regs.h | |||
@@ -220,17 +220,25 @@ | |||
220 | #define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1) | 220 | #define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1) |
221 | 221 | ||
222 | #define BCM1480_DUART_CHANREG_SPACING 0x100 | 222 | #define BCM1480_DUART_CHANREG_SPACING 0x100 |
223 | #define A_BCM1480_DUART_CHANREG(chan,reg) (A_BCM1480_DUART(chan) \ | 223 | #define A_BCM1480_DUART_CHANREG(chan, reg) \ |
224 | + BCM1480_DUART_CHANREG_SPACING*((chan)&1) \ | 224 | (A_BCM1480_DUART(chan) + \ |
225 | + (reg)) | 225 | BCM1480_DUART_CHANREG_SPACING * (((chan) & 1) + 1) + (reg)) |
226 | #define R_BCM1480_DUART_CHANREG(chan,reg) (BCM1480_DUART_CHANREG_SPACING*((chan)&1) + (reg)) | 226 | #define A_BCM1480_DUART_CTRLREG(chan, reg) \ |
227 | 227 | (A_BCM1480_DUART(chan) + \ | |
228 | #define R_BCM1480_DUART_IMRREG(chan) (R_DUART_IMR_A + ((chan)&1)*DUART_IMRISR_SPACING) | 228 | BCM1480_DUART_CHANREG_SPACING * 3 + (reg)) |
229 | #define R_BCM1480_DUART_ISRREG(chan) (R_DUART_ISR_A + ((chan)&1)*DUART_IMRISR_SPACING) | 229 | |
230 | 230 | #define R_BCM1480_DUART_IMRREG(chan) \ | |
231 | #define A_BCM1480_DUART_IMRREG(chan) (A_BCM1480_DUART(chan) + R_BCM1480_DUART_IMRREG(chan)) | 231 | (R_DUART_IMR_A + ((chan) & 1) * DUART_IMRISR_SPACING) |
232 | #define A_BCM1480_DUART_ISRREG(chan) (A_BCM1480_DUART(chan) + R_BCM1480_DUART_ISRREG(chan)) | 232 | #define R_BCM1480_DUART_ISRREG(chan) \ |
233 | #define A_BCM1480_DUART_IN_PORT(chan) (A_BCM1480_DUART(chan) + R_DUART_INP_ORT) | 233 | (R_DUART_ISR_A + ((chan) & 1) * DUART_IMRISR_SPACING) |
234 | |||
235 | #define A_BCM1480_DUART_IMRREG(chan) \ | ||
236 | (A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_IMRREG(chan))) | ||
237 | #define A_BCM1480_DUART_ISRREG(chan) \ | ||
238 | (A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_ISRREG(chan))) | ||
239 | |||
240 | #define A_BCM1480_DUART_IN_PORT(chan) \ | ||
241 | (A_BCM1480_DUART_CTRLREG((chan), R_DUART_IN_PORT)) | ||
234 | 242 | ||
235 | /* | 243 | /* |
236 | * These constants are the absolute addresses. | 244 | * These constants are the absolute addresses. |
diff --git a/include/asm-mips/sibyte/sb1250_regs.h b/include/asm-mips/sibyte/sb1250_regs.h index da7c188993c9..220b7e94f1bf 100644 --- a/include/asm-mips/sibyte/sb1250_regs.h +++ b/include/asm-mips/sibyte/sb1250_regs.h | |||
@@ -272,59 +272,69 @@ | |||
272 | ********************************************************************* */ | 272 | ********************************************************************* */ |
273 | 273 | ||
274 | 274 | ||
275 | #if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */ | 275 | #if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */ |
276 | #define R_DUART_NUM_PORTS 2 | 276 | #define R_DUART_NUM_PORTS 2 |
277 | 277 | ||
278 | #define A_DUART 0x0010060000 | 278 | #define A_DUART 0x0010060000 |
279 | 279 | ||
280 | #define DUART_CHANREG_SPACING 0x100 | 280 | #define DUART_CHANREG_SPACING 0x100 |
281 | #define A_DUART_CHANREG(chan,reg) (A_DUART + DUART_CHANREG_SPACING*(chan) + (reg)) | 281 | |
282 | #define R_DUART_CHANREG(chan,reg) (DUART_CHANREG_SPACING*(chan) + (reg)) | 282 | #define A_DUART_CHANREG(chan, reg) \ |
283 | (A_DUART + DUART_CHANREG_SPACING * ((chan) + 1) + (reg)) | ||
283 | #endif /* 1250 & 112x */ | 284 | #endif /* 1250 & 112x */ |
284 | 285 | ||
285 | #define R_DUART_MODE_REG_1 0x100 | 286 | #define R_DUART_MODE_REG_1 0x000 |
286 | #define R_DUART_MODE_REG_2 0x110 | 287 | #define R_DUART_MODE_REG_2 0x010 |
287 | #define R_DUART_STATUS 0x120 | 288 | #define R_DUART_STATUS 0x020 |
288 | #define R_DUART_CLK_SEL 0x130 | 289 | #define R_DUART_CLK_SEL 0x030 |
289 | #define R_DUART_CMD 0x150 | 290 | #define R_DUART_CMD 0x050 |
290 | #define R_DUART_RX_HOLD 0x160 | 291 | #define R_DUART_RX_HOLD 0x060 |
291 | #define R_DUART_TX_HOLD 0x170 | 292 | #define R_DUART_TX_HOLD 0x070 |
292 | 293 | ||
293 | #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) | 294 | #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) |
294 | #define R_DUART_FULL_CTL 0x140 | 295 | #define R_DUART_FULL_CTL 0x040 |
295 | #define R_DUART_OPCR_X 0x180 | 296 | #define R_DUART_OPCR_X 0x080 |
296 | #define R_DUART_AUXCTL_X 0x190 | 297 | #define R_DUART_AUXCTL_X 0x090 |
297 | #endif /* 1250 PASS2 || 112x PASS1 || 1480*/ | 298 | #endif /* 1250 PASS2 || 112x PASS1 || 1480 */ |
298 | 299 | ||
299 | 300 | ||
300 | /* | 301 | /* |
301 | * The IMR and ISR can't be addressed with A_DUART_CHANREG, | 302 | * The IMR and ISR can't be addressed with A_DUART_CHANREG, |
302 | * so use this macro instead. | 303 | * so use these macros instead. |
303 | */ | 304 | */ |
304 | 305 | ||
305 | #define R_DUART_AUX_CTRL 0x310 | 306 | #if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */ |
306 | #define R_DUART_ISR_A 0x320 | 307 | #define DUART_IMRISR_SPACING 0x20 |
307 | #define R_DUART_IMR_A 0x330 | 308 | #define DUART_INCHNG_SPACING 0x10 |
308 | #define R_DUART_ISR_B 0x340 | ||
309 | #define R_DUART_IMR_B 0x350 | ||
310 | #define R_DUART_OUT_PORT 0x360 | ||
311 | #define R_DUART_OPCR 0x370 | ||
312 | #define R_DUART_IN_PORT 0x380 | ||
313 | 309 | ||
314 | #define R_DUART_SET_OPR 0x3B0 | 310 | #define A_DUART_CTRLREG(reg) \ |
315 | #define R_DUART_CLEAR_OPR 0x3C0 | 311 | (A_DUART + DUART_CHANREG_SPACING * 3 + (reg)) |
316 | 312 | ||
317 | #define DUART_IMRISR_SPACING 0x20 | 313 | #define R_DUART_IMRREG(chan) \ |
314 | (R_DUART_IMR_A + (chan) * DUART_IMRISR_SPACING) | ||
315 | #define R_DUART_ISRREG(chan) \ | ||
316 | (R_DUART_ISR_A + (chan) * DUART_IMRISR_SPACING) | ||
317 | #define R_DUART_INCHREG(chan) \ | ||
318 | (R_DUART_IN_CHNG_A + (chan) * DUART_INCHNG_SPACING) | ||
318 | 319 | ||
319 | #if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */ | 320 | #define A_DUART_IMRREG(chan) A_DUART_CTRLREG(R_DUART_IMRREG(chan)) |
320 | #define R_DUART_IMRREG(chan) (R_DUART_IMR_A + (chan)*DUART_IMRISR_SPACING) | 321 | #define A_DUART_ISRREG(chan) A_DUART_CTRLREG(R_DUART_ISRREG(chan)) |
321 | #define R_DUART_ISRREG(chan) (R_DUART_ISR_A + (chan)*DUART_IMRISR_SPACING) | 322 | #define A_DUART_INCHREG(chan) A_DUART_CTRLREG(R_DUART_INCHREG(chan)) |
322 | |||
323 | #define A_DUART_IMRREG(chan) (A_DUART + R_DUART_IMRREG(chan)) | ||
324 | #define A_DUART_ISRREG(chan) (A_DUART + R_DUART_ISRREG(chan)) | ||
325 | #endif /* 1250 & 112x */ | 323 | #endif /* 1250 & 112x */ |
326 | 324 | ||
327 | 325 | #define R_DUART_AUX_CTRL 0x010 | |
326 | #define R_DUART_ISR_A 0x020 | ||
327 | #define R_DUART_IMR_A 0x030 | ||
328 | #define R_DUART_ISR_B 0x040 | ||
329 | #define R_DUART_IMR_B 0x050 | ||
330 | #define R_DUART_OUT_PORT 0x060 | ||
331 | #define R_DUART_OPCR 0x070 | ||
332 | #define R_DUART_IN_PORT 0x080 | ||
333 | |||
334 | #define R_DUART_SET_OPR 0x0B0 | ||
335 | #define R_DUART_CLEAR_OPR 0x0C0 | ||
336 | #define R_DUART_IN_CHNG_A 0x0D0 | ||
337 | #define R_DUART_IN_CHNG_B 0x0E0 | ||
328 | 338 | ||
329 | 339 | ||
330 | /* | 340 | /* |
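One point worth checking by hand is that the re-based register offsets leave the absolute MMIO addresses unchanged, because A_DUART_CTRLREG() re-adds the 0x300 that was subtracted from the R_DUART_* values. Worked out for channel 1's interrupt mask register:

	old: A_DUART + R_DUART_IMR_A (0x330) + 1 * 0x20                    = A_DUART + 0x350
	new: A_DUART + 3 * DUART_CHANREG_SPACING (0x300) + 0x030 + 1*0x20  = A_DUART + 0x350
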
diff --git a/include/asm-mips/sibyte/sb1250_uart.h b/include/asm-mips/sibyte/sb1250_uart.h index e87045e62bf0..cf74fedcbef1 100644 --- a/include/asm-mips/sibyte/sb1250_uart.h +++ b/include/asm-mips/sibyte/sb1250_uart.h | |||
@@ -75,7 +75,8 @@ | |||
75 | #define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED) | 75 | #define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED) |
76 | #define V_DUART_PARITY_MODE_NONE V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE) | 76 | #define V_DUART_PARITY_MODE_NONE V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE) |
77 | 77 | ||
78 | #define M_DUART_ERR_MODE _SB_MAKEMASK1(5) /* must be zero */ | 78 | #define M_DUART_TX_IRQ_SEL_TXRDY 0 |
79 | #define M_DUART_TX_IRQ_SEL_TXEMPT _SB_MAKEMASK1(5) | ||
79 | 80 | ||
80 | #define M_DUART_RX_IRQ_SEL_RXRDY 0 | 81 | #define M_DUART_RX_IRQ_SEL_RXRDY 0 |
81 | #define M_DUART_RX_IRQ_SEL_RXFULL _SB_MAKEMASK1(6) | 82 | #define M_DUART_RX_IRQ_SEL_RXFULL _SB_MAKEMASK1(6) |
@@ -246,10 +247,13 @@ | |||
246 | 247 | ||
247 | #define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2) | 248 | #define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2) |
248 | #define M_DUART_ISR_IN_A _SB_MAKEMASK1(3) | 249 | #define M_DUART_ISR_IN_A _SB_MAKEMASK1(3) |
250 | #define M_DUART_ISR_ALL_A _SB_MAKEMASK(4,0) | ||
251 | |||
249 | #define M_DUART_ISR_TX_B _SB_MAKEMASK1(4) | 252 | #define M_DUART_ISR_TX_B _SB_MAKEMASK1(4) |
250 | #define M_DUART_ISR_RX_B _SB_MAKEMASK1(5) | 253 | #define M_DUART_ISR_RX_B _SB_MAKEMASK1(5) |
251 | #define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6) | 254 | #define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6) |
252 | #define M_DUART_ISR_IN_B _SB_MAKEMASK1(7) | 255 | #define M_DUART_ISR_IN_B _SB_MAKEMASK1(7) |
256 | #define M_DUART_ISR_ALL_B _SB_MAKEMASK(4,4) | ||
253 | 257 | ||
254 | /* | 258 | /* |
255 | * DUART Channel A Interrupt Status Register (Table 10-17) | 259 | * DUART Channel A Interrupt Status Register (Table 10-17) |
@@ -262,6 +266,7 @@ | |||
262 | #define M_DUART_ISR_RX _SB_MAKEMASK1(1) | 266 | #define M_DUART_ISR_RX _SB_MAKEMASK1(1) |
263 | #define M_DUART_ISR_BRK _SB_MAKEMASK1(2) | 267 | #define M_DUART_ISR_BRK _SB_MAKEMASK1(2) |
264 | #define M_DUART_ISR_IN _SB_MAKEMASK1(3) | 268 | #define M_DUART_ISR_IN _SB_MAKEMASK1(3) |
269 | #define M_DUART_ISR_ALL _SB_MAKEMASK(4,0) | ||
265 | #define M_DUART_ISR_RESERVED _SB_MAKEMASK(4,4) | 270 | #define M_DUART_ISR_RESERVED _SB_MAKEMASK(4,4) |
266 | 271 | ||
267 | /* | 272 | /* |
diff --git a/include/asm-parisc/a.out.h b/include/asm-parisc/a.out.h index 2a490cc9ec91..23e2c90943e5 100644 --- a/include/asm-parisc/a.out.h +++ b/include/asm-parisc/a.out.h | |||
@@ -23,6 +23,7 @@ struct exec | |||
23 | * prumpf */ | 23 | * prumpf */ |
24 | 24 | ||
25 | #define STACK_TOP TASK_SIZE | 25 | #define STACK_TOP TASK_SIZE |
26 | #define STACK_TOP_MAX DEFAULT_TASK_SIZE | ||
26 | 27 | ||
27 | #endif | 28 | #endif |
28 | 29 | ||
diff --git a/include/asm-parisc/fb.h b/include/asm-parisc/fb.h new file mode 100644 index 000000000000..4d503a023ab2 --- /dev/null +++ b/include/asm-parisc/fb.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <asm/page.h> | ||
7 | |||
8 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
9 | unsigned long off) | ||
10 | { | ||
11 | pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; | ||
12 | } | ||
13 | |||
14 | static inline int fb_is_primary_device(struct fb_info *info) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | #endif /* _ASM_FB_H_ */ | ||
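These new per-architecture <asm/fb.h> helpers let the framebuffer core choose mapping attributes without arch #ifdefs: parisc forces _PAGE_NO_CACHE, other ports use write-combining, uncached mappings, or a no-op where the bus is uncached by design. A hedged sketch of the shape of an fbdev mmap path that consumes the hook; this is simplified and not the actual fbmem.c code:

#include <linux/fb.h>
#include <linux/mm.h>
#include <asm/fb.h>

/* Simplified illustration of an fbdev mmap handler using the new hook. */
static int example_fb_mmap(struct fb_info *info, struct file *file,
			   struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long start = info->fix.smem_start;

	/* let the architecture adjust vma->vm_page_prot for this mapping */
	fb_pgprotect(file, vma, off);

	return io_remap_pfn_range(vma, vma->vm_start,
				  (start + off) >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}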
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h index 7e222c8ba739..e88cacd63724 100644 --- a/include/asm-parisc/pgtable.h +++ b/include/asm-parisc/pgtable.h | |||
@@ -447,21 +447,6 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned | |||
447 | #endif | 447 | #endif |
448 | } | 448 | } |
449 | 449 | ||
450 | static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
451 | { | ||
452 | #ifdef CONFIG_SMP | ||
453 | if (!pte_dirty(*ptep)) | ||
454 | return 0; | ||
455 | return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep)); | ||
456 | #else | ||
457 | pte_t pte = *ptep; | ||
458 | if (!pte_dirty(pte)) | ||
459 | return 0; | ||
460 | set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte)); | ||
461 | return 1; | ||
462 | #endif | ||
463 | } | ||
464 | |||
465 | extern spinlock_t pa_dbit_lock; | 450 | extern spinlock_t pa_dbit_lock; |
466 | 451 | ||
467 | struct mm_struct; | 452 | struct mm_struct; |
@@ -529,7 +514,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
529 | #define HAVE_ARCH_UNMAPPED_AREA | 514 | #define HAVE_ARCH_UNMAPPED_AREA |
530 | 515 | ||
531 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 516 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
532 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
533 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 517 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
534 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 518 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
535 | #define __HAVE_ARCH_PTE_SAME | 519 | #define __HAVE_ARCH_PTE_SAME |
diff --git a/include/asm-powerpc/a.out.h b/include/asm-powerpc/a.out.h index c7393a977364..5c5ea83f9349 100644 --- a/include/asm-powerpc/a.out.h +++ b/include/asm-powerpc/a.out.h | |||
@@ -26,9 +26,12 @@ struct exec | |||
26 | #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ | 26 | #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ |
27 | STACK_TOP_USER32 : STACK_TOP_USER64) | 27 | STACK_TOP_USER32 : STACK_TOP_USER64) |
28 | 28 | ||
29 | #define STACK_TOP_MAX STACK_TOP_USER64 | ||
30 | |||
29 | #else /* __powerpc64__ */ | 31 | #else /* __powerpc64__ */ |
30 | 32 | ||
31 | #define STACK_TOP TASK_SIZE | 33 | #define STACK_TOP TASK_SIZE |
34 | #define STACK_TOP_MAX STACK_TOP | ||
32 | 35 | ||
33 | #endif /* __powerpc64__ */ | 36 | #endif /* __powerpc64__ */ |
34 | #endif /* __KERNEL__ */ | 37 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-powerpc/fb.h b/include/asm-powerpc/fb.h new file mode 100644 index 000000000000..411af8d17a69 --- /dev/null +++ b/include/asm-powerpc/fb.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <asm/page.h> | ||
7 | |||
8 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
9 | unsigned long off) | ||
10 | { | ||
11 | vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, | ||
12 | vma->vm_end - vma->vm_start, | ||
13 | vma->vm_page_prot); | ||
14 | } | ||
15 | |||
16 | static inline int fb_is_primary_device(struct fb_info *info) | ||
17 | { | ||
18 | return 0; | ||
19 | } | ||
20 | |||
21 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h index b0e40ff32ee0..8b08b447d6f3 100644 --- a/include/asm-powerpc/kprobes.h +++ b/include/asm-powerpc/kprobes.h | |||
@@ -65,20 +65,18 @@ typedef unsigned int kprobe_opcode_t; | |||
65 | } else if (name[0] != '.') \ | 65 | } else if (name[0] != '.') \ |
66 | addr = *(kprobe_opcode_t **)addr; \ | 66 | addr = *(kprobe_opcode_t **)addr; \ |
67 | } else { \ | 67 | } else { \ |
68 | char dot_name[KSYM_NAME_LEN+1]; \ | 68 | char dot_name[KSYM_NAME_LEN]; \ |
69 | dot_name[0] = '.'; \ | 69 | dot_name[0] = '.'; \ |
70 | dot_name[1] = '\0'; \ | 70 | dot_name[1] = '\0'; \ |
71 | strncat(dot_name, name, KSYM_NAME_LEN); \ | 71 | strncat(dot_name, name, KSYM_NAME_LEN - 2); \ |
72 | addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \ | 72 | addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \ |
73 | } \ | 73 | } \ |
74 | } | 74 | } |
75 | 75 | ||
76 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry) | ||
77 | #define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ | 76 | #define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ |
78 | IS_TWI(instr) || IS_TDI(instr)) | 77 | IS_TWI(instr) || IS_TDI(instr)) |
79 | #else | 78 | #else |
80 | /* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */ | 79 | /* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */ |
81 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)(pentry) | ||
82 | #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) | 80 | #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) |
83 | #endif | 81 | #endif |
84 | 82 | ||
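The dot_name change above sizes the buffer at KSYM_NAME_LEN and bounds strncat() accordingly: one byte is taken by the leading '.', one by the terminating NUL, leaving room for at most KSYM_NAME_LEN - 2 characters of the symbol name. A small user-space sketch of the same arithmetic, with KSYM_NAME_LEN assumed to be 128:

#include <stdio.h>
#include <string.h>

#define KSYM_NAME_LEN 128	/* assumed; matches the kernel of this era */

int main(void)
{
	char dot_name[KSYM_NAME_LEN];
	const char *name = "some_ppc64_function";	/* illustrative */

	dot_name[0] = '.';
	dot_name[1] = '\0';
	/* '.' and the NUL already use 2 bytes, so copy at most KSYM_NAME_LEN - 2 */
	strncat(dot_name, name, KSYM_NAME_LEN - 2);

	printf("%s (%zu chars, buffer holds at most %d)\n",
	       dot_name, strlen(dot_name), KSYM_NAME_LEN - 1);
	return 0;
}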
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h index 2f2e3024fa61..73dc8ba4010d 100644 --- a/include/asm-powerpc/percpu.h +++ b/include/asm-powerpc/percpu.h | |||
@@ -20,6 +20,11 @@ | |||
20 | #define DEFINE_PER_CPU(type, name) \ | 20 | #define DEFINE_PER_CPU(type, name) \ |
21 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name | 21 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name |
22 | 22 | ||
23 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
24 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | ||
25 | __typeof__(type) per_cpu__##name \ | ||
26 | ____cacheline_aligned_in_smp | ||
27 | |||
23 | /* var is in discarded region: offset to particular copy we want */ | 28 | /* var is in discarded region: offset to particular copy we want */ |
24 | #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) | 29 | #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) |
25 | #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) | 30 | #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) |
@@ -40,6 +45,8 @@ extern void setup_per_cpu_areas(void); | |||
40 | 45 | ||
41 | #define DEFINE_PER_CPU(type, name) \ | 46 | #define DEFINE_PER_CPU(type, name) \ |
42 | __typeof__(type) per_cpu__##name | 47 | __typeof__(type) per_cpu__##name |
48 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
49 | DEFINE_PER_CPU(type, name) | ||
43 | 50 | ||
44 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) | 51 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) |
45 | #define __get_cpu_var(var) per_cpu__##var | 52 | #define __get_cpu_var(var) per_cpu__##var |
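DEFINE_PER_CPU_SHARED_ALIGNED places a per-CPU object in the .data.percpu.shared_aligned section and pads it to a cacheline on SMP, so hot, frequently written per-CPU data does not false-share a line with unrelated variables; on UP it degenerates to a plain DEFINE_PER_CPU. A hedged sketch of a caller (the variable name is made up for illustration):

#include <linux/percpu.h>

/* Hypothetical hot counter, kept on its own cacheline on SMP builds. */
DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, example_event_count);

static void example_note_event(void)
{
	/* caller is expected to have preemption disabled */
	__get_cpu_var(example_event_count)++;
}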
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h index 6c236d4d6262..86a54a4a8a2a 100644 --- a/include/asm-powerpc/pgtable-ppc32.h +++ b/include/asm-powerpc/pgtable-ppc32.h | |||
@@ -621,13 +621,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon | |||
621 | #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ | 621 | #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ |
622 | __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) | 622 | __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) |
623 | 623 | ||
624 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
625 | static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, | ||
626 | unsigned long addr, pte_t *ptep) | ||
627 | { | ||
628 | return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0; | ||
629 | } | ||
630 | |||
631 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 624 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
632 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | 625 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, |
633 | pte_t *ptep) | 626 | pte_t *ptep) |
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h index 7ca8b5c10019..300f9a199bf2 100644 --- a/include/asm-powerpc/pgtable-ppc64.h +++ b/include/asm-powerpc/pgtable-ppc64.h | |||
@@ -292,29 +292,6 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, | |||
292 | __r; \ | 292 | __r; \ |
293 | }) | 293 | }) |
294 | 294 | ||
295 | /* | ||
296 | * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the | ||
297 | * moment we always flush but we need to fix hpte_update and test if the | ||
298 | * optimisation is worth it. | ||
299 | */ | ||
300 | static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, | ||
301 | unsigned long addr, pte_t *ptep) | ||
302 | { | ||
303 | unsigned long old; | ||
304 | |||
305 | if ((pte_val(*ptep) & _PAGE_DIRTY) == 0) | ||
306 | return 0; | ||
307 | old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0); | ||
308 | return (old & _PAGE_DIRTY) != 0; | ||
309 | } | ||
310 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
311 | #define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \ | ||
312 | ({ \ | ||
313 | int __r; \ | ||
314 | __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \ | ||
315 | __r; \ | ||
316 | }) | ||
317 | |||
318 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 295 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
319 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | 296 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, |
320 | pte_t *ptep) | 297 | pte_t *ptep) |
@@ -342,14 +319,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
342 | __young; \ | 319 | __young; \ |
343 | }) | 320 | }) |
344 | 321 | ||
345 | #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH | ||
346 | #define ptep_clear_flush_dirty(__vma, __address, __ptep) \ | ||
347 | ({ \ | ||
348 | int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ | ||
349 | __ptep); \ | ||
350 | __dirty; \ | ||
351 | }) | ||
352 | |||
353 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 322 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
354 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | 323 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
355 | unsigned long addr, pte_t *ptep) | 324 | unsigned long addr, pte_t *ptep) |
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h index 1cc3f9cb6f4e..cc6d87228258 100644 --- a/include/asm-powerpc/systbl.h +++ b/include/asm-powerpc/systbl.h | |||
@@ -308,6 +308,7 @@ COMPAT_SYS_SPU(move_pages) | |||
308 | SYSCALL_SPU(getcpu) | 308 | SYSCALL_SPU(getcpu) |
309 | COMPAT_SYS(epoll_pwait) | 309 | COMPAT_SYS(epoll_pwait) |
310 | COMPAT_SYS_SPU(utimensat) | 310 | COMPAT_SYS_SPU(utimensat) |
311 | COMPAT_SYS(fallocate) | ||
311 | COMPAT_SYS_SPU(signalfd) | 312 | COMPAT_SYS_SPU(signalfd) |
312 | COMPAT_SYS_SPU(timerfd) | 313 | COMPAT_SYS_SPU(timerfd) |
313 | SYSCALL_SPU(eventfd) | 314 | SYSCALL_SPU(eventfd) |
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h index f71c6061f1ec..97d82b6a9406 100644 --- a/include/asm-powerpc/unistd.h +++ b/include/asm-powerpc/unistd.h | |||
@@ -331,10 +331,11 @@ | |||
331 | #define __NR_timerfd 306 | 331 | #define __NR_timerfd 306 |
332 | #define __NR_eventfd 307 | 332 | #define __NR_eventfd 307 |
333 | #define __NR_sync_file_range2 308 | 333 | #define __NR_sync_file_range2 308 |
334 | #define __NR_fallocate 309 | ||
334 | 335 | ||
335 | #ifdef __KERNEL__ | 336 | #ifdef __KERNEL__ |
336 | 337 | ||
337 | #define __NR_syscalls 309 | 338 | #define __NR_syscalls 310 |
338 | 339 | ||
339 | #define __NR__exit __NR_exit | 340 | #define __NR__exit __NR_exit |
340 | #define NR_syscalls __NR_syscalls | 341 | #define NR_syscalls __NR_syscalls |
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index 18aa776313b9..c159315d2c8f 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h | |||
@@ -654,13 +654,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon | |||
654 | #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ | 654 | #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ |
655 | __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) | 655 | __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) |
656 | 656 | ||
657 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
658 | static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, | ||
659 | unsigned long addr, pte_t *ptep) | ||
660 | { | ||
661 | return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0; | ||
662 | } | ||
663 | |||
664 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 657 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
665 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | 658 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, |
666 | pte_t *ptep) | 659 | pte_t *ptep) |
diff --git a/include/asm-s390/a.out.h b/include/asm-s390/a.out.h index 72adee6ef338..46158dcaf517 100644 --- a/include/asm-s390/a.out.h +++ b/include/asm-s390/a.out.h | |||
@@ -32,6 +32,7 @@ struct exec | |||
32 | #ifdef __KERNEL__ | 32 | #ifdef __KERNEL__ |
33 | 33 | ||
34 | #define STACK_TOP TASK_SIZE | 34 | #define STACK_TOP TASK_SIZE |
35 | #define STACK_TOP_MAX DEFAULT_TASK_SIZE | ||
35 | 36 | ||
36 | #endif | 37 | #endif |
37 | 38 | ||
diff --git a/include/asm-s390/fb.h b/include/asm-s390/fb.h new file mode 100644 index 000000000000..c7df38030992 --- /dev/null +++ b/include/asm-s390/fb.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | #include <linux/fb.h> | ||
4 | |||
5 | #define fb_pgprotect(...) do {} while (0) | ||
6 | |||
7 | static inline int fb_is_primary_device(struct fb_info *info) | ||
8 | { | ||
9 | return 0; | ||
10 | } | ||
11 | |||
12 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-s390/kprobes.h b/include/asm-s390/kprobes.h index 830fe4c4eea6..340ba10446ea 100644 --- a/include/asm-s390/kprobes.h +++ b/include/asm-s390/kprobes.h | |||
@@ -46,8 +46,6 @@ typedef u16 kprobe_opcode_t; | |||
46 | ? (MAX_STACK_SIZE) \ | 46 | ? (MAX_STACK_SIZE) \ |
47 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) | 47 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) |
48 | 48 | ||
49 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)(pentry) | ||
50 | |||
51 | #define ARCH_SUPPORTS_KRETPROBES | 49 | #define ARCH_SUPPORTS_KRETPROBES |
52 | #define ARCH_INACTIVE_KPROBE_COUNT 0 | 50 | #define ARCH_INACTIVE_KPROBE_COUNT 0 |
53 | 51 | ||
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h index 05ea6f172786..f326451ed6ec 100644 --- a/include/asm-s390/page.h +++ b/include/asm-s390/page.h | |||
@@ -64,7 +64,8 @@ static inline void copy_page(void *to, void *from) | |||
64 | #define clear_user_page(page, vaddr, pg) clear_page(page) | 64 | #define clear_user_page(page, vaddr, pg) clear_page(page) |
65 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | 65 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) |
66 | 66 | ||
67 | #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) | 67 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ |
68 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | ||
68 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | 69 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
69 | 70 | ||
70 | /* | 71 | /* |
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h index 9ea7f1023e57..545857e64443 100644 --- a/include/asm-s390/percpu.h +++ b/include/asm-s390/percpu.h | |||
@@ -41,6 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; | |||
41 | __attribute__((__section__(".data.percpu"))) \ | 41 | __attribute__((__section__(".data.percpu"))) \ |
42 | __typeof__(type) per_cpu__##name | 42 | __typeof__(type) per_cpu__##name |
43 | 43 | ||
44 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
45 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | ||
46 | __typeof__(type) per_cpu__##name \ | ||
47 | ____cacheline_aligned_in_smp | ||
48 | |||
44 | #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) | 49 | #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) |
45 | #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) | 50 | #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) |
46 | #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) | 51 | #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) |
@@ -59,6 +64,8 @@ do { \ | |||
59 | 64 | ||
60 | #define DEFINE_PER_CPU(type, name) \ | 65 | #define DEFINE_PER_CPU(type, name) \ |
61 | __typeof__(type) per_cpu__##name | 66 | __typeof__(type) per_cpu__##name |
67 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
68 | DEFINE_PER_CPU(type, name) | ||
62 | 69 | ||
63 | #define __get_cpu_var(var) __reloc_hide(var,0) | 70 | #define __get_cpu_var(var) __reloc_hide(var,0) |
64 | #define __raw_get_cpu_var(var) __reloc_hide(var,0) | 71 | #define __raw_get_cpu_var(var) __reloc_hide(var,0) |
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 26215a976127..3208dc6c412c 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h | |||
@@ -669,19 +669,6 @@ ptep_clear_flush_young(struct vm_area_struct *vma, | |||
669 | return ptep_test_and_clear_young(vma, address, ptep); | 669 | return ptep_test_and_clear_young(vma, address, ptep); |
670 | } | 670 | } |
671 | 671 | ||
672 | static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
673 | { | ||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | static inline int | ||
678 | ptep_clear_flush_dirty(struct vm_area_struct *vma, | ||
679 | unsigned long address, pte_t *ptep) | ||
680 | { | ||
681 | /* No need to flush TLB; bits are in storage key */ | ||
682 | return ptep_test_and_clear_dirty(vma, address, ptep); | ||
683 | } | ||
684 | |||
685 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 672 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
686 | { | 673 | { |
687 | pte_t pte = *ptep; | 674 | pte_t pte = *ptep; |
@@ -707,16 +694,19 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) | |||
707 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | 694 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
708 | } | 695 | } |
709 | 696 | ||
710 | static inline pte_t | 697 | static inline void ptep_invalidate(unsigned long address, pte_t *ptep) |
711 | ptep_clear_flush(struct vm_area_struct *vma, | ||
712 | unsigned long address, pte_t *ptep) | ||
713 | { | 698 | { |
714 | pte_t pte = *ptep; | ||
715 | pte_t *shadow_pte = get_shadow_pte(ptep); | ||
716 | |||
717 | __ptep_ipte(address, ptep); | 699 | __ptep_ipte(address, ptep); |
718 | if (shadow_pte) | 700 | ptep = get_shadow_pte(ptep); |
719 | __ptep_ipte(address, shadow_pte); | 701 | if (ptep) |
702 | __ptep_ipte(address, ptep); | ||
703 | } | ||
704 | |||
705 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, | ||
706 | unsigned long address, pte_t *ptep) | ||
707 | { | ||
708 | pte_t pte = *ptep; | ||
709 | ptep_invalidate(address, ptep); | ||
720 | return pte; | 710 | return pte; |
721 | } | 711 | } |
722 | 712 | ||
@@ -726,21 +716,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
726 | set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); | 716 | set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); |
727 | } | 717 | } |
728 | 718 | ||
729 | static inline void | 719 | #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ |
730 | ptep_establish(struct vm_area_struct *vma, | 720 | ({ \ |
731 | unsigned long address, pte_t *ptep, | 721 | int __changed = !pte_same(*(__ptep), __entry); \ |
732 | pte_t entry) | 722 | if (__changed) { \ |
733 | { | 723 | ptep_invalidate(__addr, __ptep); \ |
734 | ptep_clear_flush(vma, address, ptep); | 724 | set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ |
735 | set_pte(ptep, entry); | 725 | } \ |
736 | } | 726 | __changed; \ |
737 | |||
738 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | ||
739 | ({ \ | ||
740 | int __changed = !pte_same(*(__ptep), __entry); \ | ||
741 | if (__changed) \ | ||
742 | ptep_establish(__vma, __address, __ptep, __entry); \ | ||
743 | __changed; \ | ||
744 | }) | 727 | }) |
745 | 728 | ||
746 | /* | 729 | /* |
@@ -940,12 +923,9 @@ extern int remove_shared_memory(unsigned long start, unsigned long size); | |||
940 | #define __HAVE_ARCH_MEMMAP_INIT | 923 | #define __HAVE_ARCH_MEMMAP_INIT |
941 | extern void memmap_init(unsigned long, int, unsigned long, unsigned long); | 924 | extern void memmap_init(unsigned long, int, unsigned long, unsigned long); |
942 | 925 | ||
943 | #define __HAVE_ARCH_PTEP_ESTABLISH | ||
944 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | 926 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
945 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 927 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
946 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | 928 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
947 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
948 | #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH | ||
949 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 929 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
950 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH | 930 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH |
951 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 931 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
diff --git a/include/asm-sh/a.out.h b/include/asm-sh/a.out.h index 6e9fca9ee333..685d0f6125fa 100644 --- a/include/asm-sh/a.out.h +++ b/include/asm-sh/a.out.h | |||
@@ -20,6 +20,7 @@ struct exec | |||
20 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
21 | 21 | ||
22 | #define STACK_TOP TASK_SIZE | 22 | #define STACK_TOP TASK_SIZE |
23 | #define STACK_TOP_MAX STACK_TOP | ||
23 | 24 | ||
24 | #endif | 25 | #endif |
25 | 26 | ||
diff --git a/include/asm-sh/fb.h b/include/asm-sh/fb.h new file mode 100644 index 000000000000..d92e99cd8c8a --- /dev/null +++ b/include/asm-sh/fb.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <asm/page.h> | ||
7 | |||
8 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
9 | unsigned long off) | ||
10 | { | ||
11 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
12 | } | ||
13 | |||
14 | static inline int fb_is_primary_device(struct fb_info *info) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h index e1995e86b663..237ee4e5b72a 100644 --- a/include/asm-sh64/a.out.h +++ b/include/asm-sh64/a.out.h | |||
@@ -31,6 +31,7 @@ struct exec | |||
31 | #ifdef __KERNEL__ | 31 | #ifdef __KERNEL__ |
32 | 32 | ||
33 | #define STACK_TOP TASK_SIZE | 33 | #define STACK_TOP TASK_SIZE |
34 | #define STACK_TOP_MAX STACK_TOP | ||
34 | 35 | ||
35 | #endif | 36 | #endif |
36 | 37 | ||
diff --git a/include/asm-sh64/fb.h b/include/asm-sh64/fb.h new file mode 100644 index 000000000000..d92e99cd8c8a --- /dev/null +++ b/include/asm-sh64/fb.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <asm/page.h> | ||
7 | |||
8 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
9 | unsigned long off) | ||
10 | { | ||
11 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
12 | } | ||
13 | |||
14 | static inline int fb_is_primary_device(struct fb_info *info) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-sparc/a.out.h b/include/asm-sparc/a.out.h index 9090060a23e6..917e04250696 100644 --- a/include/asm-sparc/a.out.h +++ b/include/asm-sparc/a.out.h | |||
@@ -92,6 +92,7 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */ | |||
92 | #include <asm/page.h> | 92 | #include <asm/page.h> |
93 | 93 | ||
94 | #define STACK_TOP (PAGE_OFFSET - PAGE_SIZE) | 94 | #define STACK_TOP (PAGE_OFFSET - PAGE_SIZE) |
95 | #define STACK_TOP_MAX STACK_TOP | ||
95 | 96 | ||
96 | #endif /* __KERNEL__ */ | 97 | #endif /* __KERNEL__ */ |
97 | 98 | ||
diff --git a/include/asm-sparc/fb.h b/include/asm-sparc/fb.h new file mode 100644 index 000000000000..c7df38030992 --- /dev/null +++ b/include/asm-sparc/fb.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | #include <linux/fb.h> | ||
4 | |||
5 | #define fb_pgprotect(...) do {} while (0) | ||
6 | |||
7 | static inline int fb_is_primary_device(struct fb_info *info) | ||
8 | { | ||
9 | return 0; | ||
10 | } | ||
11 | |||
12 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h index eb3b8e90b279..902e07f89a42 100644 --- a/include/asm-sparc64/a.out.h +++ b/include/asm-sparc64/a.out.h | |||
@@ -101,6 +101,8 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */ | |||
101 | #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ | 101 | #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ |
102 | STACK_TOP32 : STACK_TOP64) | 102 | STACK_TOP32 : STACK_TOP64) |
103 | 103 | ||
104 | #define STACK_TOP_MAX STACK_TOP64 | ||
105 | |||
104 | #endif | 106 | #endif |
105 | 107 | ||
106 | #endif /* !(__ASSEMBLY__) */ | 108 | #endif /* !(__ASSEMBLY__) */ |
diff --git a/include/asm-sparc64/fb.h b/include/asm-sparc64/fb.h new file mode 100644 index 000000000000..d6cd3a175fc3 --- /dev/null +++ b/include/asm-sparc64/fb.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | #include <linux/fb.h> | ||
4 | #include <linux/fs.h> | ||
5 | #include <asm/page.h> | ||
6 | |||
7 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
8 | unsigned long off) | ||
9 | { | ||
10 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
11 | } | ||
12 | |||
13 | static inline int fb_is_primary_device(struct fb_info *info) | ||
14 | { | ||
15 | return 0; | ||
16 | } | ||
17 | |||
18 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h index ad595b679842..9565a892801e 100644 --- a/include/asm-sparc64/io.h +++ b/include/asm-sparc64/io.h | |||
@@ -14,11 +14,6 @@ | |||
14 | #define __SLOW_DOWN_IO do { } while (0) | 14 | #define __SLOW_DOWN_IO do { } while (0) |
15 | #define SLOW_DOWN_IO do { } while (0) | 15 | #define SLOW_DOWN_IO do { } while (0) |
16 | 16 | ||
17 | extern unsigned long virt_to_bus_not_defined_use_pci_map(volatile void *addr); | ||
18 | #define virt_to_bus virt_to_bus_not_defined_use_pci_map | ||
19 | extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr); | ||
20 | #define bus_to_virt bus_to_virt_not_defined_use_pci_map | ||
21 | |||
22 | /* BIO layer definitions. */ | 17 | /* BIO layer definitions. */ |
23 | extern unsigned long kern_base, kern_size; | 18 | extern unsigned long kern_base, kern_size; |
24 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | 19 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h index a331b7b0dff2..7f6774dca5f4 100644 --- a/include/asm-sparc64/kprobes.h +++ b/include/asm-sparc64/kprobes.h | |||
@@ -10,7 +10,6 @@ typedef u32 kprobe_opcode_t; | |||
10 | #define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */ | 10 | #define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */ |
11 | #define MAX_INSN_SIZE 2 | 11 | #define MAX_INSN_SIZE 2 |
12 | 12 | ||
13 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry | ||
14 | #define arch_remove_kprobe(p) do {} while (0) | 13 | #define arch_remove_kprobe(p) do {} while (0) |
15 | #define ARCH_INACTIVE_KPROBE_COUNT 0 | 14 | #define ARCH_INACTIVE_KPROBE_COUNT 0 |
16 | 15 | ||
diff --git a/include/asm-sparc64/mdesc.h b/include/asm-sparc64/mdesc.h index e97c43133752..1acc7272e537 100644 --- a/include/asm-sparc64/mdesc.h +++ b/include/asm-sparc64/mdesc.h | |||
@@ -61,6 +61,16 @@ extern u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc); | |||
61 | 61 | ||
62 | extern void mdesc_update(void); | 62 | extern void mdesc_update(void); |
63 | 63 | ||
64 | struct mdesc_notifier_client { | ||
65 | void (*add)(struct mdesc_handle *handle, u64 node); | ||
66 | void (*remove)(struct mdesc_handle *handle, u64 node); | ||
67 | |||
68 | const char *node_name; | ||
69 | struct mdesc_notifier_client *next; | ||
70 | }; | ||
71 | |||
72 | extern void mdesc_register_notifier(struct mdesc_notifier_client *client); | ||
73 | |||
64 | extern void mdesc_fill_in_cpu_data(cpumask_t mask); | 74 | extern void mdesc_fill_in_cpu_data(cpumask_t mask); |
65 | 75 | ||
66 | extern void sun4v_mdesc_init(void); | 76 | extern void sun4v_mdesc_init(void); |
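The new mdesc notifier lets sun4v drivers learn when nodes of a given name appear in or vanish from the machine description after an mdesc_update(). A hedged sketch of a client registration; the callbacks and node name are illustrative, not taken from a specific driver:

#include <linux/init.h>
#include <asm/mdesc.h>

static void example_node_add(struct mdesc_handle *hp, u64 node)
{
	/* a matching node appeared: probe / create the device here */
}

static void example_node_remove(struct mdesc_handle *hp, u64 node)
{
	/* the node went away: tear the device down here */
}

static struct mdesc_notifier_client example_notifier = {
	.add		= example_node_add,
	.remove		= example_node_remove,
	.node_name	= "virtual-device-port",	/* example node name */
};

static int __init example_init(void)
{
	mdesc_register_notifier(&example_notifier);
	return 0;
}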
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h index 88db872ce2f8..caf8750792ff 100644 --- a/include/asm-sparc64/percpu.h +++ b/include/asm-sparc64/percpu.h | |||
@@ -18,6 +18,11 @@ extern unsigned long __per_cpu_shift; | |||
18 | #define DEFINE_PER_CPU(type, name) \ | 18 | #define DEFINE_PER_CPU(type, name) \ |
19 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name | 19 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name |
20 | 20 | ||
21 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
22 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | ||
23 | __typeof__(type) per_cpu__##name \ | ||
24 | ____cacheline_aligned_in_smp | ||
25 | |||
21 | register unsigned long __local_per_cpu_offset asm("g5"); | 26 | register unsigned long __local_per_cpu_offset asm("g5"); |
22 | 27 | ||
23 | /* var is in discarded region: offset to particular copy we want */ | 28 | /* var is in discarded region: offset to particular copy we want */ |
@@ -38,6 +43,8 @@ do { \ | |||
38 | #define real_setup_per_cpu_areas() do { } while (0) | 43 | #define real_setup_per_cpu_areas() do { } while (0) |
39 | #define DEFINE_PER_CPU(type, name) \ | 44 | #define DEFINE_PER_CPU(type, name) \ |
40 | __typeof__(type) per_cpu__##name | 45 | __typeof__(type) per_cpu__##name |
46 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
47 | DEFINE_PER_CPU(type, name) | ||
41 | 48 | ||
42 | #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) | 49 | #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) |
43 | #define __get_cpu_var(var) per_cpu__##var | 50 | #define __get_cpu_var(var) per_cpu__##var |
diff --git a/include/asm-sparc64/vio.h b/include/asm-sparc64/vio.h index 83c96422e9d6..c0a8d4ed5bcb 100644 --- a/include/asm-sparc64/vio.h +++ b/include/asm-sparc64/vio.h | |||
@@ -264,7 +264,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr, | |||
264 | ((dr->prod - dr->cons) & (ring_size - 1))); | 264 | ((dr->prod - dr->cons) & (ring_size - 1))); |
265 | } | 265 | } |
266 | 266 | ||
267 | #define VIO_MAX_TYPE_LEN 64 | 267 | #define VIO_MAX_TYPE_LEN 32 |
268 | #define VIO_MAX_COMPAT_LEN 64 | 268 | #define VIO_MAX_COMPAT_LEN 64 |
269 | 269 | ||
270 | struct vio_dev { | 270 | struct vio_dev { |
diff --git a/include/asm-um/a.out.h b/include/asm-um/a.out.h index 7016b893ac9d..78bc9eed26b2 100644 --- a/include/asm-um/a.out.h +++ b/include/asm-um/a.out.h | |||
@@ -17,4 +17,6 @@ extern int honeypot; | |||
17 | #define STACK_TOP \ | 17 | #define STACK_TOP \ |
18 | CHOOSE_MODE((honeypot ? host_task_size : task_size), task_size) | 18 | CHOOSE_MODE((honeypot ? host_task_size : task_size), task_size) |
19 | 19 | ||
20 | #define STACK_TOP_MAX STACK_TOP | ||
21 | |||
20 | #endif | 22 | #endif |
diff --git a/include/asm-v850/fb.h b/include/asm-v850/fb.h new file mode 100644 index 000000000000..c7df38030992 --- /dev/null +++ b/include/asm-v850/fb.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | #include <linux/fb.h> | ||
4 | |||
5 | #define fb_pgprotect(...) do {} while (0) | ||
6 | |||
7 | static inline int fb_is_primary_device(struct fb_info *info) | ||
8 | { | ||
9 | return 0; | ||
10 | } | ||
11 | |||
12 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h index 7255cde06538..e789300e41a5 100644 --- a/include/asm-x86_64/a.out.h +++ b/include/asm-x86_64/a.out.h | |||
@@ -21,7 +21,8 @@ struct exec | |||
21 | 21 | ||
22 | #ifdef __KERNEL__ | 22 | #ifdef __KERNEL__ |
23 | #include <linux/thread_info.h> | 23 | #include <linux/thread_info.h> |
24 | #define STACK_TOP TASK_SIZE | 24 | #define STACK_TOP TASK_SIZE |
25 | #define STACK_TOP_MAX TASK_SIZE64 | ||
25 | #endif | 26 | #endif |
26 | 27 | ||
27 | #endif /* __A_OUT_GNU_H__ */ | 28 | #endif /* __A_OUT_GNU_H__ */ |
diff --git a/include/asm-x86_64/fb.h b/include/asm-x86_64/fb.h new file mode 100644 index 000000000000..60548e651d12 --- /dev/null +++ b/include/asm-x86_64/fb.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | #include <linux/fb.h> | ||
4 | #include <linux/fs.h> | ||
5 | #include <asm/page.h> | ||
6 | |||
7 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
8 | unsigned long off) | ||
9 | { | ||
10 | if (boot_cpu_data.x86 > 3) | ||
11 | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; | ||
12 | } | ||
13 | |||
14 | static inline int fb_is_primary_device(struct fb_info *info) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h index cf5317898fb0..7db825403e01 100644 --- a/include/asm-x86_64/kprobes.h +++ b/include/asm-x86_64/kprobes.h | |||
@@ -41,7 +41,6 @@ typedef u8 kprobe_opcode_t; | |||
41 | ? (MAX_STACK_SIZE) \ | 41 | ? (MAX_STACK_SIZE) \ |
42 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) | 42 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) |
43 | 43 | ||
44 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry | ||
45 | #define ARCH_SUPPORTS_KRETPROBES | 44 | #define ARCH_SUPPORTS_KRETPROBES |
46 | #define ARCH_INACTIVE_KPROBE_COUNT 1 | 45 | #define ARCH_INACTIVE_KPROBE_COUNT 1 |
47 | 46 | ||
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index e327c830da0c..88adf1afb0a2 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h | |||
@@ -48,7 +48,8 @@ void copy_page(void *, void *); | |||
48 | #define clear_user_page(page, vaddr, pg) clear_page(page) | 48 | #define clear_user_page(page, vaddr, pg) clear_page(page) |
49 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | 49 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) |
50 | 50 | ||
51 | #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) | 51 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ |
52 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | ||
52 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | 53 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
53 | /* | 54 | /* |
54 | * These are used to make use of C type-checking.. | 55 | * These are used to make use of C type-checking.. |
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index c6fbb67eac90..5abd48270101 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h | |||
@@ -20,6 +20,11 @@ | |||
20 | #define DEFINE_PER_CPU(type, name) \ | 20 | #define DEFINE_PER_CPU(type, name) \ |
21 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name | 21 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name |
22 | 22 | ||
23 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
24 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | ||
25 | __typeof__(type) per_cpu__##name \ | ||
26 | ____cacheline_internodealigned_in_smp | ||
27 | |||
23 | /* var is in discarded region: offset to particular copy we want */ | 28 | /* var is in discarded region: offset to particular copy we want */ |
24 | #define per_cpu(var, cpu) (*({ \ | 29 | #define per_cpu(var, cpu) (*({ \ |
25 | extern int simple_identifier_##var(void); \ | 30 | extern int simple_identifier_##var(void); \ |
@@ -46,6 +51,8 @@ extern void setup_per_cpu_areas(void); | |||
46 | 51 | ||
47 | #define DEFINE_PER_CPU(type, name) \ | 52 | #define DEFINE_PER_CPU(type, name) \ |
48 | __typeof__(type) per_cpu__##name | 53 | __typeof__(type) per_cpu__##name |
54 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
55 | DEFINE_PER_CPU(type, name) | ||
49 | 56 | ||
50 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) | 57 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) |
51 | #define __get_cpu_var(var) per_cpu__##var | 58 | #define __get_cpu_var(var) per_cpu__##var |
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 4f169ac6b10a..3ba53099297d 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h | |||
@@ -284,13 +284,6 @@ static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & | |||
284 | 284 | ||
285 | struct vm_area_struct; | 285 | struct vm_area_struct; |
286 | 286 | ||
287 | static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
288 | { | ||
289 | if (!pte_dirty(*ptep)) | ||
290 | return 0; | ||
291 | return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte); | ||
292 | } | ||
293 | |||
294 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | 287 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) |
295 | { | 288 | { |
296 | if (!pte_young(*ptep)) | 289 | if (!pte_young(*ptep)) |
@@ -427,7 +420,6 @@ extern int kern_addr_valid(unsigned long addr); | |||
427 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) | 420 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) |
428 | 421 | ||
429 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 422 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
430 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
431 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 423 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
432 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | 424 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
433 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 425 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 8696f8ad401e..fc4e73f5f1fa 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h | |||
@@ -630,6 +630,8 @@ __SYSCALL(__NR_signalfd, sys_signalfd) | |||
630 | __SYSCALL(__NR_timerfd, sys_timerfd) | 630 | __SYSCALL(__NR_timerfd, sys_timerfd) |
631 | #define __NR_eventfd 284 | 631 | #define __NR_eventfd 284 |
632 | __SYSCALL(__NR_eventfd, sys_eventfd) | 632 | __SYSCALL(__NR_eventfd, sys_eventfd) |
633 | #define __NR_fallocate 285 | ||
634 | __SYSCALL(__NR_fallocate, sys_fallocate) | ||
633 | 635 | ||
634 | #ifndef __NO_STUBS | 636 | #ifndef __NO_STUBS |
635 | #define __ARCH_WANT_OLD_READDIR | 637 | #define __ARCH_WANT_OLD_READDIR |
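The x86-64 table wires __NR_fallocate up as syscall 285 (309 on powerpc above). Until the C library grows a wrapper, user space can reach it through syscall(2); a hedged sketch, assuming a 64-bit build where the 64-bit offset arguments fit in single registers:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_fallocate
#define __NR_fallocate 285	/* x86-64 value from the hunk above */
#endif

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* mode 0, offset 0, length 1 MiB; 32-bit ABIs split the 64-bit
	 * offsets across registers, which is what the compat entry handles */
	if (syscall(__NR_fallocate, fd, 0, 0L, 1L << 20)) {
		perror("fallocate");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}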
diff --git a/include/asm-xtensa/a.out.h b/include/asm-xtensa/a.out.h index ffc4dcfd6ac1..05a2f67c6768 100644 --- a/include/asm-xtensa/a.out.h +++ b/include/asm-xtensa/a.out.h | |||
@@ -17,6 +17,7 @@ | |||
17 | /* Note: the kernel needs the a.out definitions, even if only ELF is used. */ | 17 | /* Note: the kernel needs the a.out definitions, even if only ELF is used. */ |
18 | 18 | ||
19 | #define STACK_TOP TASK_SIZE | 19 | #define STACK_TOP TASK_SIZE |
20 | #define STACK_TOP_MAX STACK_TOP | ||
20 | 21 | ||
21 | struct exec | 22 | struct exec |
22 | { | 23 | { |
diff --git a/include/asm-xtensa/fb.h b/include/asm-xtensa/fb.h new file mode 100644 index 000000000000..c7df38030992 --- /dev/null +++ b/include/asm-xtensa/fb.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | #include <linux/fb.h> | ||
4 | |||
5 | #define fb_pgprotect(...) do {} while (0) | ||
6 | |||
7 | static inline int fb_is_primary_device(struct fb_info *info) | ||
8 | { | ||
9 | return 0; | ||
10 | } | ||
11 | |||
12 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h index e9fc512cc247..06850f3b26a7 100644 --- a/include/asm-xtensa/pgtable.h +++ b/include/asm-xtensa/pgtable.h | |||
@@ -267,17 +267,6 @@ ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, | |||
267 | return 1; | 267 | return 1; |
268 | } | 268 | } |
269 | 269 | ||
270 | static inline int | ||
271 | ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, | ||
272 | pte_t *ptep) | ||
273 | { | ||
274 | pte_t pte = *ptep; | ||
275 | if (!pte_dirty(pte)) | ||
276 | return 0; | ||
277 | update_pte(ptep, pte_mkclean(pte)); | ||
278 | return 1; | ||
279 | } | ||
280 | |||
281 | static inline pte_t | 270 | static inline pte_t |
282 | ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 271 | ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
283 | { | 272 | { |
@@ -418,7 +407,6 @@ typedef pte_t *pte_addr_t; | |||
418 | #endif /* !defined (__ASSEMBLY__) */ | 407 | #endif /* !defined (__ASSEMBLY__) */ |
419 | 408 | ||
420 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 409 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
421 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
422 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 410 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
423 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 411 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
424 | #define __HAVE_ARCH_PTEP_MKDIRTY | 412 | #define __HAVE_ARCH_PTEP_MKDIRTY |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index fccd8b548d93..dc234c508a6f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -122,7 +122,7 @@ extern struct acpi_mcfg_allocation *pci_mmcfg_config; | |||
122 | extern int pci_mmcfg_config_num; | 122 | extern int pci_mmcfg_config_num; |
123 | 123 | ||
124 | extern int sbf_port; | 124 | extern int sbf_port; |
125 | extern unsigned long acpi_video_flags; | 125 | extern unsigned long acpi_realmode_flags; |
126 | 126 | ||
127 | #else /* !CONFIG_ACPI */ | 127 | #else /* !CONFIG_ACPI */ |
128 | 128 | ||
diff --git a/include/linux/aio.h b/include/linux/aio.h index b903fc02bdb7..d10e608f232d 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h | |||
@@ -86,7 +86,7 @@ struct kioctx; | |||
86 | */ | 86 | */ |
87 | struct kiocb { | 87 | struct kiocb { |
88 | struct list_head ki_run_list; | 88 | struct list_head ki_run_list; |
89 | long ki_flags; | 89 | unsigned long ki_flags; |
90 | int ki_users; | 90 | int ki_users; |
91 | unsigned ki_key; /* id of this request */ | 91 | unsigned ki_key; /* id of this request */ |
92 | 92 | ||
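ki_flags becomes unsigned long because the kiocb flag helpers are built on the atomic bitops, which operate on unsigned long words. A hedged sketch of the pattern; the flag name follows the KIF_* constants defined elsewhere in this header:

#include <linux/aio.h>
#include <linux/bitops.h>

/* Illustrative: set_bit()/test_bit() take an unsigned long *, which is why
 * ki_flags must be unsigned long rather than long. */
static void example_mark_cancelled(struct kiocb *iocb)
{
	set_bit(KIF_CANCELLED, &iocb->ki_flags);
}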
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index e1a708337be3..91c8c07fe8b7 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
@@ -6,11 +6,13 @@ | |||
6 | struct pt_regs; | 6 | struct pt_regs; |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * MAX_ARG_PAGES defines the number of pages allocated for arguments | 9 | * These are the maximum length and maximum number of strings passed to the |
10 | * and envelope for the new program. 32 should suffice, this gives | 10 | * execve() system call. MAX_ARG_STRLEN is essentially random but serves to |
11 | * a maximum env+arg of 128kB w/4KB pages! | 11 | * prevent the kernel from being unduly impacted by misaddressed pointers. |
12 | * MAX_ARG_STRINGS is chosen to fit in a signed 32-bit integer. | ||
12 | */ | 13 | */ |
13 | #define MAX_ARG_PAGES 32 | 14 | #define MAX_ARG_STRLEN (PAGE_SIZE * 32) |
15 | #define MAX_ARG_STRINGS 0x7FFFFFFF | ||
14 | 16 | ||
15 | /* sizeof(linux_binprm->buf) */ | 17 | /* sizeof(linux_binprm->buf) */ |
16 | #define BINPRM_BUF_SIZE 128 | 18 | #define BINPRM_BUF_SIZE 128 |
@@ -24,7 +26,12 @@ struct pt_regs; | |||
24 | */ | 26 | */ |
25 | struct linux_binprm{ | 27 | struct linux_binprm{ |
26 | char buf[BINPRM_BUF_SIZE]; | 28 | char buf[BINPRM_BUF_SIZE]; |
29 | #ifdef CONFIG_MMU | ||
30 | struct vm_area_struct *vma; | ||
31 | #else | ||
32 | # define MAX_ARG_PAGES 32 | ||
27 | struct page *page[MAX_ARG_PAGES]; | 33 | struct page *page[MAX_ARG_PAGES]; |
34 | #endif | ||
28 | struct mm_struct *mm; | 35 | struct mm_struct *mm; |
29 | unsigned long p; /* current top of mem */ | 36 | unsigned long p; /* current top of mem */ |
30 | int sh_bang; | 37 | int sh_bang; |
@@ -40,6 +47,7 @@ struct linux_binprm{ | |||
40 | unsigned interp_flags; | 47 | unsigned interp_flags; |
41 | unsigned interp_data; | 48 | unsigned interp_data; |
42 | unsigned long loader, exec; | 49 | unsigned long loader, exec; |
50 | unsigned long argv_len; | ||
43 | }; | 51 | }; |
44 | 52 | ||
45 | #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 | 53 | #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 |
@@ -68,7 +76,7 @@ extern int register_binfmt(struct linux_binfmt *); | |||
68 | extern int unregister_binfmt(struct linux_binfmt *); | 76 | extern int unregister_binfmt(struct linux_binfmt *); |
69 | 77 | ||
70 | extern int prepare_binprm(struct linux_binprm *); | 78 | extern int prepare_binprm(struct linux_binprm *); |
71 | extern void remove_arg_zero(struct linux_binprm *); | 79 | extern int __must_check remove_arg_zero(struct linux_binprm *); |
72 | extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); | 80 | extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); |
73 | extern int flush_old_exec(struct linux_binprm * bprm); | 81 | extern int flush_old_exec(struct linux_binprm * bprm); |
74 | 82 | ||
@@ -85,6 +93,7 @@ extern int suid_dumpable; | |||
85 | extern int setup_arg_pages(struct linux_binprm * bprm, | 93 | extern int setup_arg_pages(struct linux_binprm * bprm, |
86 | unsigned long stack_top, | 94 | unsigned long stack_top, |
87 | int executable_stack); | 95 | int executable_stack); |
96 | extern int bprm_mm_init(struct linux_binprm *bprm); | ||
88 | extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); | 97 | extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); |
89 | extern void compute_creds(struct linux_binprm *binprm); | 98 | extern void compute_creds(struct linux_binprm *binprm); |
90 | extern int do_coredump(long signr, int exit_code, struct pt_regs * regs); | 99 | extern int do_coredump(long signr, int exit_code, struct pt_regs * regs); |
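With the usual 4 KiB page size MAX_ARG_STRLEN comes to 128 KiB per string, the same figure the fixed MAX_ARG_PAGES area used to allow for all of argv+envp combined; the total argument size is now bounded by the stack rlimit instead. A trivial check of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed 4 KiB pages */
#define MAX_ARG_STRLEN	(PAGE_SIZE * 32)

int main(void)
{
	printf("MAX_ARG_STRLEN = %lu KiB\n", MAX_ARG_STRLEN / 1024);	/* 128 */
	return 0;
}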
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b32564a1e105..f78965fc6426 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -624,7 +624,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn; | |||
624 | */ | 624 | */ |
625 | #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) | 625 | #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) |
626 | 626 | ||
627 | #ifdef CONFIG_MMU | 627 | #ifdef CONFIG_BOUNCE |
628 | extern int init_emergency_isa_pool(void); | 628 | extern int init_emergency_isa_pool(void); |
629 | extern void blk_queue_bounce(request_queue_t *q, struct bio **bio); | 629 | extern void blk_queue_bounce(request_queue_t *q, struct bio **bio); |
630 | #else | 630 | #else |
diff --git a/include/linux/bsg.h b/include/linux/bsg.h index bd998ca6cb2e..8547b10c388b 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h | |||
@@ -60,7 +60,6 @@ struct bsg_class_device { | |||
60 | extern int bsg_register_queue(struct request_queue *, const char *); | 60 | extern int bsg_register_queue(struct request_queue *, const char *); |
61 | extern void bsg_unregister_queue(struct request_queue *); | 61 | extern void bsg_unregister_queue(struct request_queue *); |
62 | #else | 62 | #else |
63 | struct bsg_class_device { }; | ||
64 | #define bsg_register_queue(disk, name) (0) | 63 | #define bsg_register_queue(disk, name) (0) |
65 | #define bsg_unregister_queue(disk) do { } while (0) | 64 | #define bsg_unregister_queue(disk) do { } while (0) |
66 | #endif | 65 | #endif |
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index e4ac016ad272..c4079b403e9e 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h | |||
@@ -43,9 +43,6 @@ int coda_revalidate_inode(struct dentry *); | |||
43 | int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 43 | int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
44 | int coda_setattr(struct dentry *, struct iattr *); | 44 | int coda_setattr(struct dentry *, struct iattr *); |
45 | 45 | ||
46 | /* global variables */ | ||
47 | extern int coda_fake_statfs; | ||
48 | |||
49 | /* this file: helpers */ | 46 | /* this file: helpers */ |

50 | static __inline__ struct CodaFid *coda_i2f(struct inode *); | 47 | static __inline__ struct CodaFid *coda_i2f(struct inode *); |
51 | static __inline__ char *coda_i2s(struct inode *); | 48 | static __inline__ char *coda_i2s(struct inode *); |
diff --git a/include/linux/coda_proc.h b/include/linux/coda_proc.h deleted file mode 100644 index 0dc1b0458e75..000000000000 --- a/include/linux/coda_proc.h +++ /dev/null | |||
@@ -1,76 +0,0 @@ | |||
1 | /* | ||
2 | * coda_statis.h | ||
3 | * | ||
4 | * CODA operation statistics | ||
5 | * | ||
6 | * (c) March, 1998 | ||
7 | * by Michihiro Kuramochi, Zhenyu Xia and Zhanyong Wan | ||
8 | * zhanyong.wan@yale.edu | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #ifndef _CODA_PROC_H | ||
13 | #define _CODA_PROC_H | ||
14 | |||
15 | void coda_sysctl_init(void); | ||
16 | void coda_sysctl_clean(void); | ||
17 | |||
18 | #include <linux/sysctl.h> | ||
19 | #include <linux/coda_fs_i.h> | ||
20 | #include <linux/coda.h> | ||
21 | |||
22 | /* these four files are presented to show the result of the statistics: | ||
23 | * | ||
24 | * /proc/fs/coda/vfs_stats | ||
25 | * cache_inv_stats | ||
26 | * | ||
27 | * these four files are presented to reset the statistics to 0: | ||
28 | * | ||
29 | * /proc/sys/coda/vfs_stats | ||
30 | * cache_inv_stats | ||
31 | */ | ||
32 | |||
33 | /* VFS operation statistics */ | ||
34 | struct coda_vfs_stats | ||
35 | { | ||
36 | /* file operations */ | ||
37 | int open; | ||
38 | int flush; | ||
39 | int release; | ||
40 | int fsync; | ||
41 | |||
42 | /* dir operations */ | ||
43 | int readdir; | ||
44 | |||
45 | /* inode operations */ | ||
46 | int create; | ||
47 | int lookup; | ||
48 | int link; | ||
49 | int unlink; | ||
50 | int symlink; | ||
51 | int mkdir; | ||
52 | int rmdir; | ||
53 | int rename; | ||
54 | int permission; | ||
55 | |||
56 | /* symlink operations */ | ||
57 | int follow_link; | ||
58 | int readlink; | ||
59 | }; | ||
60 | |||
61 | /* cache invalidation statistics */ | ||
62 | struct coda_cache_inv_stats | ||
63 | { | ||
64 | int flush; | ||
65 | int purge_user; | ||
66 | int zap_dir; | ||
67 | int zap_file; | ||
68 | int zap_vnode; | ||
69 | int purge_fid; | ||
70 | int replace; | ||
71 | }; | ||
72 | |||
73 | /* these global variables hold the actual statistics data */ | ||
74 | extern struct coda_vfs_stats coda_vfs_stat; | ||
75 | |||
76 | #endif /* _CODA_PROC_H */ | ||
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h index b541bb3d1f4b..aa8f454b3b77 100644 --- a/include/linux/coda_psdev.h +++ b/include/linux/coda_psdev.h | |||
@@ -8,11 +8,6 @@ | |||
8 | 8 | ||
9 | struct kstatfs; | 9 | struct kstatfs; |
10 | 10 | ||
11 | struct coda_sb_info | ||
12 | { | ||
13 | struct venus_comm *sbi_vcomm; | ||
14 | }; | ||
15 | |||
16 | /* communication pending/processing queues */ | 11 | /* communication pending/processing queues */ |
17 | struct venus_comm { | 12 | struct venus_comm { |
18 | u_long vc_seq; | 13 | u_long vc_seq; |
@@ -24,9 +19,9 @@ struct venus_comm { | |||
24 | }; | 19 | }; |
25 | 20 | ||
26 | 21 | ||
27 | static inline struct coda_sb_info *coda_sbp(struct super_block *sb) | 22 | static inline struct venus_comm *coda_vcp(struct super_block *sb) |
28 | { | 23 | { |
29 | return ((struct coda_sb_info *)((sb)->s_fs_info)); | 24 | return (struct venus_comm *)((sb)->s_fs_info); |
30 | } | 25 | } |
31 | 26 | ||
32 | 27 | ||
@@ -74,8 +69,6 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); | |||
74 | 69 | ||
75 | 70 | ||
76 | /* messages between coda filesystem in kernel and Venus */ | 71 | /* messages between coda filesystem in kernel and Venus */ |
77 | extern int coda_hard; | ||
78 | extern unsigned long coda_timeout; | ||
79 | struct upc_req { | 72 | struct upc_req { |
80 | struct list_head uc_chain; | 73 | struct list_head uc_chain; |
81 | caddr_t uc_data; | 74 | caddr_t uc_data; |
@@ -85,7 +78,6 @@ struct upc_req { | |||
85 | u_short uc_opcode; /* copied from data to save lookup */ | 78 | u_short uc_opcode; /* copied from data to save lookup */ |
86 | int uc_unique; | 79 | int uc_unique; |
87 | wait_queue_head_t uc_sleep; /* process' wait queue */ | 80 | wait_queue_head_t uc_sleep; /* process' wait queue */ |
88 | unsigned long uc_posttime; | ||
89 | }; | 81 | }; |
90 | 82 | ||
91 | #define REQ_ASYNC 0x1 | 83 | #define REQ_ASYNC 0x1 |
diff --git a/include/linux/crc7.h b/include/linux/crc7.h new file mode 100644 index 000000000000..1786e772d5c6 --- /dev/null +++ b/include/linux/crc7.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _LINUX_CRC7_H | ||
2 | #define _LINUX_CRC7_H | ||
3 | #include <linux/types.h> | ||
4 | |||
5 | extern const u8 crc7_syndrome_table[256]; | ||
6 | |||
7 | static inline u8 crc7_byte(u8 crc, u8 data) | ||
8 | { | ||
9 | return crc7_syndrome_table[(crc << 1) ^ data]; | ||
10 | } | ||
11 | |||
12 | extern u8 crc7(u8 crc, const u8 *buffer, size_t len); | ||
13 | |||
14 | #endif | ||
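The new <linux/crc7.h> exports a table-driven byte step plus a buffer routine; CRC-7 is what SD/MMC command frames are protected with when the card is driven over SPI. A hedged sketch of the calling convention only, leaving framing and bit placement to the caller:

#include <linux/crc7.h>

/* Illustrative: crc7() threads the running CRC through its first argument,
 * so a message can be checksummed in pieces. */
static u8 example_crc(const u8 *hdr, size_t hdr_len,
		      const u8 *payload, size_t payload_len)
{
	u8 crc = crc7(0, hdr, hdr_len);

	return crc7(crc, payload, payload_len);
}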
diff --git a/include/linux/device.h b/include/linux/device.h index be2debed70d2..d9f0a57f5a2f 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -572,6 +572,16 @@ dev_dbg(struct device * dev, const char * fmt, ...) | |||
572 | } | 572 | } |
573 | #endif | 573 | #endif |
574 | 574 | ||
575 | #ifdef VERBOSE_DEBUG | ||
576 | #define dev_vdbg dev_dbg | ||
577 | #else | ||
578 | static inline int __attribute__ ((format (printf, 2, 3))) | ||
579 | dev_vdbg(struct device * dev, const char * fmt, ...) | ||
580 | { | ||
581 | return 0; | ||
582 | } | ||
583 | #endif | ||
584 | |||
575 | #define dev_err(dev, format, arg...) \ | 585 | #define dev_err(dev, format, arg...) \ |
576 | dev_printk(KERN_ERR , dev , format , ## arg) | 586 | dev_printk(KERN_ERR , dev , format , ## arg) |
577 | #define dev_info(dev, format, arg...) \ | 587 | #define dev_info(dev, format, arg...) \ |
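dev_vdbg() only produces output when the translation unit defines VERBOSE_DEBUG before including <linux/device.h>; otherwise the inline stub above makes it a no-op while still format-checking the arguments. An illustrative driver fragment (my_xfer_done is a hypothetical helper):

    #define DEBUG
    #define VERBOSE_DEBUG           /* opt in to the extra-verbose messages */
    #include <linux/device.h>

    static void my_xfer_done(struct device *dev, int nbytes)
    {
            dev_vdbg(dev, "transferred %d bytes\n", nbytes);
    }
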
diff --git a/include/linux/edac.h b/include/linux/edac.h new file mode 100644 index 000000000000..eab451e69a91 --- /dev/null +++ b/include/linux/edac.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Generic EDAC defs | ||
3 | * | ||
4 | * Author: Dave Jiang <djiang@mvista.com> | ||
5 | * | ||
6 | * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under | ||
7 | * the terms of the GNU General Public License version 2. This program | ||
8 | * is licensed "as is" without any warranty of any kind, whether express | ||
9 | * or implied. | ||
10 | * | ||
11 | */ | ||
12 | #ifndef _LINUX_EDAC_H_ | ||
13 | #define _LINUX_EDAC_H_ | ||
14 | |||
15 | #include <asm/atomic.h> | ||
16 | |||
17 | #define EDAC_OPSTATE_INVAL -1 | ||
18 | #define EDAC_OPSTATE_POLL 0 | ||
19 | #define EDAC_OPSTATE_NMI 1 | ||
20 | #define EDAC_OPSTATE_INT 2 | ||
21 | |||
22 | extern int edac_op_state; | ||
23 | extern int edac_err_assert; | ||
24 | extern atomic_t edac_handlers; | ||
25 | |||
26 | extern int edac_handler_set(void); | ||
27 | extern void edac_atomic_assert_error(void); | ||
28 | |||
29 | #endif | ||
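These hooks let an architecture's error path hand a suspected memory error to whichever EDAC driver registered itself. A hedged sketch of the intended call pattern (the surrounding handler is hypothetical; only the edac_* calls come from the header above):

    #include <linux/edac.h>

    static void handle_possible_memory_error(void)
    {
            if (edac_handler_set()) {
                    /* An EDAC driver is loaded: let it log and act on the error. */
                    edac_atomic_assert_error();
                    return;
            }
            /* Otherwise fall back to the platform's default handling. */
    }
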
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h index dfed8009ebff..16cb25cbf7c5 100644 --- a/include/linux/efs_fs.h +++ b/include/linux/efs_fs.h | |||
@@ -45,6 +45,7 @@ extern efs_block_t efs_map_block(struct inode *, efs_block_t); | |||
45 | extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int); | 45 | extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int); |
46 | 46 | ||
47 | extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *); | 47 | extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *); |
48 | extern struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp); | ||
48 | extern struct dentry *efs_get_parent(struct dentry *); | 49 | extern struct dentry *efs_get_parent(struct dentry *); |
49 | extern int efs_bmap(struct inode *, int); | 50 | extern int efs_bmap(struct inode *, int); |
50 | 51 | ||
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index 9a1e0674e56c..e831759b2fb5 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h | |||
@@ -38,17 +38,25 @@ | |||
38 | * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two") | 38 | * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two") |
39 | * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef) | 39 | * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef) |
40 | */ | 40 | */ |
41 | #define ELFNOTE(name, type, desctype, descdata) \ | 41 | #define ELFNOTE_START(name, type, flags) \ |
42 | .pushsection .note.name, "",@note ; \ | 42 | .pushsection .note.name, flags,@note ; \ |
43 | .align 4 ; \ | 43 | .balign 4 ; \ |
44 | .long 2f - 1f /* namesz */ ; \ | 44 | .long 2f - 1f /* namesz */ ; \ |
45 | .long 4f - 3f /* descsz */ ; \ | 45 | .long 4484f - 3f /* descsz */ ; \ |
46 | .long type ; \ | 46 | .long type ; \ |
47 | 1:.asciz #name ; \ | 47 | 1:.asciz #name ; \ |
48 | 2:.align 4 ; \ | 48 | 2:.balign 4 ; \ |
49 | 3:desctype descdata ; \ | 49 | 3: |
50 | 4:.align 4 ; \ | 50 | |
51 | #define ELFNOTE_END \ | ||
52 | 4484:.balign 4 ; \ | ||
51 | .popsection ; | 53 | .popsection ; |
54 | |||
55 | #define ELFNOTE(name, type, desc) \ | ||
56 | ELFNOTE_START(name, type, "") \ | ||
57 | desc ; \ | ||
58 | ELFNOTE_END | ||
59 | |||
52 | #else /* !__ASSEMBLER__ */ | 60 | #else /* !__ASSEMBLER__ */ |
53 | #include <linux/elf.h> | 61 | #include <linux/elf.h> |
54 | /* | 62 | /* |
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h new file mode 100644 index 000000000000..8872fe8392d6 --- /dev/null +++ b/include/linux/exportfs.h | |||
@@ -0,0 +1,126 @@ | |||
1 | #ifndef LINUX_EXPORTFS_H | ||
2 | #define LINUX_EXPORTFS_H 1 | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | struct dentry; | ||
7 | struct super_block; | ||
8 | struct vfsmount; | ||
9 | |||
10 | |||
11 | /** | ||
12 | * struct export_operations - for nfsd to communicate with file systems | ||
13 | * @decode_fh: decode a file handle fragment and return a &struct dentry | ||
14 | * @encode_fh: encode a file handle fragment from a dentry | ||
15 | * @get_name: find the name for a given inode in a given directory | ||
16 | * @get_parent: find the parent of a given directory | ||
17 | * @get_dentry: find a dentry for the inode given a file handle sub-fragment | ||
18 | * @find_exported_dentry: | ||
19 | * set by the exporting module to a standard helper function. | ||
20 | * | ||
21 | * Description: | ||
22 | * The export_operations structure provides a means for nfsd to communicate | ||
23 | * with a particular exported file system - particularly enabling nfsd and | ||
24 | * the filesystem to co-operate when dealing with file handles. | ||
25 | * | ||
26 | * export_operations contains two basic operations for dealing with file | ||
27 | * handles, decode_fh() and encode_fh(), and allows for some other | ||
28 | * operations to be defined which standard helper routines use to get | ||
29 | * specific information from the filesystem. | ||
30 | * | ||
31 | * nfsd encodes information used to determine which filesystem a filehandle | ||
32 | * applies to in the initial part of the file handle. The remainder, termed | ||
33 | * a file handle fragment, is controlled completely by the filesystem. The | ||
34 | * standard helper routines assume that this fragment will contain one or | ||
35 | * two sub-fragments, one which identifies the file, and one which may be | ||
36 | * used to identify the (a) directory containing the file. | ||
37 | * | ||
38 | * In some situations, nfsd needs to get a dentry which is connected into a | ||
39 | * specific part of the file tree. To allow for this, it passes the | ||
40 | * function acceptable() together with a @context which can be used to see | ||
41 | * if the dentry is acceptable. As there can be multiple dentries for a | ||
42 | * given file, the filesystem should check each one for acceptability before | ||
43 | * looking for the next. As soon as an acceptable one is found, it should | ||
44 | * be returned. | ||
45 | * | ||
46 | * decode_fh: | ||
47 | * @decode_fh is given a &struct super_block (@sb), a file handle fragment | ||
48 | * (@fh, @fh_len) and an acceptability testing function (@acceptable, | ||
49 | * @context). It should return a &struct dentry which refers to the same | ||
50 | * file that the file handle fragment refers to, and which passes the | ||
51 | * acceptability test. If it cannot, it should return a %NULL pointer if | ||
52 | * the file was found but no acceptable &dentries were available, or a | ||
53 | * %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or | ||
54 | * %ENOMEM). | ||
55 | * | ||
56 | * encode_fh: | ||
57 | * @encode_fh should store in the file handle fragment @fh (using at most | ||
58 | * @max_len bytes) information that can be used by @decode_fh to recover the | ||
59 | * file referred to by the &struct dentry @de. If the @connectable flag is | ||
60 | * set, the encode_fh() should store sufficient information so that a good | ||
61 | * attempt can be made to find not only the file but also its place in the | ||
62 | * filesystem. This typically means storing a reference to de->d_parent in | ||
63 | * the filehandle fragment. encode_fh() should return the number of bytes | ||
64 | * stored or a negative error code such as %-ENOSPC | ||
65 | * | ||
66 | * get_name: | ||
67 | * @get_name should find a name for the given @child in the given @parent | ||
68 | * directory. The name should be stored in the @name (with the | ||
69 | * understanding that it is already pointing to a %NAME_MAX+1 sized | ||
70 | * buffer). get_name() should return %0 on success or a negative error | ||
71 | * code on failure. @get_name will be called without @parent->i_mutex held. | ||
72 | * | ||
73 | * get_parent: | ||
74 | * @get_parent should find the parent directory for the given @child which | ||
75 | * is also a directory. In the event that it cannot be found, or storage | ||
76 | * space cannot be allocated, a %ERR_PTR should be returned. | ||
77 | * | ||
78 | * get_dentry: | ||
79 | * Given a &super_block (@sb) and a pointer to a file-system specific inode | ||
80 | * identifier, possibly an inode number, (@inump) get_dentry() should find | ||
81 | * the identified inode and return a dentry for that inode. Any suitable | ||
82 | * dentry can be returned including, if necessary, a new dentry created with | ||
83 | * d_alloc_root. The caller can then find any other extant dentrys by | ||
84 | * following the d_alias links. If a new dentry was created using | ||
85 | * d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry | ||
86 | * should be d_rehash()ed. | ||
87 | * | ||
88 | * If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code | ||
89 | * can be returned. The @inump will be whatever was passed to | ||
90 | * nfsd_find_fh_dentry() in either the @obj or @parent parameters. | ||
91 | * | ||
92 | * Locking rules: | ||
93 | * get_parent is called with child->d_inode->i_mutex down | ||
94 | * get_name is not (which is possibly inconsistent) | ||
95 | */ | ||
96 | |||
97 | struct export_operations { | ||
98 | struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh, | ||
99 | int fh_len, int fh_type, | ||
100 | int (*acceptable)(void *context, struct dentry *de), | ||
101 | void *context); | ||
102 | int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len, | ||
103 | int connectable); | ||
104 | int (*get_name)(struct dentry *parent, char *name, | ||
105 | struct dentry *child); | ||
106 | struct dentry * (*get_parent)(struct dentry *child); | ||
107 | struct dentry * (*get_dentry)(struct super_block *sb, void *inump); | ||
108 | |||
109 | /* This is set by the exporting module to a standard helper */ | ||
110 | struct dentry * (*find_exported_dentry)( | ||
111 | struct super_block *sb, void *obj, void *parent, | ||
112 | int (*acceptable)(void *context, struct dentry *de), | ||
113 | void *context); | ||
114 | }; | ||
115 | |||
116 | extern struct dentry *find_exported_dentry(struct super_block *sb, void *obj, | ||
117 | void *parent, int (*acceptable)(void *context, struct dentry *de), | ||
118 | void *context); | ||
119 | |||
120 | extern int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len, | ||
121 | int connectable); | ||
122 | extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh, | ||
123 | int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *), | ||
124 | void *context); | ||
125 | |||
126 | #endif /* LINUX_EXPORTFS_H */ | ||
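With the kerneldoc and structure now living in their own header, a filesystem that wants NFS exporting includes <linux/exportfs.h> and points its superblock at an operations table. A hedged sketch for a hypothetical "myfs" (all myfs_* names are illustrative):

    #include <linux/exportfs.h>

    static struct dentry *myfs_get_dentry(struct super_block *sb, void *inump);
    static struct dentry *myfs_get_parent(struct dentry *child);

    static struct export_operations myfs_export_ops = {
            .get_dentry = myfs_get_dentry,  /* inode identifier -> dentry */
            .get_parent = myfs_get_parent,  /* ".." for disconnected dentries */
            /* decode_fh/encode_fh omitted in this sketch; see the contract
             * documented above before supplying custom versions. */
    };

In fill_super() the filesystem would then typically set sb->s_export_op = &myfs_export_ops;.
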
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h index de1f9f78625a..cdee7aaa57aa 100644 --- a/include/linux/ext4_fs.h +++ b/include/linux/ext4_fs.h | |||
@@ -71,7 +71,7 @@ | |||
71 | /* | 71 | /* |
72 | * Maximal count of links to a file | 72 | * Maximal count of links to a file |
73 | */ | 73 | */ |
74 | #define EXT4_LINK_MAX 32000 | 74 | #define EXT4_LINK_MAX 65000 |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * Macro-instructions used to manage several block sizes | 77 | * Macro-instructions used to manage several block sizes |
@@ -102,6 +102,7 @@ | |||
102 | EXT4_GOOD_OLD_FIRST_INO : \ | 102 | EXT4_GOOD_OLD_FIRST_INO : \ |
103 | (s)->s_first_ino) | 103 | (s)->s_first_ino) |
104 | #endif | 104 | #endif |
105 | #define EXT4_BLOCK_ALIGN(size, blkbits) ALIGN((size), (1 << (blkbits))) | ||
105 | 106 | ||
106 | /* | 107 | /* |
107 | * Macro-instructions used to manage fragments | 108 | * Macro-instructions used to manage fragments |
@@ -201,6 +202,7 @@ struct ext4_group_desc | |||
201 | #define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */ | 202 | #define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */ |
202 | #define EXT4_STATE_NEW 0x00000002 /* inode is newly created */ | 203 | #define EXT4_STATE_NEW 0x00000002 /* inode is newly created */ |
203 | #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */ | 204 | #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */ |
205 | #define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */ | ||
204 | 206 | ||
205 | /* Used to pass group descriptor data when online resize is done */ | 207 | /* Used to pass group descriptor data when online resize is done */ |
206 | struct ext4_new_group_input { | 208 | struct ext4_new_group_input { |
@@ -225,6 +227,11 @@ struct ext4_new_group_data { | |||
225 | __u32 free_blocks_count; | 227 | __u32 free_blocks_count; |
226 | }; | 228 | }; |
227 | 229 | ||
230 | /* | ||
231 | * The following is used by the preallocation code to tell get_blocks() | ||
232 | * that we want uninitialized extents. | ||
233 | */ | ||
234 | #define EXT4_CREATE_UNINITIALIZED_EXT 2 | ||
228 | 235 | ||
229 | /* | 236 | /* |
230 | * ioctl commands | 237 | * ioctl commands |
@@ -237,7 +244,7 @@ struct ext4_new_group_data { | |||
237 | #define EXT4_IOC_GROUP_ADD _IOW('f', 8,struct ext4_new_group_input) | 244 | #define EXT4_IOC_GROUP_ADD _IOW('f', 8,struct ext4_new_group_input) |
238 | #define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION | 245 | #define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION |
239 | #define EXT4_IOC_SETVERSION_OLD FS_IOC_SETVERSION | 246 | #define EXT4_IOC_SETVERSION_OLD FS_IOC_SETVERSION |
240 | #ifdef CONFIG_JBD_DEBUG | 247 | #ifdef CONFIG_JBD2_DEBUG |
241 | #define EXT4_IOC_WAIT_FOR_READONLY _IOR('f', 99, long) | 248 | #define EXT4_IOC_WAIT_FOR_READONLY _IOR('f', 99, long) |
242 | #endif | 249 | #endif |
243 | #define EXT4_IOC_GETRSVSZ _IOR('f', 5, long) | 250 | #define EXT4_IOC_GETRSVSZ _IOR('f', 5, long) |
@@ -253,7 +260,7 @@ struct ext4_new_group_data { | |||
253 | #define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int) | 260 | #define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int) |
254 | #define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int) | 261 | #define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int) |
255 | #define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int) | 262 | #define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int) |
256 | #ifdef CONFIG_JBD_DEBUG | 263 | #ifdef CONFIG_JBD2_DEBUG |
257 | #define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int) | 264 | #define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int) |
258 | #endif | 265 | #endif |
259 | #define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION | 266 | #define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION |
@@ -282,7 +289,7 @@ struct ext4_inode { | |||
282 | __le16 i_uid; /* Low 16 bits of Owner Uid */ | 289 | __le16 i_uid; /* Low 16 bits of Owner Uid */ |
283 | __le32 i_size; /* Size in bytes */ | 290 | __le32 i_size; /* Size in bytes */ |
284 | __le32 i_atime; /* Access time */ | 291 | __le32 i_atime; /* Access time */ |
285 | __le32 i_ctime; /* Creation time */ | 292 | __le32 i_ctime; /* Inode Change time */ |
286 | __le32 i_mtime; /* Modification time */ | 293 | __le32 i_mtime; /* Modification time */ |
287 | __le32 i_dtime; /* Deletion Time */ | 294 | __le32 i_dtime; /* Deletion Time */ |
288 | __le16 i_gid; /* Low 16 bits of Group Id */ | 295 | __le16 i_gid; /* Low 16 bits of Group Id */ |
@@ -331,10 +338,85 @@ struct ext4_inode { | |||
331 | } osd2; /* OS dependent 2 */ | 338 | } osd2; /* OS dependent 2 */ |
332 | __le16 i_extra_isize; | 339 | __le16 i_extra_isize; |
333 | __le16 i_pad1; | 340 | __le16 i_pad1; |
341 | __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */ | ||
342 | __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */ | ||
343 | __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */ | ||
344 | __le32 i_crtime; /* File Creation time */ | ||
345 | __le32 i_crtime_extra; /* extra File Creation time (nsec << 2 | epoch) */ | ||
334 | }; | 346 | }; |
335 | 347 | ||
336 | #define i_size_high i_dir_acl | 348 | #define i_size_high i_dir_acl |
337 | 349 | ||
350 | #define EXT4_EPOCH_BITS 2 | ||
351 | #define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1) | ||
352 | #define EXT4_NSEC_MASK (~0UL << EXT4_EPOCH_BITS) | ||
353 | |||
354 | /* | ||
355 | * Extended fields will fit into an inode if the filesystem was formatted | ||
356 | * with large inodes (-I 256 or larger) and there are not currently any EAs | ||
357 | * consuming all of the available space. For new inodes we always reserve | ||
358 | * enough space for the kernel's known extended fields, but for inodes | ||
359 | * created with an old kernel this might not have been the case. None of | ||
360 | * the extended inode fields is critical for correct filesystem operation. | ||
361 | * This macro checks if a certain field fits in the inode. Note that | ||
362 | * inode-size = GOOD_OLD_INODE_SIZE + i_extra_isize | ||
363 | */ | ||
364 | #define EXT4_FITS_IN_INODE(ext4_inode, einode, field) \ | ||
365 | ((offsetof(typeof(*ext4_inode), field) + \ | ||
366 | sizeof((ext4_inode)->field)) \ | ||
367 | <= (EXT4_GOOD_OLD_INODE_SIZE + \ | ||
368 | (einode)->i_extra_isize)) \ | ||
369 | |||
370 | static inline __le32 ext4_encode_extra_time(struct timespec *time) | ||
371 | { | ||
372 | return cpu_to_le32((sizeof(time->tv_sec) > 4 ? | ||
373 | time->tv_sec >> 32 : 0) | | ||
374 | ((time->tv_nsec << 2) & EXT4_NSEC_MASK)); | ||
375 | } | ||
376 | |||
377 | static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra) | ||
378 | { | ||
379 | if (sizeof(time->tv_sec) > 4) | ||
380 | time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) | ||
381 | << 32; | ||
382 | time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> 2; | ||
383 | } | ||
384 | |||
385 | #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \ | ||
386 | do { \ | ||
387 | (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \ | ||
388 | if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \ | ||
389 | (raw_inode)->xtime ## _extra = \ | ||
390 | ext4_encode_extra_time(&(inode)->xtime); \ | ||
391 | } while (0) | ||
392 | |||
393 | #define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \ | ||
394 | do { \ | ||
395 | if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ | ||
396 | (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \ | ||
397 | if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ | ||
398 | (raw_inode)->xtime ## _extra = \ | ||
399 | ext4_encode_extra_time(&(einode)->xtime); \ | ||
400 | } while (0) | ||
401 | |||
402 | #define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode) \ | ||
403 | do { \ | ||
404 | (inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \ | ||
405 | if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \ | ||
406 | ext4_decode_extra_time(&(inode)->xtime, \ | ||
407 | raw_inode->xtime ## _extra); \ | ||
408 | } while (0) | ||
409 | |||
410 | #define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \ | ||
411 | do { \ | ||
412 | if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ | ||
413 | (einode)->xtime.tv_sec = \ | ||
414 | (signed)le32_to_cpu((raw_inode)->xtime); \ | ||
415 | if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ | ||
416 | ext4_decode_extra_time(&(einode)->xtime, \ | ||
417 | raw_inode->xtime ## _extra); \ | ||
418 | } while (0) | ||
419 | |||
338 | #if defined(__KERNEL__) || defined(__linux__) | 420 | #if defined(__KERNEL__) || defined(__linux__) |
339 | #define i_reserved1 osd1.linux1.l_i_reserved1 | 421 | #define i_reserved1 osd1.linux1.l_i_reserved1 |
340 | #define i_frag osd2.linux2.l_i_frag | 422 | #define i_frag osd2.linux2.l_i_frag |
@@ -533,6 +615,13 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode) | |||
533 | return container_of(inode, struct ext4_inode_info, vfs_inode); | 615 | return container_of(inode, struct ext4_inode_info, vfs_inode); |
534 | } | 616 | } |
535 | 617 | ||
618 | static inline struct timespec ext4_current_time(struct inode *inode) | ||
619 | { | ||
620 | return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ? | ||
621 | current_fs_time(inode->i_sb) : CURRENT_TIME_SEC; | ||
622 | } | ||
623 | |||
624 | |||
536 | static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | 625 | static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) |
537 | { | 626 | { |
538 | return ino == EXT4_ROOT_INO || | 627 | return ino == EXT4_ROOT_INO || |
@@ -603,6 +692,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | |||
603 | #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 | 692 | #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 |
604 | #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 | 693 | #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 |
605 | #define EXT4_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 | 694 | #define EXT4_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 |
695 | #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 | ||
696 | #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 | ||
606 | 697 | ||
607 | #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 | 698 | #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 |
608 | #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 | 699 | #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 |
@@ -620,6 +711,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | |||
620 | EXT4_FEATURE_INCOMPAT_64BIT) | 711 | EXT4_FEATURE_INCOMPAT_64BIT) |
621 | #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ | 712 | #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ |
622 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ | 713 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ |
714 | EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \ | ||
715 | EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \ | ||
623 | EXT4_FEATURE_RO_COMPAT_BTREE_DIR) | 716 | EXT4_FEATURE_RO_COMPAT_BTREE_DIR) |
624 | 717 | ||
625 | /* | 718 | /* |
@@ -862,6 +955,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int); | |||
862 | extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); | 955 | extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); |
863 | extern void ext4_truncate (struct inode *); | 956 | extern void ext4_truncate (struct inode *); |
864 | extern void ext4_set_inode_flags(struct inode *); | 957 | extern void ext4_set_inode_flags(struct inode *); |
958 | extern void ext4_get_inode_flags(struct ext4_inode_info *); | ||
865 | extern void ext4_set_aops(struct inode *inode); | 959 | extern void ext4_set_aops(struct inode *inode); |
866 | extern int ext4_writepage_trans_blocks(struct inode *); | 960 | extern int ext4_writepage_trans_blocks(struct inode *); |
867 | extern int ext4_block_truncate_page(handle_t *handle, struct page *page, | 961 | extern int ext4_block_truncate_page(handle_t *handle, struct page *page, |
@@ -983,6 +1077,8 @@ extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
983 | extern void ext4_ext_truncate(struct inode *, struct page *); | 1077 | extern void ext4_ext_truncate(struct inode *, struct page *); |
984 | extern void ext4_ext_init(struct super_block *); | 1078 | extern void ext4_ext_init(struct super_block *); |
985 | extern void ext4_ext_release(struct super_block *); | 1079 | extern void ext4_ext_release(struct super_block *); |
1080 | extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, | ||
1081 | loff_t len); | ||
986 | static inline int | 1082 | static inline int |
987 | ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, | 1083 | ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, |
988 | unsigned long max_blocks, struct buffer_head *bh, | 1084 | unsigned long max_blocks, struct buffer_head *bh, |
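The new *_extra on-disk words pack the high epoch bits of the seconds value into bits 0-1 and the nanoseconds into bits 2-31, mirroring ext4_encode_extra_time()/ext4_decode_extra_time() above. A small user-space model of that packing (plain C, not ext4 code):

    #include <stdint.h>
    #include <stdio.h>

    #define EPOCH_BITS 2
    #define EPOCH_MASK ((1u << EPOCH_BITS) - 1)
    #define NSEC_MASK  (~0u << EPOCH_BITS)

    int main(void)
    {
            int64_t sec = (1LL << 32) + 5;         /* a post-2038 timestamp */
            uint32_t nsec = 123456789;

            /* encode: high seconds bits in bits 0-1, nsec shifted into bits 2-31 */
            uint32_t extra = (uint32_t)((sec >> 32) & EPOCH_MASK) |
                             ((nsec << EPOCH_BITS) & NSEC_MASK);

            /* decode: rebuild the 34-bit seconds value and the nanoseconds */
            int64_t sec_out = (uint32_t)sec | ((int64_t)(extra & EPOCH_MASK) << 32);
            uint32_t nsec_out = (extra & NSEC_MASK) >> EPOCH_BITS;

            printf("sec=%lld nsec=%u\n", (long long)sec_out, nsec_out);
            return 0;
    }
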
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h index acfe59740b03..81406f3655d4 100644 --- a/include/linux/ext4_fs_extents.h +++ b/include/linux/ext4_fs_extents.h | |||
@@ -141,7 +141,25 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *, | |||
141 | 141 | ||
142 | #define EXT_MAX_BLOCK 0xffffffff | 142 | #define EXT_MAX_BLOCK 0xffffffff |
143 | 143 | ||
144 | #define EXT_MAX_LEN ((1UL << 15) - 1) | 144 | /* |
145 | * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an | ||
146 | * initialized extent. This is 2^15 and not (2^16 - 1), since we use the | ||
147 | * MSB of ee_len field in the extent datastructure to signify if this | ||
148 | * particular extent is an initialized extent or an uninitialized (i.e. | ||
149 | * preallocated). | ||
150 | * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an | ||
151 | * uninitialized extent. | ||
152 | * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an | ||
153 | * uninitialized one. In other words, if MSB of ee_len is set, it is an | ||
154 | * uninitialized extent with only one special scenario when ee_len = 0x8000. | ||
155 | * In this case we can not have an uninitialized extent of zero length and | ||
156 | * thus we treat it as a special case of an initialized extent with 0x8000 length. | ||
157 | * This way we get better extent-to-group alignment for initialized extents. | ||
158 | * Hence, the maximum number of blocks we can have in an *initialized* | ||
159 | * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767). | ||
160 | */ | ||
161 | #define EXT_INIT_MAX_LEN (1UL << 15) | ||
162 | #define EXT_UNINIT_MAX_LEN (EXT_INIT_MAX_LEN - 1) | ||
145 | 163 | ||
146 | 164 | ||
147 | #define EXT_FIRST_EXTENT(__hdr__) \ | 165 | #define EXT_FIRST_EXTENT(__hdr__) \ |
@@ -188,8 +206,31 @@ ext4_ext_invalidate_cache(struct inode *inode) | |||
188 | EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO; | 206 | EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO; |
189 | } | 207 | } |
190 | 208 | ||
209 | static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext) | ||
210 | { | ||
211 | /* We can not have an uninitialized extent of zero length! */ | ||
212 | BUG_ON((le16_to_cpu(ext->ee_len) & ~EXT_INIT_MAX_LEN) == 0); | ||
213 | ext->ee_len |= cpu_to_le16(EXT_INIT_MAX_LEN); | ||
214 | } | ||
215 | |||
216 | static inline int ext4_ext_is_uninitialized(struct ext4_extent *ext) | ||
217 | { | ||
218 | /* Extent with ee_len of 0x8000 is treated as an initialized extent */ | ||
219 | return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN); | ||
220 | } | ||
221 | |||
222 | static inline int ext4_ext_get_actual_len(struct ext4_extent *ext) | ||
223 | { | ||
224 | return (le16_to_cpu(ext->ee_len) <= EXT_INIT_MAX_LEN ? | ||
225 | le16_to_cpu(ext->ee_len) : | ||
226 | (le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN)); | ||
227 | } | ||
228 | |||
191 | extern int ext4_extent_tree_init(handle_t *, struct inode *); | 229 | extern int ext4_extent_tree_init(handle_t *, struct inode *); |
192 | extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *); | 230 | extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *); |
231 | extern int ext4_ext_try_to_merge(struct inode *inode, | ||
232 | struct ext4_ext_path *path, | ||
233 | struct ext4_extent *); | ||
193 | extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *); | 234 | extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *); |
194 | extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *); | 235 | extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *); |
195 | extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *); | 236 | extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *); |
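The comment above describes the ee_len encoding: values up to 0x8000 are initialized extents, while larger values have the MSB set and denote an uninitialized extent of (value - 0x8000) blocks. A tiny host-side demonstration of the decode rule in ext4_ext_get_actual_len() (a model, not kernel code, ignoring the le16 conversion):

    #include <stdint.h>
    #include <stdio.h>

    #define EXT_INIT_MAX_LEN (1u << 15)

    static unsigned actual_len(uint16_t ee_len)
    {
            return ee_len <= EXT_INIT_MAX_LEN ? ee_len
                                              : ee_len - EXT_INIT_MAX_LEN;
    }

    int main(void)
    {
            printf("%u\n", actual_len(100));          /* initialized, 100 blocks */
            printf("%u\n", actual_len(0x8000));       /* special case: 32768 initialized blocks */
            printf("%u\n", actual_len(0x8000 + 100)); /* uninitialized, 100 blocks */
            return 0;
    }
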
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h index 9de494406995..1a511e9905aa 100644 --- a/include/linux/ext4_fs_i.h +++ b/include/linux/ext4_fs_i.h | |||
@@ -153,6 +153,11 @@ struct ext4_inode_info { | |||
153 | 153 | ||
154 | unsigned long i_ext_generation; | 154 | unsigned long i_ext_generation; |
155 | struct ext4_ext_cache i_cached_extent; | 155 | struct ext4_ext_cache i_cached_extent; |
156 | /* | ||
157 | * File creation time. Its function is the same as that of | ||
158 | * struct timespec i_{a,c,m}time in the generic inode. | ||
159 | */ | ||
160 | struct timespec i_crtime; | ||
156 | }; | 161 | }; |
157 | 162 | ||
158 | #endif /* _LINUX_EXT4_FS_I */ | 163 | #endif /* _LINUX_EXT4_FS_I */ |
diff --git a/include/linux/ext4_fs_sb.h b/include/linux/ext4_fs_sb.h index 2347557a327a..1b2ffee12be9 100644 --- a/include/linux/ext4_fs_sb.h +++ b/include/linux/ext4_fs_sb.h | |||
@@ -73,7 +73,7 @@ struct ext4_sb_info { | |||
73 | struct list_head s_orphan; | 73 | struct list_head s_orphan; |
74 | unsigned long s_commit_interval; | 74 | unsigned long s_commit_interval; |
75 | struct block_device *journal_bdev; | 75 | struct block_device *journal_bdev; |
76 | #ifdef CONFIG_JBD_DEBUG | 76 | #ifdef CONFIG_JBD2_DEBUG |
77 | struct timer_list turn_ro_timer; /* For turning read-only (crash simulation) */ | 77 | struct timer_list turn_ro_timer; /* For turning read-only (crash simulation) */ |
78 | wait_queue_head_t ro_wait_queue; /* For people waiting for the fs to go read-only */ | 78 | wait_queue_head_t ro_wait_queue; /* For people waiting for the fs to go read-only */ |
79 | #endif | 79 | #endif |
@@ -81,6 +81,7 @@ struct ext4_sb_info { | |||
81 | char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */ | 81 | char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */ |
82 | int s_jquota_fmt; /* Format of quota to use */ | 82 | int s_jquota_fmt; /* Format of quota to use */ |
83 | #endif | 83 | #endif |
84 | unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */ | ||
84 | 85 | ||
85 | #ifdef EXTENTS_STATS | 86 | #ifdef EXTENTS_STATS |
86 | /* ext4 extents stats */ | 87 | /* ext4 extents stats */ |
diff --git a/include/linux/falloc.h b/include/linux/falloc.h new file mode 100644 index 000000000000..8e912ab6a072 --- /dev/null +++ b/include/linux/falloc.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _FALLOC_H_ | ||
2 | #define _FALLOC_H_ | ||
3 | |||
4 | #define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */ | ||
5 | |||
6 | #endif /* _FALLOC_H_ */ | ||
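FALLOC_FL_KEEP_SIZE asks the filesystem to preallocate blocks without growing the reported file size. Assuming a libc that exposes the fallocate(2) wrapper and this flag under _GNU_SOURCE, a user-space sketch looks like this:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("testfile", O_RDWR | O_CREAT, 0644);
            if (fd < 0) { perror("open"); return 1; }

            /* Reserve 1 MiB of blocks; i_size stays where it was. */
            if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) != 0)
                    perror("fallocate");

            close(fd);
            return 0;
    }
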
diff --git a/include/linux/fb.h b/include/linux/fb.h index 66226824ab68..cec54106aa87 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -119,6 +119,7 @@ struct dentry; | |||
119 | #define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ | 119 | #define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ |
120 | #define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ | 120 | #define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ |
121 | #define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ | 121 | #define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ |
122 | #define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ | ||
122 | #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ | 123 | #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ |
123 | #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ | 124 | #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ |
124 | #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ | 125 | #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ |
@@ -529,6 +530,8 @@ struct fb_cursor_user { | |||
529 | #define FB_EVENT_CONBLANK 0x0C | 530 | #define FB_EVENT_CONBLANK 0x0C |
530 | /* Get drawing requirements */ | 531 | /* Get drawing requirements */ |
531 | #define FB_EVENT_GET_REQ 0x0D | 532 | #define FB_EVENT_GET_REQ 0x0D |
533 | /* Unbind from the console if possible */ | ||
534 | #define FB_EVENT_FB_UNBIND 0x0E | ||
532 | 535 | ||
533 | struct fb_event { | 536 | struct fb_event { |
534 | struct fb_info *info; | 537 | struct fb_info *info; |
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 4631086f5060..c8e02de737f6 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -1,5 +1,8 @@ | |||
1 | /* Freezer declarations */ | 1 | /* Freezer declarations */ |
2 | 2 | ||
3 | #ifndef FREEZER_H_INCLUDED | ||
4 | #define FREEZER_H_INCLUDED | ||
5 | |||
3 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
4 | 7 | ||
5 | #ifdef CONFIG_PM | 8 | #ifdef CONFIG_PM |
@@ -22,7 +25,7 @@ static inline int freezing(struct task_struct *p) | |||
22 | /* | 25 | /* |
23 | * Request that a process be frozen | 26 | * Request that a process be frozen |
24 | */ | 27 | */ |
25 | static inline void freeze(struct task_struct *p) | 28 | static inline void set_freeze_flag(struct task_struct *p) |
26 | { | 29 | { |
27 | set_tsk_thread_flag(p, TIF_FREEZE); | 30 | set_tsk_thread_flag(p, TIF_FREEZE); |
28 | } | 31 | } |
@@ -30,7 +33,7 @@ static inline void freeze(struct task_struct *p) | |||
30 | /* | 33 | /* |
31 | * Sometimes we may need to cancel the previous 'freeze' request | 34 | * Sometimes we may need to cancel the previous 'freeze' request |
32 | */ | 35 | */ |
33 | static inline void do_not_freeze(struct task_struct *p) | 36 | static inline void clear_freeze_flag(struct task_struct *p) |
34 | { | 37 | { |
35 | clear_tsk_thread_flag(p, TIF_FREEZE); | 38 | clear_tsk_thread_flag(p, TIF_FREEZE); |
36 | } | 39 | } |
@@ -53,7 +56,7 @@ static inline int thaw_process(struct task_struct *p) | |||
53 | wake_up_process(p); | 56 | wake_up_process(p); |
54 | return 1; | 57 | return 1; |
55 | } | 58 | } |
56 | clear_tsk_thread_flag(p, TIF_FREEZE); | 59 | clear_freeze_flag(p); |
57 | task_unlock(p); | 60 | task_unlock(p); |
58 | return 0; | 61 | return 0; |
59 | } | 62 | } |
@@ -115,10 +118,19 @@ static inline int freezer_should_skip(struct task_struct *p) | |||
115 | return !!(p->flags & PF_FREEZER_SKIP); | 118 | return !!(p->flags & PF_FREEZER_SKIP); |
116 | } | 119 | } |
117 | 120 | ||
121 | /* | ||
122 | * Tell the freezer that the current task should be frozen by it | ||
123 | */ | ||
124 | static inline void set_freezable(void) | ||
125 | { | ||
126 | current->flags &= ~PF_NOFREEZE; | ||
127 | } | ||
128 | |||
118 | #else | 129 | #else |
119 | static inline int frozen(struct task_struct *p) { return 0; } | 130 | static inline int frozen(struct task_struct *p) { return 0; } |
120 | static inline int freezing(struct task_struct *p) { return 0; } | 131 | static inline int freezing(struct task_struct *p) { return 0; } |
121 | static inline void freeze(struct task_struct *p) { BUG(); } | 132 | static inline void set_freeze_flag(struct task_struct *p) {} |
133 | static inline void clear_freeze_flag(struct task_struct *p) {} | ||
122 | static inline int thaw_process(struct task_struct *p) { return 1; } | 134 | static inline int thaw_process(struct task_struct *p) { return 1; } |
123 | 135 | ||
124 | static inline void refrigerator(void) {} | 136 | static inline void refrigerator(void) {} |
@@ -130,4 +142,7 @@ static inline int try_to_freeze(void) { return 0; } | |||
130 | static inline void freezer_do_not_count(void) {} | 142 | static inline void freezer_do_not_count(void) {} |
131 | static inline void freezer_count(void) {} | 143 | static inline void freezer_count(void) {} |
132 | static inline int freezer_should_skip(struct task_struct *p) { return 0; } | 144 | static inline int freezer_should_skip(struct task_struct *p) { return 0; } |
145 | static inline void set_freezable(void) {} | ||
133 | #endif | 146 | #endif |
147 | |||
148 | #endif /* FREEZER_H_INCLUDED */ | ||
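set_freezable() is the opt-in companion to try_to_freeze(): a kernel thread that clears PF_NOFREEZE this way can be parked by the freezer across suspend. A hedged sketch of the usual loop (my_kthread is hypothetical; the freezer and kthread calls are real):

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int my_kthread(void *data)
    {
            set_freezable();                 /* allow the freezer to stop us */

            while (!kthread_should_stop()) {
                    try_to_freeze();         /* park here during suspend */
                    /* ... do one unit of work ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }
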
diff --git a/include/linux/fs.h b/include/linux/fs.h index e68780810279..d33beadd9a43 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -283,11 +283,14 @@ extern int dir_notify_enable; | |||
283 | #include <linux/init.h> | 283 | #include <linux/init.h> |
284 | #include <linux/pid.h> | 284 | #include <linux/pid.h> |
285 | #include <linux/mutex.h> | 285 | #include <linux/mutex.h> |
286 | #include <linux/sysctl.h> | ||
287 | #include <linux/capability.h> | ||
286 | 288 | ||
287 | #include <asm/atomic.h> | 289 | #include <asm/atomic.h> |
288 | #include <asm/semaphore.h> | 290 | #include <asm/semaphore.h> |
289 | #include <asm/byteorder.h> | 291 | #include <asm/byteorder.h> |
290 | 292 | ||
293 | struct export_operations; | ||
291 | struct hd_geometry; | 294 | struct hd_geometry; |
292 | struct iovec; | 295 | struct iovec; |
293 | struct nameidata; | 296 | struct nameidata; |
@@ -694,20 +697,26 @@ struct fown_struct { | |||
694 | * Track a single file's readahead state | 697 | * Track a single file's readahead state |
695 | */ | 698 | */ |
696 | struct file_ra_state { | 699 | struct file_ra_state { |
697 | unsigned long start; /* Current window */ | 700 | pgoff_t start; /* where readahead started */ |
698 | unsigned long size; | 701 | unsigned long size; /* # of readahead pages */ |
699 | unsigned long flags; /* ra flags RA_FLAG_xxx*/ | 702 | unsigned long async_size; /* do asynchronous readahead when |
700 | unsigned long cache_hit; /* cache hit count*/ | 703 | there are only # of pages ahead */ |
701 | unsigned long prev_index; /* Cache last read() position */ | 704 | |
702 | unsigned long ahead_start; /* Ahead window */ | ||
703 | unsigned long ahead_size; | ||
704 | unsigned long ra_pages; /* Maximum readahead window */ | 705 | unsigned long ra_pages; /* Maximum readahead window */ |
705 | unsigned long mmap_hit; /* Cache hit stat for mmap accesses */ | 706 | unsigned long mmap_hit; /* Cache hit stat for mmap accesses */ |
706 | unsigned long mmap_miss; /* Cache miss stat for mmap accesses */ | 707 | unsigned long mmap_miss; /* Cache miss stat for mmap accesses */ |
708 | unsigned long prev_index; /* Cache last read() position */ | ||
707 | unsigned int prev_offset; /* Offset where last read() ended in a page */ | 709 | unsigned int prev_offset; /* Offset where last read() ended in a page */ |
708 | }; | 710 | }; |
709 | #define RA_FLAG_MISS 0x01 /* a cache miss occured against this file */ | 711 | |
710 | #define RA_FLAG_INCACHE 0x02 /* file is already in cache */ | 712 | /* |
713 | * Check if @index falls in the readahead windows. | ||
714 | */ | ||
715 | static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) | ||
716 | { | ||
717 | return (index >= ra->start && | ||
718 | index < ra->start + ra->size); | ||
719 | } | ||
711 | 720 | ||
712 | struct file { | 721 | struct file { |
713 | /* | 722 | /* |
@@ -859,7 +868,7 @@ extern void locks_init_lock(struct file_lock *); | |||
859 | extern void locks_copy_lock(struct file_lock *, struct file_lock *); | 868 | extern void locks_copy_lock(struct file_lock *, struct file_lock *); |
860 | extern void locks_remove_posix(struct file *, fl_owner_t); | 869 | extern void locks_remove_posix(struct file *, fl_owner_t); |
861 | extern void locks_remove_flock(struct file *); | 870 | extern void locks_remove_flock(struct file *); |
862 | extern int posix_test_lock(struct file *, struct file_lock *); | 871 | extern void posix_test_lock(struct file *, struct file_lock *); |
863 | extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); | 872 | extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); |
864 | extern int posix_lock_file_wait(struct file *, struct file_lock *); | 873 | extern int posix_lock_file_wait(struct file *, struct file_lock *); |
865 | extern int posix_unblock_lock(struct file *, struct file_lock *); | 874 | extern int posix_unblock_lock(struct file *, struct file_lock *); |
@@ -870,6 +879,7 @@ extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); | |||
870 | extern int __break_lease(struct inode *inode, unsigned int flags); | 879 | extern int __break_lease(struct inode *inode, unsigned int flags); |
871 | extern void lease_get_mtime(struct inode *, struct timespec *time); | 880 | extern void lease_get_mtime(struct inode *, struct timespec *time); |
872 | extern int setlease(struct file *, long, struct file_lock **); | 881 | extern int setlease(struct file *, long, struct file_lock **); |
882 | extern int vfs_setlease(struct file *, long, struct file_lock **); | ||
873 | extern int lease_modify(struct file_lock **, int); | 883 | extern int lease_modify(struct file_lock **, int); |
874 | extern int lock_may_read(struct inode *, loff_t start, unsigned long count); | 884 | extern int lock_may_read(struct inode *, loff_t start, unsigned long count); |
875 | extern int lock_may_write(struct inode *, loff_t start, unsigned long count); | 885 | extern int lock_may_write(struct inode *, loff_t start, unsigned long count); |
@@ -988,6 +998,9 @@ enum { | |||
988 | #define put_fs_excl() atomic_dec(¤t->fs_excl) | 998 | #define put_fs_excl() atomic_dec(¤t->fs_excl) |
989 | #define has_fs_excl() atomic_read(¤t->fs_excl) | 999 | #define has_fs_excl() atomic_read(¤t->fs_excl) |
990 | 1000 | ||
1001 | #define is_owner_or_cap(inode) \ | ||
1002 | ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER)) | ||
1003 | |||
991 | /* not quite ready to be deprecated, but... */ | 1004 | /* not quite ready to be deprecated, but... */ |
992 | extern void lock_super(struct super_block *); | 1005 | extern void lock_super(struct super_block *); |
993 | extern void unlock_super(struct super_block *); | 1006 | extern void unlock_super(struct super_block *); |
@@ -1116,6 +1129,7 @@ struct file_operations { | |||
1116 | int (*flock) (struct file *, int, struct file_lock *); | 1129 | int (*flock) (struct file *, int, struct file_lock *); |
1117 | ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); | 1130 | ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); |
1118 | ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); | 1131 | ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); |
1132 | int (*setlease)(struct file *, long, struct file_lock **); | ||
1119 | }; | 1133 | }; |
1120 | 1134 | ||
1121 | struct inode_operations { | 1135 | struct inode_operations { |
@@ -1141,6 +1155,8 @@ struct inode_operations { | |||
1141 | ssize_t (*listxattr) (struct dentry *, char *, size_t); | 1155 | ssize_t (*listxattr) (struct dentry *, char *, size_t); |
1142 | int (*removexattr) (struct dentry *, const char *); | 1156 | int (*removexattr) (struct dentry *, const char *); |
1143 | void (*truncate_range)(struct inode *, loff_t, loff_t); | 1157 | void (*truncate_range)(struct inode *, loff_t, loff_t); |
1158 | long (*fallocate)(struct inode *inode, int mode, loff_t offset, | ||
1159 | loff_t len); | ||
1144 | }; | 1160 | }; |
1145 | 1161 | ||
1146 | struct seq_file; | 1162 | struct seq_file; |
@@ -1277,119 +1293,6 @@ static inline void file_accessed(struct file *file) | |||
1277 | 1293 | ||
1278 | int sync_inode(struct inode *inode, struct writeback_control *wbc); | 1294 | int sync_inode(struct inode *inode, struct writeback_control *wbc); |
1279 | 1295 | ||
1280 | /** | ||
1281 | * struct export_operations - for nfsd to communicate with file systems | ||
1282 | * @decode_fh: decode a file handle fragment and return a &struct dentry | ||
1283 | * @encode_fh: encode a file handle fragment from a dentry | ||
1284 | * @get_name: find the name for a given inode in a given directory | ||
1285 | * @get_parent: find the parent of a given directory | ||
1286 | * @get_dentry: find a dentry for the inode given a file handle sub-fragment | ||
1287 | * @find_exported_dentry: | ||
1288 | * set by the exporting module to a standard helper function. | ||
1289 | * | ||
1290 | * Description: | ||
1291 | * The export_operations structure provides a means for nfsd to communicate | ||
1292 | * with a particular exported file system - particularly enabling nfsd and | ||
1293 | * the filesystem to co-operate when dealing with file handles. | ||
1294 | * | ||
1295 | * export_operations contains two basic operation for dealing with file | ||
1296 | * handles, decode_fh() and encode_fh(), and allows for some other | ||
1297 | * operations to be defined which standard helper routines use to get | ||
1298 | * specific information from the filesystem. | ||
1299 | * | ||
1300 | * nfsd encodes information use to determine which filesystem a filehandle | ||
1301 | * applies to in the initial part of the file handle. The remainder, termed | ||
1302 | * a file handle fragment, is controlled completely by the filesystem. The | ||
1303 | * standard helper routines assume that this fragment will contain one or | ||
1304 | * two sub-fragments, one which identifies the file, and one which may be | ||
1305 | * used to identify the (a) directory containing the file. | ||
1306 | * | ||
1307 | * In some situations, nfsd needs to get a dentry which is connected into a | ||
1308 | * specific part of the file tree. To allow for this, it passes the | ||
1309 | * function acceptable() together with a @context which can be used to see | ||
1310 | * if the dentry is acceptable. As there can be multiple dentrys for a | ||
1311 | * given file, the filesystem should check each one for acceptability before | ||
1312 | * looking for the next. As soon as an acceptable one is found, it should | ||
1313 | * be returned. | ||
1314 | * | ||
1315 | * decode_fh: | ||
1316 | * @decode_fh is given a &struct super_block (@sb), a file handle fragment | ||
1317 | * (@fh, @fh_len) and an acceptability testing function (@acceptable, | ||
1318 | * @context). It should return a &struct dentry which refers to the same | ||
1319 | * file that the file handle fragment refers to, and which passes the | ||
1320 | * acceptability test. If it cannot, it should return a %NULL pointer if | ||
1321 | * the file was found but no acceptable &dentries were available, or a | ||
1322 | * %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or | ||
1323 | * %ENOMEM). | ||
1324 | * | ||
1325 | * encode_fh: | ||
1326 | * @encode_fh should store in the file handle fragment @fh (using at most | ||
1327 | * @max_len bytes) information that can be used by @decode_fh to recover the | ||
1328 | * file refered to by the &struct dentry @de. If the @connectable flag is | ||
1329 | * set, the encode_fh() should store sufficient information so that a good | ||
1330 | * attempt can be made to find not only the file but also it's place in the | ||
1331 | * filesystem. This typically means storing a reference to de->d_parent in | ||
1332 | * the filehandle fragment. encode_fh() should return the number of bytes | ||
1333 | * stored or a negative error code such as %-ENOSPC | ||
1334 | * | ||
1335 | * get_name: | ||
1336 | * @get_name should find a name for the given @child in the given @parent | ||
1337 | * directory. The name should be stored in the @name (with the | ||
1338 | * understanding that it is already pointing to a a %NAME_MAX+1 sized | ||
1339 | * buffer. get_name() should return %0 on success, a negative error code | ||
1340 | * or error. @get_name will be called without @parent->i_mutex held. | ||
1341 | * | ||
1342 | * get_parent: | ||
1343 | * @get_parent should find the parent directory for the given @child which | ||
1344 | * is also a directory. In the event that it cannot be found, or storage | ||
1345 | * space cannot be allocated, a %ERR_PTR should be returned. | ||
1346 | * | ||
1347 | * get_dentry: | ||
1348 | * Given a &super_block (@sb) and a pointer to a file-system specific inode | ||
1349 | * identifier, possibly an inode number, (@inump) get_dentry() should find | ||
1350 | * the identified inode and return a dentry for that inode. Any suitable | ||
1351 | * dentry can be returned including, if necessary, a new dentry created with | ||
1352 | * d_alloc_root. The caller can then find any other extant dentrys by | ||
1353 | * following the d_alias links. If a new dentry was created using | ||
1354 | * d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry | ||
1355 | * should be d_rehash()ed. | ||
1356 | * | ||
1357 | * If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code | ||
1358 | * can be returned. The @inump will be whatever was passed to | ||
1359 | * nfsd_find_fh_dentry() in either the @obj or @parent parameters. | ||
1360 | * | ||
1361 | * Locking rules: | ||
1362 | * get_parent is called with child->d_inode->i_mutex down | ||
1363 | * get_name is not (which is possibly inconsistent) | ||
1364 | */ | ||
1365 | |||
1366 | struct export_operations { | ||
1367 | struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh, int fh_len, int fh_type, | ||
1368 | int (*acceptable)(void *context, struct dentry *de), | ||
1369 | void *context); | ||
1370 | int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len, | ||
1371 | int connectable); | ||
1372 | |||
1373 | /* the following are only called from the filesystem itself */ | ||
1374 | int (*get_name)(struct dentry *parent, char *name, | ||
1375 | struct dentry *child); | ||
1376 | struct dentry * (*get_parent)(struct dentry *child); | ||
1377 | struct dentry * (*get_dentry)(struct super_block *sb, void *inump); | ||
1378 | |||
1379 | /* This is set by the exporting module to a standard helper */ | ||
1380 | struct dentry * (*find_exported_dentry)( | ||
1381 | struct super_block *sb, void *obj, void *parent, | ||
1382 | int (*acceptable)(void *context, struct dentry *de), | ||
1383 | void *context); | ||
1384 | |||
1385 | |||
1386 | }; | ||
1387 | |||
1388 | extern struct dentry * | ||
1389 | find_exported_dentry(struct super_block *sb, void *obj, void *parent, | ||
1390 | int (*acceptable)(void *context, struct dentry *de), | ||
1391 | void *context); | ||
1392 | |||
1393 | struct file_system_type { | 1296 | struct file_system_type { |
1394 | const char *name; | 1297 | const char *name; |
1395 | int fs_flags; | 1298 | int fs_flags; |
@@ -1526,7 +1429,7 @@ extern void putname(const char *name); | |||
1526 | 1429 | ||
1527 | #ifdef CONFIG_BLOCK | 1430 | #ifdef CONFIG_BLOCK |
1528 | extern int register_blkdev(unsigned int, const char *); | 1431 | extern int register_blkdev(unsigned int, const char *); |
1529 | extern int unregister_blkdev(unsigned int, const char *); | 1432 | extern void unregister_blkdev(unsigned int, const char *); |
1530 | extern struct block_device *bdget(dev_t); | 1433 | extern struct block_device *bdget(dev_t); |
1531 | extern void bd_set_size(struct block_device *, loff_t size); | 1434 | extern void bd_set_size(struct block_device *, loff_t size); |
1532 | extern void bd_forget(struct inode *inode); | 1435 | extern void bd_forget(struct inode *inode); |
@@ -1566,7 +1469,7 @@ extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); | |||
1566 | extern int register_chrdev_region(dev_t, unsigned, const char *); | 1469 | extern int register_chrdev_region(dev_t, unsigned, const char *); |
1567 | extern int register_chrdev(unsigned int, const char *, | 1470 | extern int register_chrdev(unsigned int, const char *, |
1568 | const struct file_operations *); | 1471 | const struct file_operations *); |
1569 | extern int unregister_chrdev(unsigned int, const char *); | 1472 | extern void unregister_chrdev(unsigned int, const char *); |
1570 | extern void unregister_chrdev_region(dev_t, unsigned); | 1473 | extern void unregister_chrdev_region(dev_t, unsigned); |
1571 | extern int chrdev_open(struct inode *, struct file *); | 1474 | extern int chrdev_open(struct inode *, struct file *); |
1572 | extern void chrdev_show(struct seq_file *,off_t); | 1475 | extern void chrdev_show(struct seq_file *,off_t); |
@@ -2050,5 +1953,9 @@ static inline void free_secdata(void *secdata) | |||
2050 | { } | 1953 | { } |
2051 | #endif /* CONFIG_SECURITY */ | 1954 | #endif /* CONFIG_SECURITY */ |
2052 | 1955 | ||
1956 | int proc_nr_files(ctl_table *table, int write, struct file *filp, | ||
1957 | void __user *buffer, size_t *lenp, loff_t *ppos); | ||
1958 | |||
1959 | |||
2053 | #endif /* __KERNEL__ */ | 1960 | #endif /* __KERNEL__ */ |
2054 | #endif /* _LINUX_FS_H */ | 1961 | #endif /* _LINUX_FS_H */ |
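The reworked file_ra_state drops the separate ahead window: start/size describe the single current window, async_size marks how close to its end the next asynchronous readahead should be triggered, and ra_has_index() answers whether a page index falls inside it. A small host-side model of that check (not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct ra_window { unsigned long start, size, async_size; };

    static bool ra_has_index(const struct ra_window *ra, unsigned long index)
    {
            return index >= ra->start && index < ra->start + ra->size;
    }

    int main(void)
    {
            /* Window covers pages 100..131; async readahead is armed 8 pages early. */
            struct ra_window ra = { .start = 100, .size = 32, .async_size = 8 };
            printf("%d %d\n", ra_has_index(&ra, 120), ra_has_index(&ra, 140));
            return 0;
    }
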
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 12e631f0fb77..1831b196c70a 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
@@ -53,6 +53,7 @@ struct gianfar_platform_data { | |||
53 | u32 bus_id; | 53 | u32 bus_id; |
54 | u32 phy_id; | 54 | u32 phy_id; |
55 | u8 mac_addr[6]; | 55 | u8 mac_addr[6]; |
56 | phy_interface_t interface; | ||
56 | }; | 57 | }; |
57 | 58 | ||
58 | struct gianfar_mdio_data { | 59 | struct gianfar_mdio_data { |
@@ -112,7 +113,7 @@ struct fsl_usb2_platform_data { | |||
112 | struct fsl_spi_platform_data { | 113 | struct fsl_spi_platform_data { |
113 | u32 initial_spmode; /* initial SPMODE value */ | 114 | u32 initial_spmode; /* initial SPMODE value */ |
114 | u16 bus_num; | 115 | u16 bus_num; |
115 | 116 | bool qe_mode; | |
116 | /* board specific information */ | 117 | /* board specific information */ |
117 | u16 max_chipselect; | 118 | u16 max_chipselect; |
118 | void (*activate_cs)(u8 cs, u8 polarity); | 119 | void (*activate_cs)(u8 cs, u8 polarity); |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0d2ef0b082a6..bc68dd9a6d41 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -30,6 +30,9 @@ struct vm_area_struct; | |||
30 | * cannot handle allocation failures. | 30 | * cannot handle allocation failures. |
31 | * | 31 | * |
32 | * __GFP_NORETRY: The VM implementation must not retry indefinitely. | 32 | * __GFP_NORETRY: The VM implementation must not retry indefinitely. |
33 | * | ||
34 | * __GFP_MOVABLE: Flag that this page will be movable by the page migration | ||
35 | * mechanism or reclaimed | ||
33 | */ | 36 | */ |
34 | #define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */ | 37 | #define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */ |
35 | #define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */ | 38 | #define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */ |
@@ -45,6 +48,7 @@ struct vm_area_struct; | |||
45 | #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ | 48 | #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ |
46 | #define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ | 49 | #define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ |
47 | #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ | 50 | #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ |
51 | #define __GFP_MOVABLE ((__force gfp_t)0x80000u) /* Page is movable */ | ||
48 | 52 | ||
49 | #define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ | 53 | #define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ |
50 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) | 54 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) |
@@ -53,7 +57,8 @@ struct vm_area_struct; | |||
53 | #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ | 57 | #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ |
54 | __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ | 58 | __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ |
55 | __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \ | 59 | __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \ |
56 | __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE) | 60 | __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \ |
61 | __GFP_MOVABLE) | ||
57 | 62 | ||
58 | /* This equals 0, but use constants in case they ever change */ | 63 | /* This equals 0, but use constants in case they ever change */ |
59 | #define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) | 64 | #define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) |
@@ -65,6 +70,15 @@ struct vm_area_struct; | |||
65 | #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) | 70 | #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) |
66 | #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ | 71 | #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ |
67 | __GFP_HIGHMEM) | 72 | __GFP_HIGHMEM) |
73 | #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ | ||
74 | __GFP_HARDWALL | __GFP_HIGHMEM | \ | ||
75 | __GFP_MOVABLE) | ||
76 | #define GFP_NOFS_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_MOVABLE) | ||
77 | #define GFP_USER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ | ||
78 | __GFP_HARDWALL | __GFP_MOVABLE) | ||
79 | #define GFP_HIGHUSER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ | ||
80 | __GFP_HARDWALL | __GFP_HIGHMEM | \ | ||
81 | __GFP_MOVABLE) | ||
68 | 82 | ||
69 | #ifdef CONFIG_NUMA | 83 | #ifdef CONFIG_NUMA |
70 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) | 84 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) |
@@ -92,6 +106,9 @@ static inline enum zone_type gfp_zone(gfp_t flags) | |||
92 | if (flags & __GFP_DMA32) | 106 | if (flags & __GFP_DMA32) |
93 | return ZONE_DMA32; | 107 | return ZONE_DMA32; |
94 | #endif | 108 | #endif |
109 | if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) == | ||
110 | (__GFP_HIGHMEM | __GFP_MOVABLE)) | ||
111 | return ZONE_MOVABLE; | ||
95 | #ifdef CONFIG_HIGHMEM | 112 | #ifdef CONFIG_HIGHMEM |
96 | if (flags & __GFP_HIGHMEM) | 113 | if (flags & __GFP_HIGHMEM) |
97 | return ZONE_HIGHMEM; | 114 | return ZONE_HIGHMEM; |
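
Taken together, the gfp.h hunks above mean a __GFP_MOVABLE request only reaches ZONE_MOVABLE when __GFP_HIGHMEM is also set, which is exactly what the GFP_HIGHUSER_MOVABLE composite supplies. A minimal sketch of a caller; the helper function is hypothetical:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: allocate one page the VM may migrate or reclaim later. */
    static struct page *grab_movable_page(void)
    {
            /* __GFP_HIGHMEM | __GFP_MOVABLE makes gfp_zone() pick ZONE_MOVABLE;
             * the zonelist still falls back to the ordinary zones if it is empty. */
            return alloc_page(GFP_HIGHUSER_MOVABLE);
    }
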
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 98e2cce996a4..1fcb0033179e 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -73,10 +73,27 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | 75 | #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
76 | /** | ||
77 | * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags | ||
78 | * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE | ||
79 | * @vma: The VMA the page is to be allocated for | ||
80 | * @vaddr: The virtual address the page will be inserted into | ||
81 | * | ||
82 | * This function will allocate a page for a VMA but the caller is expected | ||
83 | * to specify via movableflags whether the page will be movable in the | ||
84 | * future or not | ||
85 | * | ||
86 | * An architecture may override this function by defining | ||
87 | * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own | ||
88 | * implementation. | ||
89 | */ | ||
76 | static inline struct page * | 90 | static inline struct page * |
77 | alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr) | 91 | __alloc_zeroed_user_highpage(gfp_t movableflags, |
92 | struct vm_area_struct *vma, | ||
93 | unsigned long vaddr) | ||
78 | { | 94 | { |
79 | struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr); | 95 | struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, |
96 | vma, vaddr); | ||
80 | 97 | ||
81 | if (page) | 98 | if (page) |
82 | clear_user_highpage(page, vaddr); | 99 | clear_user_highpage(page, vaddr); |
@@ -85,6 +102,21 @@ alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr) | |||
85 | } | 102 | } |
86 | #endif | 103 | #endif |
87 | 104 | ||
105 | /** | ||
106 | * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move | ||
107 | * @vma: The VMA the page is to be allocated for | ||
108 | * @vaddr: The virtual address the page will be inserted into | ||
109 | * | ||
110 | * This function will allocate a page for a VMA that the caller knows will | ||
111 | * be able to migrate in the future using move_pages() or be reclaimed | ||
112 | */ | ||
113 | static inline struct page * | ||
114 | alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, | ||
115 | unsigned long vaddr) | ||
116 | { | ||
117 | return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); | ||
118 | } | ||
119 | |||
88 | static inline void clear_highpage(struct page *page) | 120 | static inline void clear_highpage(struct page *page) |
89 | { | 121 | { |
90 | void *kaddr = kmap_atomic(page, KM_USER0); | 122 | void *kaddr = kmap_atomic(page, KM_USER0); |
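
A hedged sketch of how an anonymous-fault path would use the new movable variant; the surrounding function is hypothetical and the error handling is illustrative rather than copied from mm/memory.c:

    #include <linux/highmem.h>
    #include <linux/mm.h>

    /* Hypothetical fragment of a fault handler creating a brand-new anonymous page. */
    static int handle_anon_fault(struct vm_area_struct *vma, unsigned long address)
    {
            struct page *page;

            page = alloc_zeroed_user_highpage_movable(vma, address);
            if (!page)
                    return VM_FAULT_OOM;

            /* page comes back already zeroed; a real handler would now set up the pte. */
            return 0;       /* success: the old VM_FAULT_MINOR */
    }
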
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 2c13715e9dde..49b7053043ad 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -15,6 +15,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) | |||
15 | } | 15 | } |
16 | 16 | ||
17 | int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); | 17 | int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); |
18 | int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); | ||
18 | int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); | 19 | int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); |
19 | int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int); | 20 | int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int); |
20 | void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); | 21 | void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); |
@@ -29,6 +30,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to); | |||
29 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); | 30 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); |
30 | 31 | ||
31 | extern unsigned long max_huge_pages; | 32 | extern unsigned long max_huge_pages; |
33 | extern unsigned long hugepages_treat_as_movable; | ||
32 | extern const unsigned long hugetlb_zero, hugetlb_infinity; | 34 | extern const unsigned long hugetlb_zero, hugetlb_infinity; |
33 | extern int sysctl_hugetlb_shm_group; | 35 | extern int sysctl_hugetlb_shm_group; |
34 | 36 | ||
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 0e0fedd2039a..260d6d76c5f3 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
@@ -50,14 +50,14 @@ | |||
50 | */ | 50 | */ |
51 | #define JBD_DEFAULT_MAX_COMMIT_AGE 5 | 51 | #define JBD_DEFAULT_MAX_COMMIT_AGE 5 |
52 | 52 | ||
53 | #ifdef CONFIG_JBD_DEBUG | 53 | #ifdef CONFIG_JBD2_DEBUG |
54 | /* | 54 | /* |
55 | * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal | 55 | * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal |
56 | * consistency checks. By default we don't do this unless | 56 | * consistency checks. By default we don't do this unless |
57 | * CONFIG_JBD_DEBUG is on. | 57 | * CONFIG_JBD2_DEBUG is on. |
58 | */ | 58 | */ |
59 | #define JBD_EXPENSIVE_CHECKING | 59 | #define JBD_EXPENSIVE_CHECKING |
60 | extern int jbd2_journal_enable_debug; | 60 | extern u8 jbd2_journal_enable_debug; |
61 | 61 | ||
62 | #define jbd_debug(n, f, a...) \ | 62 | #define jbd_debug(n, f, a...) \ |
63 | do { \ | 63 | do { \ |
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 5f06527dca21..f73de6fb5c68 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h | |||
@@ -7,9 +7,9 @@ | |||
7 | 7 | ||
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | 9 | ||
10 | #define KSYM_NAME_LEN 127 | 10 | #define KSYM_NAME_LEN 128 |
11 | #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + KSYM_NAME_LEN + \ | 11 | #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ |
12 | 2*(BITS_PER_LONG*3/10) + MODULE_NAME_LEN + 1) | 12 | 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) |
13 | 13 | ||
14 | #ifdef CONFIG_KALLSYMS | 14 | #ifdef CONFIG_KALLSYMS |
15 | /* Lookup the address for a symbol. Returns 0 if not found. */ | 15 | /* Lookup the address for a symbol. Returns 0 if not found. */ |
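
KSYM_NAME_LEN now counts the terminating NUL, and KSYM_SYMBOL_LEN is adjusted so an on-stack buffer handed to sprint_symbol() stays exactly the right size. An illustrative use, assuming CONFIG_KALLSYMS and an arbitrary address supplied by the caller:

    #include <linux/kallsyms.h>
    #include <linux/kernel.h>

    static void report_caller(unsigned long addr)
    {
            char sym[KSYM_SYMBOL_LEN];      /* fits "name+0xoff/0xsize [module]" */

            sprint_symbol(sym, addr);
            printk(KERN_DEBUG "resolved %p to %s\n", (void *)addr, sym);
    }
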
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 7a4852505914..1eb9cde550c4 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -210,6 +210,7 @@ extern enum system_states { | |||
210 | #define TAINT_MACHINE_CHECK (1<<4) | 210 | #define TAINT_MACHINE_CHECK (1<<4) |
211 | #define TAINT_BAD_PAGE (1<<5) | 211 | #define TAINT_BAD_PAGE (1<<5) |
212 | #define TAINT_USER (1<<6) | 212 | #define TAINT_USER (1<<6) |
213 | #define TAINT_DIE (1<<7) | ||
213 | 214 | ||
214 | extern void dump_stack(void); | 215 | extern void dump_stack(void); |
215 | 216 | ||
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h index aea34e74c496..8c4350a9ed87 100644 --- a/include/linux/kernelcapi.h +++ b/include/linux/kernelcapi.h | |||
@@ -64,7 +64,7 @@ struct capi20_appl { | |||
64 | unsigned long nrecvdatapkt; | 64 | unsigned long nrecvdatapkt; |
65 | unsigned long nsentctlpkt; | 65 | unsigned long nsentctlpkt; |
66 | unsigned long nsentdatapkt; | 66 | unsigned long nsentdatapkt; |
67 | struct semaphore recv_sem; | 67 | struct mutex recv_mtx; |
68 | struct sk_buff_head recv_queue; | 68 | struct sk_buff_head recv_queue; |
69 | struct work_struct recv_work; | 69 | struct work_struct recv_work; |
70 | int release_in_progress; | 70 | int release_in_progress; |
diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 10f505c8431d..5dc13848891b 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h | |||
@@ -36,13 +36,57 @@ static inline int request_module(const char * name, ...) { return -ENOSYS; } | |||
36 | #define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) | 36 | #define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) |
37 | 37 | ||
38 | struct key; | 38 | struct key; |
39 | extern int call_usermodehelper_keys(char *path, char *argv[], char *envp[], | 39 | struct file; |
40 | struct key *session_keyring, int wait); | 40 | struct subprocess_info; |
41 | |||
42 | /* Allocate a subprocess_info structure */ | ||
43 | struct subprocess_info *call_usermodehelper_setup(char *path, | ||
44 | char **argv, char **envp); | ||
45 | |||
46 | /* Set various pieces of state into the subprocess_info structure */ | ||
47 | void call_usermodehelper_setkeys(struct subprocess_info *info, | ||
48 | struct key *session_keyring); | ||
49 | int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info, | ||
50 | struct file **filp); | ||
51 | void call_usermodehelper_setcleanup(struct subprocess_info *info, | ||
52 | void (*cleanup)(char **argv, char **envp)); | ||
53 | |||
54 | enum umh_wait { | ||
55 | UMH_NO_WAIT = -1, /* don't wait at all */ | ||
56 | UMH_WAIT_EXEC = 0, /* wait for the exec, but not the process */ | ||
57 | UMH_WAIT_PROC = 1, /* wait for the process to complete */ | ||
58 | }; | ||
59 | |||
60 | /* Actually execute the sub-process */ | ||
61 | int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait); | ||
62 | |||
63 | /* Free the subprocess_info. This is only needed if you're not going | ||
64 | to call call_usermodehelper_exec */ | ||
65 | void call_usermodehelper_freeinfo(struct subprocess_info *info); | ||
41 | 66 | ||
42 | static inline int | 67 | static inline int |
43 | call_usermodehelper(char *path, char **argv, char **envp, int wait) | 68 | call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) |
44 | { | 69 | { |
45 | return call_usermodehelper_keys(path, argv, envp, NULL, wait); | 70 | struct subprocess_info *info; |
71 | |||
72 | info = call_usermodehelper_setup(path, argv, envp); | ||
73 | if (info == NULL) | ||
74 | return -ENOMEM; | ||
75 | return call_usermodehelper_exec(info, wait); | ||
76 | } | ||
77 | |||
78 | static inline int | ||
79 | call_usermodehelper_keys(char *path, char **argv, char **envp, | ||
80 | struct key *session_keyring, enum umh_wait wait) | ||
81 | { | ||
82 | struct subprocess_info *info; | ||
83 | |||
84 | info = call_usermodehelper_setup(path, argv, envp); | ||
85 | if (info == NULL) | ||
86 | return -ENOMEM; | ||
87 | |||
88 | call_usermodehelper_setkeys(info, session_keyring); | ||
89 | return call_usermodehelper_exec(info, wait); | ||
46 | } | 90 | } |
47 | 91 | ||
48 | extern void usermodehelper_init(void); | 92 | extern void usermodehelper_init(void); |
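
A hedged sketch of driving the split-out helper API directly, for callers that need the extra control (keyring, stdin pipe, cleanup hook) hidden by the call_usermodehelper() wrapper; the helper path and arguments are made up:

    #include <linux/kmod.h>
    #include <linux/errno.h>

    static int run_my_helper(void)
    {
            static char *argv[] = { "/sbin/my-helper", "--oneshot", NULL };        /* hypothetical */
            static char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
            struct subprocess_info *info;

            info = call_usermodehelper_setup(argv[0], argv, envp);
            if (!info)
                    return -ENOMEM;

            /* Optionally: call_usermodehelper_setkeys(), _stdinpipe(), _setcleanup(). */

            /* Block until the helper exits and propagate its exit status. */
            return call_usermodehelper_exec(info, UMH_WAIT_PROC);
    }
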
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 06cbf41d32d2..aa2fe22b1baa 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
@@ -36,15 +36,24 @@ extern char uevent_helper[]; | |||
36 | /* counter to tag the uevent, read only except for the kobject core */ | 36 | /* counter to tag the uevent, read only except for the kobject core */ |
37 | extern u64 uevent_seqnum; | 37 | extern u64 uevent_seqnum; |
38 | 38 | ||
39 | /* the actions here must match the proper string in lib/kobject_uevent.c */ | 39 | /* |
40 | typedef int __bitwise kobject_action_t; | 40 | * The actions here must match the index to the string array |
41 | * in lib/kobject_uevent.c | ||
42 | * | ||
43 | * Do not add new actions here without checking with the driver-core | ||
44 | * maintainers. Action strings are not meant to express subsystem | ||
45 | * or device specific properties. In most cases you want to send a | ||
46 | * kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event | ||
47 | * specific variables added to the event environment. | ||
48 | */ | ||
41 | enum kobject_action { | 49 | enum kobject_action { |
42 | KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */ | 50 | KOBJ_ADD, |
43 | KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */ | 51 | KOBJ_REMOVE, |
44 | KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */ | 52 | KOBJ_CHANGE, |
45 | KOBJ_OFFLINE = (__force kobject_action_t) 0x04, /* device offline */ | 53 | KOBJ_MOVE, |
46 | KOBJ_ONLINE = (__force kobject_action_t) 0x05, /* device online */ | 54 | KOBJ_ONLINE, |
47 | KOBJ_MOVE = (__force kobject_action_t) 0x06, /* device move */ | 55 | KOBJ_OFFLINE, |
56 | KOBJ_MAX | ||
48 | }; | 57 | }; |
49 | 58 | ||
50 | struct kobject { | 59 | struct kobject { |
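
Per the new comment, subsystems should not grow the action list; they send KOBJ_CHANGE with event-specific environment variables instead. A hedged sketch, assuming dev is a struct device the driver already holds and with invented variable names:

    #include <linux/kobject.h>
    #include <linux/device.h>

    static void report_fault(struct device *dev)
    {
            char *envp[] = { "ERROR=1", "REASON=thermal_overload", NULL };

            /* The details ride in the event environment, not in a new action. */
            kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
    }
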
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 23adf6075ae4..51464d12a4e5 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -116,9 +116,12 @@ struct kprobe { | |||
116 | */ | 116 | */ |
117 | struct jprobe { | 117 | struct jprobe { |
118 | struct kprobe kp; | 118 | struct kprobe kp; |
119 | kprobe_opcode_t *entry; /* probe handling code to jump to */ | 119 | void *entry; /* probe handling code to jump to */ |
120 | }; | 120 | }; |
121 | 121 | ||
122 | /* For backward compatibility with old code using JPROBE_ENTRY() */ | ||
123 | #define JPROBE_ENTRY(handler) (handler) | ||
124 | |||
122 | DECLARE_PER_CPU(struct kprobe *, current_kprobe); | 125 | DECLARE_PER_CPU(struct kprobe *, current_kprobe); |
123 | DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | 126 | DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
124 | 127 | ||
@@ -211,6 +214,7 @@ int longjmp_break_handler(struct kprobe *, struct pt_regs *); | |||
211 | int register_jprobe(struct jprobe *p); | 214 | int register_jprobe(struct jprobe *p); |
212 | void unregister_jprobe(struct jprobe *p); | 215 | void unregister_jprobe(struct jprobe *p); |
213 | void jprobe_return(void); | 216 | void jprobe_return(void); |
217 | unsigned long arch_deref_entry_point(void *); | ||
214 | 218 | ||
215 | int register_kretprobe(struct kretprobe *rp); | 219 | int register_kretprobe(struct kretprobe *rp); |
216 | void unregister_kretprobe(struct kretprobe *rp); | 220 | void unregister_kretprobe(struct kretprobe *rp); |
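
With jprobe.entry now a plain void *, a handler can be assigned directly and JPROBE_ENTRY() is only kept so older modules keep building. A sketch of a jprobe on do_fork(), loosely following the kprobes sample conventions:

    #include <linux/kprobes.h>
    #include <linux/kernel.h>

    /* The handler must mirror do_fork()'s signature and end in jprobe_return(). */
    static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
                         struct pt_regs *regs, unsigned long stack_size,
                         int __user *parent_tidptr, int __user *child_tidptr)
    {
            printk(KERN_DEBUG "do_fork: clone_flags=0x%lx\n", clone_flags);
            jprobe_return();        /* control never falls past this point */
            return 0;
    }

    static struct jprobe fork_jprobe = {
            .entry          = jdo_fork,     /* no JPROBE_ENTRY() cast required */
            .kp.symbol_name = "do_fork",
    };

    /* register_jprobe(&fork_jprobe) from module init; unregister_jprobe() on exit. */
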
diff --git a/include/linux/lguest.h b/include/linux/lguest.h new file mode 100644 index 000000000000..500aace21ca7 --- /dev/null +++ b/include/linux/lguest.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* Things the lguest guest needs to know. Note: like all lguest interfaces, | ||
2 | * this is subject to wild and random change between versions. */ | ||
3 | #ifndef _ASM_LGUEST_H | ||
4 | #define _ASM_LGUEST_H | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | #include <asm/irq.h> | ||
8 | |||
9 | #define LHCALL_FLUSH_ASYNC 0 | ||
10 | #define LHCALL_LGUEST_INIT 1 | ||
11 | #define LHCALL_CRASH 2 | ||
12 | #define LHCALL_LOAD_GDT 3 | ||
13 | #define LHCALL_NEW_PGTABLE 4 | ||
14 | #define LHCALL_FLUSH_TLB 5 | ||
15 | #define LHCALL_LOAD_IDT_ENTRY 6 | ||
16 | #define LHCALL_SET_STACK 7 | ||
17 | #define LHCALL_TS 8 | ||
18 | #define LHCALL_SET_CLOCKEVENT 9 | ||
19 | #define LHCALL_HALT 10 | ||
20 | #define LHCALL_GET_WALLCLOCK 11 | ||
21 | #define LHCALL_BIND_DMA 12 | ||
22 | #define LHCALL_SEND_DMA 13 | ||
23 | #define LHCALL_SET_PTE 14 | ||
24 | #define LHCALL_SET_PMD 15 | ||
25 | #define LHCALL_LOAD_TLS 16 | ||
26 | |||
27 | #define LG_CLOCK_MIN_DELTA 100UL | ||
28 | #define LG_CLOCK_MAX_DELTA ULONG_MAX | ||
29 | |||
30 | #define LGUEST_TRAP_ENTRY 0x1F | ||
31 | |||
32 | static inline unsigned long | ||
33 | hcall(unsigned long call, | ||
34 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
35 | { | ||
36 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) | ||
37 | : "=a"(call) | ||
38 | : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) | ||
39 | : "memory"); | ||
40 | return call; | ||
41 | } | ||
42 | |||
43 | void async_hcall(unsigned long call, | ||
44 | unsigned long arg1, unsigned long arg2, unsigned long arg3); | ||
45 | |||
46 | /* Can't use our min() macro here: needs to be a constant */ | ||
47 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) | ||
48 | |||
49 | #define LHCALL_RING_SIZE 64 | ||
50 | struct hcall_ring | ||
51 | { | ||
52 | u32 eax, edx, ebx, ecx; | ||
53 | }; | ||
54 | |||
55 | /* All the good stuff happens here: guest registers it with LGUEST_INIT */ | ||
56 | struct lguest_data | ||
57 | { | ||
58 | /* Fields which change during running: */ | ||
59 | /* 512 == enabled (same as eflags) */ | ||
60 | unsigned int irq_enabled; | ||
61 | /* Interrupts blocked by guest. */ | ||
62 | DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); | ||
63 | |||
64 | /* Virtual address of page fault. */ | ||
65 | unsigned long cr2; | ||
66 | |||
67 | /* Async hypercall ring. 0xFF == done, 0 == pending. */ | ||
68 | u8 hcall_status[LHCALL_RING_SIZE]; | ||
69 | struct hcall_ring hcalls[LHCALL_RING_SIZE]; | ||
70 | |||
71 | /* Fields initialized by the hypervisor at boot: */ | ||
72 | /* Memory not to try to access */ | ||
73 | unsigned long reserve_mem; | ||
74 | /* ID of this guest (used by network driver to set ethernet address) */ | ||
75 | u16 guestid; | ||
76 | /* KHz for the TSC clock. */ | ||
77 | u32 tsc_khz; | ||
78 | |||
79 | /* Fields initialized by the guest at boot: */ | ||
80 | /* Instruction range to suppress interrupts even if enabled */ | ||
81 | unsigned long noirq_start, noirq_end; | ||
82 | }; | ||
83 | extern struct lguest_data lguest_data; | ||
84 | #endif /* __ASSEMBLY__ */ | ||
85 | #endif /* _ASM_LGUEST_H */ | ||
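
The inline hcall() above is the Guest's synchronous trap into the Host; unused arguments are simply passed as zero, and non-urgent calls can be queued through async_hcall() instead. A small illustrative use:

    /* Hypothetical Guest-side helper. */
    static void guest_flush_tlb(void)
    {
            /* Synchronously ask the Host to drop this Guest's shadow page tables. */
            hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
    }
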
diff --git a/include/linux/lguest_bus.h b/include/linux/lguest_bus.h new file mode 100644 index 000000000000..c9b4e05fee49 --- /dev/null +++ b/include/linux/lguest_bus.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef _ASM_LGUEST_DEVICE_H | ||
2 | #define _ASM_LGUEST_DEVICE_H | ||
3 | /* Everything you need to know about lguest devices. */ | ||
4 | #include <linux/device.h> | ||
5 | #include <linux/lguest.h> | ||
6 | #include <linux/lguest_launcher.h> | ||
7 | |||
8 | struct lguest_device { | ||
9 | /* Unique busid, and index into lguest_page->devices[] */ | ||
10 | unsigned int index; | ||
11 | |||
12 | struct device dev; | ||
13 | |||
14 | /* Driver can hang data off here. */ | ||
15 | void *private; | ||
16 | }; | ||
17 | |||
18 | /* By convention, each device can use irq index+1 if it wants to. */ | ||
19 | static inline int lgdev_irq(const struct lguest_device *dev) | ||
20 | { | ||
21 | return dev->index + 1; | ||
22 | } | ||
23 | |||
24 | /* dma args must not be vmalloced! */ | ||
25 | void lguest_send_dma(unsigned long key, struct lguest_dma *dma); | ||
26 | int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas, | ||
27 | unsigned int num, u8 irq); | ||
28 | void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas); | ||
29 | |||
30 | /* Map the virtual device space */ | ||
31 | void *lguest_map(unsigned long phys_addr, unsigned long pages); | ||
32 | void lguest_unmap(void *); | ||
33 | |||
34 | struct lguest_driver { | ||
35 | const char *name; | ||
36 | struct module *owner; | ||
37 | u16 device_type; | ||
38 | int (*probe)(struct lguest_device *dev); | ||
39 | void (*remove)(struct lguest_device *dev); | ||
40 | |||
41 | struct device_driver drv; | ||
42 | }; | ||
43 | |||
44 | extern int register_lguest_driver(struct lguest_driver *drv); | ||
45 | extern void unregister_lguest_driver(struct lguest_driver *drv); | ||
46 | |||
47 | extern struct lguest_device_desc *lguest_devices; /* Just past max_pfn */ | ||
48 | #endif /* _ASM_LGUEST_DEVICE_H */ | ||
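
A hedged sketch of a Guest-side driver binding to one of the device types defined in lguest_launcher.h; the probe body is only a placeholder:

    #include <linux/lguest_bus.h>
    #include <linux/module.h>

    static int lgnet_probe(struct lguest_device *lgdev)
    {
            /* A real driver maps the device pages here and requests lgdev_irq(lgdev). */
            return 0;
    }

    static struct lguest_driver lgnet_drv = {
            .name           = "lguestnet",
            .owner          = THIS_MODULE,
            .device_type    = LGUEST_DEVICE_T_NET,
            .probe          = lgnet_probe,
    };

    static int __init lgnet_init(void)
    {
            return register_lguest_driver(&lgnet_drv);
    }
    module_init(lgnet_init);
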
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h new file mode 100644 index 000000000000..0ba414a40c80 --- /dev/null +++ b/include/linux/lguest_launcher.h | |||
@@ -0,0 +1,73 @@ | |||
1 | #ifndef _ASM_LGUEST_USER | ||
2 | #define _ASM_LGUEST_USER | ||
3 | /* Everything the "lguest" userspace program needs to know. */ | ||
4 | /* They can register up to 32 arrays of lguest_dma. */ | ||
5 | #define LGUEST_MAX_DMA 32 | ||
6 | /* At most we can dma 16 lguest_dma in one op. */ | ||
7 | #define LGUEST_MAX_DMA_SECTIONS 16 | ||
8 | |||
9 | /* How many devices? Assume each one wants up to two dma arrays per device. */ | ||
10 | #define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2) | ||
11 | |||
12 | struct lguest_dma | ||
13 | { | ||
14 | /* 0 if free to be used, filled by hypervisor. */ | ||
15 | u32 used_len; | ||
16 | unsigned long addr[LGUEST_MAX_DMA_SECTIONS]; | ||
17 | u16 len[LGUEST_MAX_DMA_SECTIONS]; | ||
18 | }; | ||
19 | |||
20 | struct lguest_block_page | ||
21 | { | ||
22 | /* 0 is a read, 1 is a write. */ | ||
23 | int type; | ||
24 | u32 sector; /* Offset in device = sector * 512. */ | ||
25 | u32 bytes; /* Length expected to be read/written in bytes */ | ||
26 | /* 0 = pending, 1 = done, 2 = done, error */ | ||
27 | int result; | ||
28 | u32 num_sectors; /* Disk length = num_sectors * 512 */ | ||
29 | }; | ||
30 | |||
31 | /* There is a shared page of these. */ | ||
32 | struct lguest_net | ||
33 | { | ||
34 | /* Simply the mac address (with multicast bit meaning promisc). */ | ||
35 | unsigned char mac[6]; | ||
36 | }; | ||
37 | |||
38 | /* Where the Host expects the Guest to SEND_DMA console output to. */ | ||
39 | #define LGUEST_CONSOLE_DMA_KEY 0 | ||
40 | |||
41 | /* We have a page of these descriptors in the lguest_device page. */ | ||
42 | struct lguest_device_desc { | ||
43 | u16 type; | ||
44 | #define LGUEST_DEVICE_T_CONSOLE 1 | ||
45 | #define LGUEST_DEVICE_T_NET 2 | ||
46 | #define LGUEST_DEVICE_T_BLOCK 3 | ||
47 | |||
48 | u16 features; | ||
49 | #define LGUEST_NET_F_NOCSUM 0x4000 /* Don't bother checksumming */ | ||
50 | #define LGUEST_DEVICE_F_RANDOMNESS 0x8000 /* IRQ is fairly random */ | ||
51 | |||
52 | u16 status; | ||
53 | /* 256 and above are device specific. */ | ||
54 | #define LGUEST_DEVICE_S_ACKNOWLEDGE 1 /* We have seen device. */ | ||
55 | #define LGUEST_DEVICE_S_DRIVER 2 /* We have found a driver */ | ||
56 | #define LGUEST_DEVICE_S_DRIVER_OK 4 /* Driver says OK! */ | ||
57 | #define LGUEST_DEVICE_S_REMOVED 8 /* Device has gone away. */ | ||
58 | #define LGUEST_DEVICE_S_REMOVED_ACK 16 /* Driver has been told. */ | ||
59 | #define LGUEST_DEVICE_S_FAILED 128 /* Something actually failed */ | ||
60 | |||
61 | u16 num_pages; | ||
62 | u32 pfn; | ||
63 | }; | ||
64 | |||
65 | /* Write command first word is a request. */ | ||
66 | enum lguest_req | ||
67 | { | ||
68 | LHREQ_INITIALIZE, /* + pfnlimit, pgdir, start, pageoffset */ | ||
69 | LHREQ_GETDMA, /* + addr (returns &lguest_dma, irq in ->used_len) */ | ||
70 | LHREQ_IRQ, /* + irq */ | ||
71 | LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ | ||
72 | }; | ||
73 | #endif /* _ASM_LGUEST_USER */ | ||
diff --git a/include/linux/limits.h b/include/linux/limits.h index eaf2e099f125..2d0f94162fb3 100644 --- a/include/linux/limits.h +++ b/include/linux/limits.h | |||
@@ -5,8 +5,6 @@ | |||
5 | 5 | ||
6 | #define NGROUPS_MAX 65536 /* supplemental group IDs are available */ | 6 | #define NGROUPS_MAX 65536 /* supplemental group IDs are available */ |
7 | #define ARG_MAX 131072 /* # bytes of args + environ for exec() */ | 7 | #define ARG_MAX 131072 /* # bytes of args + environ for exec() */ |
8 | #define CHILD_MAX 999 /* no limit :-) */ | ||
9 | #define OPEN_MAX 256 /* # open files a process may have */ | ||
10 | #define LINK_MAX 127 /* # links a file may have */ | 8 | #define LINK_MAX 127 /* # links a file may have */ |
11 | #define MAX_CANON 255 /* size of the canonical input queue */ | 9 | #define MAX_CANON 255 /* size of the canonical input queue */ |
12 | #define MAX_INPUT 255 /* size of the type-ahead buffer */ | 10 | #define MAX_INPUT 255 /* size of the type-ahead buffer */ |
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h index 9c01bde5bf1b..08a92969c76e 100644 --- a/include/linux/linux_logo.h +++ b/include/linux/linux_logo.h | |||
@@ -33,5 +33,13 @@ struct linux_logo { | |||
33 | }; | 33 | }; |
34 | 34 | ||
35 | extern const struct linux_logo *fb_find_logo(int depth); | 35 | extern const struct linux_logo *fb_find_logo(int depth); |
36 | #ifdef CONFIG_FB_LOGO_EXTRA | ||
37 | extern void fb_append_extra_logo(const struct linux_logo *logo, | ||
38 | unsigned int n); | ||
39 | #else | ||
40 | static inline void fb_append_extra_logo(const struct linux_logo *logo, | ||
41 | unsigned int n) | ||
42 | {} | ||
43 | #endif | ||
36 | 44 | ||
37 | #endif /* _LINUX_LINUX_LOGO_H */ | 45 | #endif /* _LINUX_LINUX_LOGO_H */ |
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 246de1d84a26..6f1637c61e10 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h | |||
@@ -27,6 +27,7 @@ struct nlmsvc_binding { | |||
27 | struct nfs_fh *, | 27 | struct nfs_fh *, |
28 | struct file **); | 28 | struct file **); |
29 | void (*fclose)(struct file *); | 29 | void (*fclose)(struct file *); |
30 | unsigned long (*get_grace_period)(void); | ||
30 | }; | 31 | }; |
31 | 32 | ||
32 | extern struct nlmsvc_binding * nlmsvc_ops; | 33 | extern struct nlmsvc_binding * nlmsvc_ops; |
@@ -38,4 +39,12 @@ extern int nlmclnt_proc(struct inode *, int, struct file_lock *); | |||
38 | extern int lockd_up(int proto); | 39 | extern int lockd_up(int proto); |
39 | extern void lockd_down(void); | 40 | extern void lockd_down(void); |
40 | 41 | ||
42 | unsigned long get_nfs_grace_period(void); | ||
43 | |||
44 | #ifdef CONFIG_NFSD_V4 | ||
45 | unsigned long get_nfs4_grace_period(void); | ||
46 | #else | ||
47 | static inline unsigned long get_nfs4_grace_period(void) {return 0;} | ||
48 | #endif | ||
49 | |||
41 | #endif /* LINUX_LOCKD_BIND_H */ | 50 | #endif /* LINUX_LOCKD_BIND_H */ |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 14c937d345cb..0e843bf65877 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Runtime locking correctness validator | 2 | * Runtime locking correctness validator |
3 | * | 3 | * |
4 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 4 | * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
5 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
5 | * | 6 | * |
6 | * see Documentation/lockdep-design.txt for more details. | 7 | * see Documentation/lockdep-design.txt for more details. |
7 | */ | 8 | */ |
@@ -9,6 +10,7 @@ | |||
9 | #define __LINUX_LOCKDEP_H | 10 | #define __LINUX_LOCKDEP_H |
10 | 11 | ||
11 | struct task_struct; | 12 | struct task_struct; |
13 | struct lockdep_map; | ||
12 | 14 | ||
13 | #ifdef CONFIG_LOCKDEP | 15 | #ifdef CONFIG_LOCKDEP |
14 | 16 | ||
@@ -114,8 +116,44 @@ struct lock_class { | |||
114 | 116 | ||
115 | const char *name; | 117 | const char *name; |
116 | int name_version; | 118 | int name_version; |
119 | |||
120 | #ifdef CONFIG_LOCK_STAT | ||
121 | unsigned long contention_point[4]; | ||
122 | #endif | ||
123 | }; | ||
124 | |||
125 | #ifdef CONFIG_LOCK_STAT | ||
126 | struct lock_time { | ||
127 | s64 min; | ||
128 | s64 max; | ||
129 | s64 total; | ||
130 | unsigned long nr; | ||
131 | }; | ||
132 | |||
133 | enum bounce_type { | ||
134 | bounce_acquired_write, | ||
135 | bounce_acquired_read, | ||
136 | bounce_contended_write, | ||
137 | bounce_contended_read, | ||
138 | nr_bounce_types, | ||
139 | |||
140 | bounce_acquired = bounce_acquired_write, | ||
141 | bounce_contended = bounce_contended_write, | ||
117 | }; | 142 | }; |
118 | 143 | ||
144 | struct lock_class_stats { | ||
145 | unsigned long contention_point[4]; | ||
146 | struct lock_time read_waittime; | ||
147 | struct lock_time write_waittime; | ||
148 | struct lock_time read_holdtime; | ||
149 | struct lock_time write_holdtime; | ||
150 | unsigned long bounces[nr_bounce_types]; | ||
151 | }; | ||
152 | |||
153 | struct lock_class_stats lock_stats(struct lock_class *class); | ||
154 | void clear_lock_stats(struct lock_class *class); | ||
155 | #endif | ||
156 | |||
119 | /* | 157 | /* |
120 | * Map the lock object (the lock instance) to the lock-class object. | 158 | * Map the lock object (the lock instance) to the lock-class object. |
121 | * This is embedded into specific lock instances: | 159 | * This is embedded into specific lock instances: |
@@ -124,6 +162,9 @@ struct lockdep_map { | |||
124 | struct lock_class_key *key; | 162 | struct lock_class_key *key; |
125 | struct lock_class *class_cache; | 163 | struct lock_class *class_cache; |
126 | const char *name; | 164 | const char *name; |
165 | #ifdef CONFIG_LOCK_STAT | ||
166 | int cpu; | ||
167 | #endif | ||
127 | }; | 168 | }; |
128 | 169 | ||
129 | /* | 170 | /* |
@@ -165,6 +206,10 @@ struct held_lock { | |||
165 | unsigned long acquire_ip; | 206 | unsigned long acquire_ip; |
166 | struct lockdep_map *instance; | 207 | struct lockdep_map *instance; |
167 | 208 | ||
209 | #ifdef CONFIG_LOCK_STAT | ||
210 | u64 waittime_stamp; | ||
211 | u64 holdtime_stamp; | ||
212 | #endif | ||
168 | /* | 213 | /* |
169 | * The lock-stack is unified in that the lock chains of interrupt | 214 | * The lock-stack is unified in that the lock chains of interrupt |
170 | * contexts nest ontop of process context chains, but we 'separate' | 215 | * contexts nest ontop of process context chains, but we 'separate' |
@@ -281,6 +326,30 @@ struct lock_class_key { }; | |||
281 | 326 | ||
282 | #endif /* !LOCKDEP */ | 327 | #endif /* !LOCKDEP */ |
283 | 328 | ||
329 | #ifdef CONFIG_LOCK_STAT | ||
330 | |||
331 | extern void lock_contended(struct lockdep_map *lock, unsigned long ip); | ||
332 | extern void lock_acquired(struct lockdep_map *lock); | ||
333 | |||
334 | #define LOCK_CONTENDED(_lock, try, lock) \ | ||
335 | do { \ | ||
336 | if (!try(_lock)) { \ | ||
337 | lock_contended(&(_lock)->dep_map, _RET_IP_); \ | ||
338 | lock(_lock); \ | ||
339 | } \ | ||
340 | lock_acquired(&(_lock)->dep_map); \ | ||
341 | } while (0) | ||
342 | |||
343 | #else /* CONFIG_LOCK_STAT */ | ||
344 | |||
345 | #define lock_contended(lockdep_map, ip) do {} while (0) | ||
346 | #define lock_acquired(lockdep_map) do {} while (0) | ||
347 | |||
348 | #define LOCK_CONTENDED(_lock, try, lock) \ | ||
349 | lock(_lock) | ||
350 | |||
351 | #endif /* CONFIG_LOCK_STAT */ | ||
352 | |||
284 | #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) | 353 | #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) |
285 | extern void early_init_irq_lock_class(void); | 354 | extern void early_init_irq_lock_class(void); |
286 | #else | 355 | #else |
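
LOCK_CONTENDED() is meant to wrap a lock type's slow path so CONFIG_LOCK_STAT can time how long waiters block; with lock statistics disabled it degenerates to a plain lock call. A hedged sketch for a made-up lock type; the only real requirement is a lockdep_map member named dep_map, which spinlocks and mutexes already carry:

    /* Hypothetical lock type and helpers, shown only to illustrate the macro. */
    struct my_lock {
            int                     state;
            struct lockdep_map      dep_map;
    };

    static int my_trylock(struct my_lock *lock);            /* hypothetical fast path */
    static void my_lock_slowpath(struct my_lock *lock);     /* hypothetical slow path */

    static void my_lock_acquire(struct my_lock *lock)
    {
            /* On contention the wait is timed and charged to lock->dep_map before
             * the slow path blocks; uncontended acquires skip the bookkeeping. */
            LOCK_CONTENDED(lock, my_trylock, my_lock_slowpath);
    }
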
diff --git a/include/linux/magic.h b/include/linux/magic.h index 9d713c03e3da..36cc20dfd142 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h | |||
@@ -13,7 +13,6 @@ | |||
13 | #define HPFS_SUPER_MAGIC 0xf995e849 | 13 | #define HPFS_SUPER_MAGIC 0xf995e849 |
14 | #define ISOFS_SUPER_MAGIC 0x9660 | 14 | #define ISOFS_SUPER_MAGIC 0x9660 |
15 | #define JFFS2_SUPER_MAGIC 0x72b6 | 15 | #define JFFS2_SUPER_MAGIC 0x72b6 |
16 | #define KVMFS_SUPER_MAGIC 0x19700426 | ||
17 | #define ANON_INODE_FS_MAGIC 0x09041934 | 16 | #define ANON_INODE_FS_MAGIC 0x09041934 |
18 | 17 | ||
19 | #define MINIX_SUPER_MAGIC 0x137F /* original minix fs */ | 18 | #define MINIX_SUPER_MAGIC 0x137F /* original minix fs */ |
diff --git a/include/linux/major.h b/include/linux/major.h index 7e7c9093919a..0cb98053537a 100644 --- a/include/linux/major.h +++ b/include/linux/major.h | |||
@@ -158,6 +158,8 @@ | |||
158 | #define VXSPEC_MAJOR 200 /* VERITAS volume config driver */ | 158 | #define VXSPEC_MAJOR 200 /* VERITAS volume config driver */ |
159 | #define VXDMP_MAJOR 201 /* VERITAS volume multipath driver */ | 159 | #define VXDMP_MAJOR 201 /* VERITAS volume multipath driver */ |
160 | 160 | ||
161 | #define XENVBD_MAJOR 202 /* Xen virtual block device */ | ||
162 | |||
161 | #define MSR_MAJOR 202 | 163 | #define MSR_MAJOR 202 |
162 | #define CPUID_MAJOR 203 | 164 | #define CPUID_MAJOR 203 |
163 | 165 | ||
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index daabb3aa1ec6..e147cf50529f 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h | |||
@@ -159,7 +159,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p); | |||
159 | 159 | ||
160 | extern struct mempolicy default_policy; | 160 | extern struct mempolicy default_policy; |
161 | extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, | 161 | extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, |
162 | unsigned long addr); | 162 | unsigned long addr, gfp_t gfp_flags); |
163 | extern unsigned slab_node(struct mempolicy *policy); | 163 | extern unsigned slab_node(struct mempolicy *policy); |
164 | 164 | ||
165 | extern enum zone_type policy_zone; | 165 | extern enum zone_type policy_zone; |
@@ -256,9 +256,9 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p) | |||
256 | #define set_cpuset_being_rebound(x) do {} while (0) | 256 | #define set_cpuset_being_rebound(x) do {} while (0) |
257 | 257 | ||
258 | static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, | 258 | static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, |
259 | unsigned long addr) | 259 | unsigned long addr, gfp_t gfp_flags) |
260 | { | 260 | { |
261 | return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER); | 261 | return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags); |
262 | } | 262 | } |
263 | 263 | ||
264 | static inline int do_migrate_pages(struct mm_struct *mm, | 264 | static inline int do_migrate_pages(struct mm_struct *mm, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 97d0cddfd223..c456c3a1c28e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -168,6 +168,8 @@ extern unsigned int kobjsize(const void *objp); | |||
168 | #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ | 168 | #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ |
169 | #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ | 169 | #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ |
170 | 170 | ||
171 | #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ | ||
172 | |||
171 | #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ | 173 | #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ |
172 | #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS | 174 | #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS |
173 | #endif | 175 | #endif |
@@ -190,6 +192,30 @@ extern unsigned int kobjsize(const void *objp); | |||
190 | */ | 192 | */ |
191 | extern pgprot_t protection_map[16]; | 193 | extern pgprot_t protection_map[16]; |
192 | 194 | ||
195 | #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ | ||
196 | #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ | ||
197 | |||
198 | |||
199 | /* | ||
200 | * vm_fault is filled by the pagefault handler and passed to the vma's | ||
201 | * ->fault function. The vma's ->fault is responsible for returning a bitmask | ||
202 | * of VM_FAULT_xxx flags that give details about how the fault was handled. | ||
203 | * | ||
204 | * pgoff should be used in favour of virtual_address, if possible. If pgoff | ||
205 | * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear | ||
206 | * mapping support. | ||
207 | */ | ||
208 | struct vm_fault { | ||
209 | unsigned int flags; /* FAULT_FLAG_xxx flags */ | ||
210 | pgoff_t pgoff; /* Logical page offset based on vma */ | ||
211 | void __user *virtual_address; /* Faulting virtual address */ | ||
212 | |||
213 | struct page *page; /* ->fault handlers should return a | ||
214 | * page here, unless VM_FAULT_NOPAGE | ||
215 | * is set (which is also implied by | ||
216 | * VM_FAULT_ERROR). | ||
217 | */ | ||
218 | }; | ||
193 | 219 | ||
194 | /* | 220 | /* |
195 | * These are the virtual MM functions - opening of an area, closing and | 221 | * These are the virtual MM functions - opening of an area, closing and |
@@ -199,9 +225,11 @@ extern pgprot_t protection_map[16]; | |||
199 | struct vm_operations_struct { | 225 | struct vm_operations_struct { |
200 | void (*open)(struct vm_area_struct * area); | 226 | void (*open)(struct vm_area_struct * area); |
201 | void (*close)(struct vm_area_struct * area); | 227 | void (*close)(struct vm_area_struct * area); |
202 | struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type); | 228 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); |
203 | unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address); | 229 | struct page *(*nopage)(struct vm_area_struct *area, |
204 | int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); | 230 | unsigned long address, int *type); |
231 | unsigned long (*nopfn)(struct vm_area_struct *area, | ||
232 | unsigned long address); | ||
205 | 233 | ||
206 | /* notification that a previously read-only page is about to become | 234 | /* notification that a previously read-only page is about to become |
207 | * writable, if an error is returned it will cause a SIGBUS */ | 235 | * writable, if an error is returned it will cause a SIGBUS */ |
@@ -599,6 +627,7 @@ static inline struct address_space *page_mapping(struct page *page) | |||
599 | { | 627 | { |
600 | struct address_space *mapping = page->mapping; | 628 | struct address_space *mapping = page->mapping; |
601 | 629 | ||
630 | VM_BUG_ON(PageSlab(page)); | ||
602 | if (unlikely(PageSwapCache(page))) | 631 | if (unlikely(PageSwapCache(page))) |
603 | mapping = &swapper_space; | 632 | mapping = &swapper_space; |
604 | #ifdef CONFIG_SLUB | 633 | #ifdef CONFIG_SLUB |
@@ -654,7 +683,6 @@ static inline int page_mapped(struct page *page) | |||
654 | */ | 683 | */ |
655 | #define NOPAGE_SIGBUS (NULL) | 684 | #define NOPAGE_SIGBUS (NULL) |
656 | #define NOPAGE_OOM ((struct page *) (-1)) | 685 | #define NOPAGE_OOM ((struct page *) (-1)) |
657 | #define NOPAGE_REFAULT ((struct page *) (-2)) /* Return to userspace, rerun */ | ||
658 | 686 | ||
659 | /* | 687 | /* |
660 | * Error return values for the *_nopfn functions | 688 | * Error return values for the *_nopfn functions |
@@ -668,16 +696,18 @@ static inline int page_mapped(struct page *page) | |||
668 | * Used to decide whether a process gets delivered SIGBUS or | 696 | * Used to decide whether a process gets delivered SIGBUS or |
669 | * just gets major/minor fault counters bumped up. | 697 | * just gets major/minor fault counters bumped up. |
670 | */ | 698 | */ |
671 | #define VM_FAULT_OOM 0x00 | 699 | |
672 | #define VM_FAULT_SIGBUS 0x01 | 700 | #define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */ |
673 | #define VM_FAULT_MINOR 0x02 | 701 | |
674 | #define VM_FAULT_MAJOR 0x03 | 702 | #define VM_FAULT_OOM 0x0001 |
675 | 703 | #define VM_FAULT_SIGBUS 0x0002 | |
676 | /* | 704 | #define VM_FAULT_MAJOR 0x0004 |
677 | * Special case for get_user_pages. | 705 | #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ |
678 | * Must be in a distinct bit from the above VM_FAULT_ flags. | 706 | |
679 | */ | 707 | #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ |
680 | #define VM_FAULT_WRITE 0x10 | 708 | #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ |
709 | |||
710 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS) | ||
681 | 711 | ||
682 | #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) | 712 | #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) |
683 | 713 | ||
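
Under the new scheme a ->fault handler returns this bitmask directly (0, the old VM_FAULT_MINOR, on plain success) and hands the page back through the vm_fault descriptor. A hedged sketch of a minimal handler; my_lookup_page() is a made-up stand-in for a driver's own cache lookup:

    #include <linux/mm.h>

    static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            /* pgoff is preferred over virtual_address in the new interface. */
            struct page *page = my_lookup_page(vma->vm_private_data, vmf->pgoff);

            if (!page)
                    return VM_FAULT_SIGBUS;

            get_page(page);         /* returned with a reference the fault core consumes */
            vmf->page = page;
            return 0;               /* minor fault, no special flags */
    }

    static struct vm_operations_struct my_vm_ops = {
            .fault  = my_fault,
    };
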
@@ -761,20 +791,10 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, | |||
761 | 791 | ||
762 | extern int vmtruncate(struct inode * inode, loff_t offset); | 792 | extern int vmtruncate(struct inode * inode, loff_t offset); |
763 | extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); | 793 | extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); |
764 | extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot); | ||
765 | extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot); | ||
766 | 794 | ||
767 | #ifdef CONFIG_MMU | 795 | #ifdef CONFIG_MMU |
768 | extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, | 796 | extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
769 | unsigned long address, int write_access); | 797 | unsigned long address, int write_access); |
770 | |||
771 | static inline int handle_mm_fault(struct mm_struct *mm, | ||
772 | struct vm_area_struct *vma, unsigned long address, | ||
773 | int write_access) | ||
774 | { | ||
775 | return __handle_mm_fault(mm, vma, address, write_access) & | ||
776 | (~VM_FAULT_WRITE); | ||
777 | } | ||
778 | #else | 798 | #else |
779 | static inline int handle_mm_fault(struct mm_struct *mm, | 799 | static inline int handle_mm_fault(struct mm_struct *mm, |
780 | struct vm_area_struct *vma, unsigned long address, | 800 | struct vm_area_struct *vma, unsigned long address, |
@@ -788,7 +808,6 @@ static inline int handle_mm_fault(struct mm_struct *mm, | |||
788 | 808 | ||
789 | extern int make_pages_present(unsigned long addr, unsigned long end); | 809 | extern int make_pages_present(unsigned long addr, unsigned long end); |
790 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); | 810 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); |
791 | void install_arg_page(struct vm_area_struct *, struct page *, unsigned long); | ||
792 | 811 | ||
793 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, | 812 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, |
794 | int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); | 813 | int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); |
@@ -805,32 +824,42 @@ int FASTCALL(set_page_dirty(struct page *page)); | |||
805 | int set_page_dirty_lock(struct page *page); | 824 | int set_page_dirty_lock(struct page *page); |
806 | int clear_page_dirty_for_io(struct page *page); | 825 | int clear_page_dirty_for_io(struct page *page); |
807 | 826 | ||
827 | extern unsigned long move_page_tables(struct vm_area_struct *vma, | ||
828 | unsigned long old_addr, struct vm_area_struct *new_vma, | ||
829 | unsigned long new_addr, unsigned long len); | ||
808 | extern unsigned long do_mremap(unsigned long addr, | 830 | extern unsigned long do_mremap(unsigned long addr, |
809 | unsigned long old_len, unsigned long new_len, | 831 | unsigned long old_len, unsigned long new_len, |
810 | unsigned long flags, unsigned long new_addr); | 832 | unsigned long flags, unsigned long new_addr); |
833 | extern int mprotect_fixup(struct vm_area_struct *vma, | ||
834 | struct vm_area_struct **pprev, unsigned long start, | ||
835 | unsigned long end, unsigned long newflags); | ||
811 | 836 | ||
812 | /* | 837 | /* |
813 | * Prototype to add a shrinker callback for ageable caches. | 838 | * A callback you can register to apply pressure to ageable caches. |
814 | * | ||
815 | * These functions are passed a count `nr_to_scan' and a gfpmask. They should | ||
816 | * scan `nr_to_scan' objects, attempting to free them. | ||
817 | * | 839 | * |
818 | * The callback must return the number of objects which remain in the cache. | 840 | * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should |
841 | * look through the least-recently-used 'nr_to_scan' entries and | ||
842 | * attempt to free them up. It should return the number of objects | ||
843 | * which remain in the cache. If it returns -1, it means it cannot do | ||
844 | * any scanning at this time (eg. there is a risk of deadlock). | ||
819 | * | 845 | * |
820 | * The callback will be passed nr_to_scan == 0 when the VM is querying the | 846 | * The 'gfpmask' refers to the allocation we are currently trying to |
821 | * cache size, so a fastpath for that case is appropriate. | 847 | * fulfil. |
822 | */ | 848 | * |
823 | typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask); | 849 | * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is |
824 | 850 | * querying the cache size, so a fastpath for that case is appropriate. | |
825 | /* | ||
826 | * Add an aging callback. The int is the number of 'seeks' it takes | ||
827 | * to recreate one of the objects that these functions age. | ||
828 | */ | 851 | */ |
852 | struct shrinker { | ||
853 | int (*shrink)(int nr_to_scan, gfp_t gfp_mask); | ||
854 | int seeks; /* seeks to recreate an obj */ | ||
829 | 855 | ||
830 | #define DEFAULT_SEEKS 2 | 856 | /* These are for internal use */ |
831 | struct shrinker; | 857 | struct list_head list; |
832 | extern struct shrinker *set_shrinker(int, shrinker_t); | 858 | long nr; /* objs pending delete */ |
833 | extern void remove_shrinker(struct shrinker *shrinker); | 859 | }; |
860 | #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ | ||
861 | extern void register_shrinker(struct shrinker *); | ||
862 | extern void unregister_shrinker(struct shrinker *); | ||
834 | 863 | ||
835 | /* | 864 | /* |
836 | * Some shared mappings will want the pages marked read-only | 865 | * Some shared mappings will want the pages marked read-only |
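
The shrinker callback keeps its old contract from the hunk above (scan up to nr_to_scan objects, return how many remain, or -1 if scanning is impossible right now), but registration now passes a caller-owned struct. A hedged sketch for a hypothetical object cache:

    static int my_cache_count;              /* hypothetical count of cached objects */

    static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
    {
            if (nr_to_scan)
                    my_cache_count -= my_cache_free_lru(nr_to_scan);        /* hypothetical */
            return my_cache_count;
    }

    static struct shrinker my_cache_shrinker = {
            .shrink = my_cache_shrink,
            .seeks  = DEFAULT_SEEKS,
    };

    /* register_shrinker(&my_cache_shrinker) at init,
     * unregister_shrinker(&my_cache_shrinker) at teardown. */
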
@@ -1099,9 +1128,7 @@ extern void truncate_inode_pages_range(struct address_space *, | |||
1099 | loff_t lstart, loff_t lend); | 1128 | loff_t lstart, loff_t lend); |
1100 | 1129 | ||
1101 | /* generic vm_area_ops exported for stackable file systems */ | 1130 | /* generic vm_area_ops exported for stackable file systems */ |
1102 | extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *); | 1131 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); |
1103 | extern int filemap_populate(struct vm_area_struct *, unsigned long, | ||
1104 | unsigned long, pgprot_t, unsigned long, int); | ||
1105 | 1132 | ||
1106 | /* mm/page-writeback.c */ | 1133 | /* mm/page-writeback.c */ |
1107 | int write_one_page(struct page *page, int wait); | 1134 | int write_one_page(struct page *page, int wait); |
@@ -1116,13 +1143,20 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp, | |||
1116 | pgoff_t offset, unsigned long nr_to_read); | 1143 | pgoff_t offset, unsigned long nr_to_read); |
1117 | int force_page_cache_readahead(struct address_space *mapping, struct file *filp, | 1144 | int force_page_cache_readahead(struct address_space *mapping, struct file *filp, |
1118 | pgoff_t offset, unsigned long nr_to_read); | 1145 | pgoff_t offset, unsigned long nr_to_read); |
1119 | unsigned long page_cache_readahead(struct address_space *mapping, | 1146 | |
1120 | struct file_ra_state *ra, | 1147 | void page_cache_sync_readahead(struct address_space *mapping, |
1121 | struct file *filp, | 1148 | struct file_ra_state *ra, |
1122 | pgoff_t offset, | 1149 | struct file *filp, |
1123 | unsigned long size); | 1150 | pgoff_t offset, |
1124 | void handle_ra_miss(struct address_space *mapping, | 1151 | unsigned long size); |
1125 | struct file_ra_state *ra, pgoff_t offset); | 1152 | |
1153 | void page_cache_async_readahead(struct address_space *mapping, | ||
1154 | struct file_ra_state *ra, | ||
1155 | struct file *filp, | ||
1156 | struct page *pg, | ||
1157 | pgoff_t offset, | ||
1158 | unsigned long size); | ||
1159 | |||
1126 | unsigned long max_sane_readahead(unsigned long nr); | 1160 | unsigned long max_sane_readahead(unsigned long nr); |
1127 | 1161 | ||
1128 | /* Do stack extension */ | 1162 | /* Do stack extension */ |
@@ -1130,6 +1164,8 @@ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); | |||
1130 | #ifdef CONFIG_IA64 | 1164 | #ifdef CONFIG_IA64 |
1131 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); | 1165 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); |
1132 | #endif | 1166 | #endif |
1167 | extern int expand_stack_downwards(struct vm_area_struct *vma, | ||
1168 | unsigned long address); | ||
1133 | 1169 | ||
1134 | /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ | 1170 | /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ |
1135 | extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); | 1171 | extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); |
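
The readahead entry points declared above replace page_cache_readahead()/handle_ra_miss(): the sync variant is called when the wanted page is missing from the page cache, the async variant when a page previously tagged for readahead is actually hit. A hedged sketch of the expected calling pattern in a generic read loop; the parameters only mirror what such a loop would have at hand:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static void readahead_hook(struct address_space *mapping, struct file *filp,
                               struct page *page, pgoff_t index,
                               unsigned long req_pages)
    {
            if (!page)
                    page_cache_sync_readahead(mapping, &filp->f_ra, filp,
                                              index, req_pages);
            else if (PageReadahead(page))
                    page_cache_async_readahead(mapping, &filp->f_ra, filp,
                                               page, index, req_pages);
    }
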
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 04b1636a970b..da8eb8ad9e9b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -24,6 +24,14 @@ | |||
24 | #endif | 24 | #endif |
25 | #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) | 25 | #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) |
26 | 26 | ||
27 | /* | ||
28 | * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed | ||
29 | * costly to service. That is between allocation orders which should | ||
30 | * coalesce naturally under reasonable reclaim pressure and those which | ||
31 | * will not. | ||
32 | */ | ||
33 | #define PAGE_ALLOC_COSTLY_ORDER 3 | ||
34 | |||
27 | struct free_area { | 35 | struct free_area { |
28 | struct list_head free_list; | 36 | struct list_head free_list; |
29 | unsigned long nr_free; | 37 | unsigned long nr_free; |
@@ -146,6 +154,7 @@ enum zone_type { | |||
146 | */ | 154 | */ |
147 | ZONE_HIGHMEM, | 155 | ZONE_HIGHMEM, |
148 | #endif | 156 | #endif |
157 | ZONE_MOVABLE, | ||
149 | MAX_NR_ZONES | 158 | MAX_NR_ZONES |
150 | }; | 159 | }; |
151 | 160 | ||
@@ -167,6 +176,7 @@ enum zone_type { | |||
167 | + defined(CONFIG_ZONE_DMA32) \ | 176 | + defined(CONFIG_ZONE_DMA32) \ |
168 | + 1 \ | 177 | + 1 \ |
169 | + defined(CONFIG_HIGHMEM) \ | 178 | + defined(CONFIG_HIGHMEM) \ |
179 | + 1 \ | ||
170 | ) | 180 | ) |
171 | #if __ZONE_COUNT < 2 | 181 | #if __ZONE_COUNT < 2 |
172 | #define ZONES_SHIFT 0 | 182 | #define ZONES_SHIFT 0 |
@@ -499,10 +509,22 @@ static inline int populated_zone(struct zone *zone) | |||
499 | return (!!zone->present_pages); | 509 | return (!!zone->present_pages); |
500 | } | 510 | } |
501 | 511 | ||
512 | extern int movable_zone; | ||
513 | |||
514 | static inline int zone_movable_is_highmem(void) | ||
515 | { | ||
516 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP) | ||
517 | return movable_zone == ZONE_HIGHMEM; | ||
518 | #else | ||
519 | return 0; | ||
520 | #endif | ||
521 | } | ||
522 | |||
502 | static inline int is_highmem_idx(enum zone_type idx) | 523 | static inline int is_highmem_idx(enum zone_type idx) |
503 | { | 524 | { |
504 | #ifdef CONFIG_HIGHMEM | 525 | #ifdef CONFIG_HIGHMEM |
505 | return (idx == ZONE_HIGHMEM); | 526 | return (idx == ZONE_HIGHMEM || |
527 | (idx == ZONE_MOVABLE && zone_movable_is_highmem())); | ||
506 | #else | 528 | #else |
507 | return 0; | 529 | return 0; |
508 | #endif | 530 | #endif |
@@ -522,7 +544,9 @@ static inline int is_normal_idx(enum zone_type idx) | |||
522 | static inline int is_highmem(struct zone *zone) | 544 | static inline int is_highmem(struct zone *zone) |
523 | { | 545 | { |
524 | #ifdef CONFIG_HIGHMEM | 546 | #ifdef CONFIG_HIGHMEM |
525 | return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM; | 547 | int zone_idx = zone - zone->zone_pgdat->node_zones; |
548 | return zone_idx == ZONE_HIGHMEM || | ||
549 | (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem()); | ||
526 | #else | 550 | #else |
527 | return 0; | 551 | return 0; |
528 | #endif | 552 | #endif |
diff --git a/include/linux/namei.h b/include/linux/namei.h index b7dd24917f0d..6c38efbd810f 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
@@ -69,8 +69,8 @@ extern int FASTCALL(__user_walk_fd(int dfd, const char __user *, unsigned, struc | |||
69 | #define user_path_walk_link(name,nd) \ | 69 | #define user_path_walk_link(name,nd) \ |
70 | __user_walk_fd(AT_FDCWD, name, 0, nd) | 70 | __user_walk_fd(AT_FDCWD, name, 0, nd) |
71 | extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *)); | 71 | extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *)); |
72 | extern int FASTCALL(path_walk(const char *, struct nameidata *)); | 72 | extern int vfs_path_lookup(struct dentry *, struct vfsmount *, |
73 | extern int FASTCALL(link_path_walk(const char *, struct nameidata *)); | 73 | const char *, unsigned int, struct nameidata *); |
74 | extern void path_release(struct nameidata *); | 74 | extern void path_release(struct nameidata *); |
75 | extern void path_release_on_umount(struct nameidata *); | 75 | extern void path_release_on_umount(struct nameidata *); |
76 | 76 | ||
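
vfs_path_lookup() replaces the exported path_walk()/link_path_walk() pair for callers, such as nfsd, that already hold a starting dentry/vfsmount. A hedged sketch; dentry and mnt are assumed to be pinned by the caller and the relative path is invented:

    #include <linux/namei.h>
    #include <linux/fs.h>

    static int lookup_under_export(struct dentry *dentry, struct vfsmount *mnt)
    {
            struct nameidata nd;
            int err;

            err = vfs_path_lookup(dentry, mnt, "some/relative/name",
                                  LOOKUP_FOLLOW, &nd);
            if (err)
                    return err;

            /* ... use nd.dentry / nd.mnt ... */
            path_release(&nd);      /* drop the references the lookup took */
            return 0;
    }
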
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index da7a13c97eb8..9820ca1e45e2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1098,10 +1098,8 @@ extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all | |||
1098 | extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); | 1098 | extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); |
1099 | extern int dev_mc_sync(struct net_device *to, struct net_device *from); | 1099 | extern int dev_mc_sync(struct net_device *to, struct net_device *from); |
1100 | extern void dev_mc_unsync(struct net_device *to, struct net_device *from); | 1100 | extern void dev_mc_unsync(struct net_device *to, struct net_device *from); |
1101 | extern void dev_mc_discard(struct net_device *dev); | ||
1102 | extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all); | 1101 | extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all); |
1103 | extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly); | 1102 | extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly); |
1104 | extern void __dev_addr_discard(struct dev_addr_list **list); | ||
1105 | extern void dev_set_promiscuity(struct net_device *dev, int inc); | 1103 | extern void dev_set_promiscuity(struct net_device *dev, int inc); |
1106 | extern void dev_set_allmulti(struct net_device *dev, int inc); | 1104 | extern void dev_set_allmulti(struct net_device *dev, int inc); |
1107 | extern void netdev_state_change(struct net_device *dev); | 1105 | extern void netdev_state_change(struct net_device *dev); |
diff --git a/include/linux/netfilter_ipv4/ipt_iprange.h b/include/linux/netfilter_ipv4/ipt_iprange.h index 34ab0fb736e2..a92fefc3c7ec 100644 --- a/include/linux/netfilter_ipv4/ipt_iprange.h +++ b/include/linux/netfilter_ipv4/ipt_iprange.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _IPT_IPRANGE_H | 1 | #ifndef _IPT_IPRANGE_H |
2 | #define _IPT_IPRANGE_H | 2 | #define _IPT_IPRANGE_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | |||
4 | #define IPRANGE_SRC 0x01 /* Match source IP address */ | 6 | #define IPRANGE_SRC 0x01 /* Match source IP address */ |
5 | #define IPRANGE_DST 0x02 /* Match destination IP address */ | 7 | #define IPRANGE_DST 0x02 /* Match destination IP address */ |
6 | #define IPRANGE_SRC_INV 0x10 /* Negate the condition */ | 8 | #define IPRANGE_SRC_INV 0x10 /* Negate the condition */ |
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 9f62d6182d32..5cd192469096 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h | |||
@@ -42,6 +42,9 @@ | |||
42 | #define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ | 42 | #define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ |
43 | #define NFSEXP_ALLFLAGS 0xFE3F | 43 | #define NFSEXP_ALLFLAGS 0xFE3F |
44 | 44 | ||
45 | /* The flags that may vary depending on security flavor: */ | ||
46 | #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ | ||
47 | | NFSEXP_ALLSQUASH) | ||
45 | 48 | ||
46 | #ifdef __KERNEL__ | 49 | #ifdef __KERNEL__ |
47 | 50 | ||
@@ -64,6 +67,19 @@ struct nfsd4_fs_locations { | |||
64 | int migrated; | 67 | int migrated; |
65 | }; | 68 | }; |
66 | 69 | ||
70 | /* | ||
71 | * We keep an array of pseudoflavors with the export, in order from most | ||
72 | * to least preferred. For the foreseeable future, we don't expect more | ||
73 | * than the eight pseudoflavors null, unix, krb5, krb5i, krb5p, spkm3, | ||
74 | * spkm3i, and spkm3p (and using all 8 at once should be rare). | ||
75 | */ | ||
76 | #define MAX_SECINFO_LIST 8 | ||
77 | |||
78 | struct exp_flavor_info { | ||
79 | u32 pseudoflavor; | ||
80 | u32 flags; | ||
81 | }; | ||
82 | |||
67 | struct svc_export { | 83 | struct svc_export { |
68 | struct cache_head h; | 84 | struct cache_head h; |
69 | struct auth_domain * ex_client; | 85 | struct auth_domain * ex_client; |
@@ -76,6 +92,8 @@ struct svc_export { | |||
76 | int ex_fsid; | 92 | int ex_fsid; |
77 | unsigned char * ex_uuid; /* 16 byte fsid */ | 93 | unsigned char * ex_uuid; /* 16 byte fsid */ |
78 | struct nfsd4_fs_locations ex_fslocs; | 94 | struct nfsd4_fs_locations ex_fslocs; |
95 | int ex_nflavors; | ||
96 | struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST]; | ||
79 | }; | 97 | }; |
80 | 98 | ||
81 | /* an "export key" (expkey) maps a filehandle fragment to an | 99 |
@@ -95,10 +113,11 @@ struct svc_expkey { | |||
95 | 113 | ||
96 | #define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) | 114 | #define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) |
97 | #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) | 115 | #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) |
98 | #define EX_RDONLY(exp) ((exp)->ex_flags & NFSEXP_READONLY) | ||
99 | #define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) | 116 | #define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) |
100 | #define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) | 117 | #define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) |
101 | 118 | ||
119 | int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp); | ||
120 | __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp); | ||
102 | 121 | ||
103 | /* | 122 | /* |
104 | * Function declarations | 123 | * Function declarations |
@@ -112,13 +131,19 @@ struct svc_export * exp_get_by_name(struct auth_domain *clp, | |||
112 | struct vfsmount *mnt, | 131 | struct vfsmount *mnt, |
113 | struct dentry *dentry, | 132 | struct dentry *dentry, |
114 | struct cache_req *reqp); | 133 | struct cache_req *reqp); |
134 | struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, | ||
135 | struct vfsmount *, | ||
136 | struct dentry *); | ||
115 | struct svc_export * exp_parent(struct auth_domain *clp, | 137 | struct svc_export * exp_parent(struct auth_domain *clp, |
116 | struct vfsmount *mnt, | 138 | struct vfsmount *mnt, |
117 | struct dentry *dentry, | 139 | struct dentry *dentry, |
118 | struct cache_req *reqp); | 140 | struct cache_req *reqp); |
141 | struct svc_export * rqst_exp_parent(struct svc_rqst *, | ||
142 | struct vfsmount *mnt, | ||
143 | struct dentry *dentry); | ||
119 | int exp_rootfh(struct auth_domain *, | 144 | int exp_rootfh(struct auth_domain *, |
120 | char *path, struct knfsd_fh *, int maxsize); | 145 | char *path, struct knfsd_fh *, int maxsize); |
121 | __be32 exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq); | 146 | __be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); |
122 | __be32 nfserrno(int errno); | 147 | __be32 nfserrno(int errno); |
123 | 148 | ||
124 | extern struct cache_detail svc_export_cache; | 149 | extern struct cache_detail svc_export_cache; |
@@ -135,6 +160,7 @@ static inline void exp_get(struct svc_export *exp) | |||
135 | extern struct svc_export * | 160 | extern struct svc_export * |
136 | exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv, | 161 | exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv, |
137 | struct cache_req *reqp); | 162 | struct cache_req *reqp); |
163 | struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *); | ||
138 | 164 | ||
139 | #endif /* __KERNEL__ */ | 165 | #endif /* __KERNEL__ */ |
140 | 166 | ||
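The exp_flavor_info array added above is essentially a small lookup table keyed on the pseudoflavor the client authenticated with. Below is a hedged sketch of how the nfsexp_flags() helper declared in this header might consult it, with the per-flavor bits overriding only the NFSEXP_SECINFO_FLAGS subset; this is an illustration, not the in-tree implementation in fs/nfsd/export.c.

#include <linux/nfsd/export.h>
#include <linux/sunrpc/svc.h>

/* Illustrative sketch only. */
static int example_nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
{
	int i;

	for (i = 0; i < exp->ex_nflavors; i++) {
		struct exp_flavor_info *f = &exp->ex_flavors[i];

		if (f->pseudoflavor == rqstp->rq_flavor)
			/* flavor-specific bits win, but only for the
			 * NFSEXP_SECINFO_FLAGS subset */
			return (exp->ex_flags & ~NFSEXP_SECINFO_FLAGS) |
			       (f->flags & NFSEXP_SECINFO_FLAGS);
	}
	return exp->ex_flags;	/* no match: export-wide flags apply */
}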
diff --git a/include/linux/nfsd/interface.h b/include/linux/nfsd/interface.h deleted file mode 100644 index af0979704afb..000000000000 --- a/include/linux/nfsd/interface.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/nfsd/interface.h | ||
3 | * | ||
4 | * defines interface between nfsd and other bits of | ||
5 | * the kernel. Particularly filesystems (eventually). | ||
6 | * | ||
7 | * Copyright (C) 2000 Neil Brown <neilb@cse.unsw.edu.au> | ||
8 | */ | ||
9 | |||
10 | #ifndef LINUX_NFSD_INTERFACE_H | ||
11 | #define LINUX_NFSD_INTERFACE_H | ||
12 | |||
13 | #endif /* LINUX_NFSD_INTERFACE_H */ | ||
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 72feac581aa3..e452256d3f72 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/nfsd/export.h> | 22 | #include <linux/nfsd/export.h> |
23 | #include <linux/nfsd/auth.h> | 23 | #include <linux/nfsd/auth.h> |
24 | #include <linux/nfsd/stats.h> | 24 | #include <linux/nfsd/stats.h> |
25 | #include <linux/nfsd/interface.h> | ||
26 | /* | 25 | /* |
27 | * nfsd version | 26 | * nfsd version |
28 | */ | 27 | */ |
@@ -72,6 +71,9 @@ int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, | |||
72 | struct svc_export **expp); | 71 | struct svc_export **expp); |
73 | __be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *, | 72 | __be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *, |
74 | const char *, int, struct svc_fh *); | 73 | const char *, int, struct svc_fh *); |
74 | __be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *, | ||
75 | const char *, int, | ||
76 | struct svc_export **, struct dentry **); | ||
75 | __be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *, | 77 | __be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *, |
76 | struct iattr *, int, time_t); | 78 | struct iattr *, int, time_t); |
77 | #ifdef CONFIG_NFSD_V4 | 79 | #ifdef CONFIG_NFSD_V4 |
@@ -120,7 +122,8 @@ __be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *, | |||
120 | struct kstatfs *); | 122 | struct kstatfs *); |
121 | 123 | ||
122 | int nfsd_notify_change(struct inode *, struct iattr *); | 124 | int nfsd_notify_change(struct inode *, struct iattr *); |
123 | __be32 nfsd_permission(struct svc_export *, struct dentry *, int); | 125 | __be32 nfsd_permission(struct svc_rqst *, struct svc_export *, |
126 | struct dentry *, int); | ||
124 | int nfsd_sync_dir(struct dentry *dp); | 127 | int nfsd_sync_dir(struct dentry *dp); |
125 | 128 | ||
126 | #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) | 129 | #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) |
@@ -149,6 +152,7 @@ extern int nfsd_max_blksize; | |||
149 | * NFSv4 State | 152 | * NFSv4 State |
150 | */ | 153 | */ |
151 | #ifdef CONFIG_NFSD_V4 | 154 | #ifdef CONFIG_NFSD_V4 |
155 | extern unsigned int max_delegations; | ||
152 | void nfs4_state_init(void); | 156 | void nfs4_state_init(void); |
153 | int nfs4_state_start(void); | 157 | int nfs4_state_start(void); |
154 | void nfs4_state_shutdown(void); | 158 | void nfs4_state_shutdown(void); |
@@ -236,6 +240,7 @@ void nfsd_lockd_shutdown(void); | |||
236 | #define nfserr_badname __constant_htonl(NFSERR_BADNAME) | 240 | #define nfserr_badname __constant_htonl(NFSERR_BADNAME) |
237 | #define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) | 241 | #define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) |
238 | #define nfserr_locked __constant_htonl(NFSERR_LOCKED) | 242 | #define nfserr_locked __constant_htonl(NFSERR_LOCKED) |
243 | #define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC) | ||
239 | #define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) | 244 | #define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) |
240 | 245 | ||
241 | /* error codes for internal use */ | 246 | /* error codes for internal use */ |
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index ab5c236bd9a7..db348f749376 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h | |||
@@ -67,7 +67,7 @@ struct nfs4_cb_recall { | |||
67 | int cbr_trunc; | 67 | int cbr_trunc; |
68 | stateid_t cbr_stateid; | 68 | stateid_t cbr_stateid; |
69 | u32 cbr_fhlen; | 69 | u32 cbr_fhlen; |
70 | u32 cbr_fhval[NFS4_FHSIZE]; | 70 | char cbr_fhval[NFS4_FHSIZE]; |
71 | struct nfs4_delegation *cbr_dp; | 71 | struct nfs4_delegation *cbr_dp; |
72 | }; | 72 | }; |
73 | 73 | ||
@@ -224,6 +224,7 @@ struct nfs4_file { | |||
224 | struct inode *fi_inode; | 224 | struct inode *fi_inode; |
225 | u32 fi_id; /* used with stateowner->so_id | 225 | u32 fi_id; /* used with stateowner->so_id |
226 | * for stateid_hashtbl hash */ | 226 | * for stateid_hashtbl hash */ |
227 | bool fi_had_conflict; | ||
227 | }; | 228 | }; |
228 | 229 | ||
229 | /* | 230 | /* |
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 09799bcee0ac..1b653267133a 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h | |||
@@ -293,6 +293,12 @@ struct nfsd4_rename { | |||
293 | struct nfsd4_change_info rn_tinfo; /* response */ | 293 | struct nfsd4_change_info rn_tinfo; /* response */ |
294 | }; | 294 | }; |
295 | 295 | ||
296 | struct nfsd4_secinfo { | ||
297 | u32 si_namelen; /* request */ | ||
298 | char *si_name; /* request */ | ||
299 | struct svc_export *si_exp; /* response */ | ||
300 | }; | ||
301 | |||
296 | struct nfsd4_setattr { | 302 | struct nfsd4_setattr { |
297 | stateid_t sa_stateid; /* request */ | 303 | stateid_t sa_stateid; /* request */ |
298 | u32 sa_bmval[2]; /* request */ | 304 | u32 sa_bmval[2]; /* request */ |
@@ -365,6 +371,7 @@ struct nfsd4_op { | |||
365 | struct nfsd4_remove remove; | 371 | struct nfsd4_remove remove; |
366 | struct nfsd4_rename rename; | 372 | struct nfsd4_rename rename; |
367 | clientid_t renew; | 373 | clientid_t renew; |
374 | struct nfsd4_secinfo secinfo; | ||
368 | struct nfsd4_setattr setattr; | 375 | struct nfsd4_setattr setattr; |
369 | struct nfsd4_setclientid setclientid; | 376 | struct nfsd4_setclientid setclientid; |
370 | struct nfsd4_setclientid_confirm setclientid_confirm; | 377 | struct nfsd4_setclientid_confirm setclientid_confirm; |
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 9431101bf876..be3f2bb6fcf3 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h | |||
@@ -196,6 +196,8 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, | |||
196 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ | 196 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ |
197 | #define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ | 197 | #define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ |
198 | #define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */ | 198 | #define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */ |
199 | #define CPU_DYING 0x000A /* CPU (unsigned)v not running any task, | ||
200 | * not handling interrupts, soon dead */ | ||
199 | 201 | ||
200 | /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend | 202 |
201 | * operation in progress | 203 | * operation in progress |
@@ -208,6 +210,13 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, | |||
208 | #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) | 210 | #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) |
209 | #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) | 211 | #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) |
210 | #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) | 212 | #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) |
213 | #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) | ||
214 | |||
215 | /* Hibernation and suspend events */ | ||
216 | #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ | ||
217 | #define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ | ||
218 | #define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */ | ||
219 | #define PM_POST_SUSPEND 0x0004 /* Suspend finished */ | ||
211 | 220 | ||
212 | #endif /* __KERNEL__ */ | 221 | #endif /* __KERNEL__ */ |
213 | #endif /* _LINUX_NOTIFIER_H */ | 222 | #endif /* _LINUX_NOTIFIER_H */ |
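The new PM_* event codes are delivered over the notifier chain exposed by the register_pm_notifier()/pm_notifier() helpers added to linux/suspend.h further down in this series. A hedged sketch of a driver-side callback follows; the names are illustrative, and returning NOTIFY_BAD from a *_PREPARE case vetoes the transition.

#include <linux/notifier.h>
#include <linux/suspend.h>

static int example_pm_callback(struct notifier_block *nb,
			       unsigned long event, void *unused)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		/* quiesce; return NOTIFY_BAD here to veto the transition */
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		/* undo whatever the *_PREPARE branch set up */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_callback,
};

/* in driver init: register_pm_notifier(&example_pm_nb); */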
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index ae2d79f2107e..209d3a47f50f 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -90,8 +90,12 @@ | |||
90 | #define PG_reclaim 17 /* To be reclaimed asap */ | 90 | #define PG_reclaim 17 /* To be reclaimed asap */ |
91 | #define PG_buddy 19 /* Page is free, on buddy lists */ | 91 | #define PG_buddy 19 /* Page is free, on buddy lists */ |
92 | 92 | ||
93 | /* PG_readahead is only used for file reads; PG_reclaim is only for writes */ | ||
94 | #define PG_readahead PG_reclaim /* Reminder to do async read-ahead */ | ||
95 | |||
93 | /* PG_owner_priv_1 users should have descriptive aliases */ | 96 | /* PG_owner_priv_1 users should have descriptive aliases */ |
94 | #define PG_checked PG_owner_priv_1 /* Used by some filesystems */ | 97 | #define PG_checked PG_owner_priv_1 /* Used by some filesystems */ |
98 | #define PG_pinned PG_owner_priv_1 /* Xen pinned pagetable */ | ||
95 | 99 | ||
96 | #if (BITS_PER_LONG > 32) | 100 | #if (BITS_PER_LONG > 32) |
97 | /* | 101 | /* |
@@ -170,6 +174,10 @@ static inline void SetPageUptodate(struct page *page) | |||
170 | #define SetPageChecked(page) set_bit(PG_checked, &(page)->flags) | 174 | #define SetPageChecked(page) set_bit(PG_checked, &(page)->flags) |
171 | #define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags) | 175 | #define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags) |
172 | 176 | ||
177 | #define PagePinned(page) test_bit(PG_pinned, &(page)->flags) | ||
178 | #define SetPagePinned(page) set_bit(PG_pinned, &(page)->flags) | ||
179 | #define ClearPagePinned(page) clear_bit(PG_pinned, &(page)->flags) | ||
180 | |||
173 | #define PageReserved(page) test_bit(PG_reserved, &(page)->flags) | 181 | #define PageReserved(page) test_bit(PG_reserved, &(page)->flags) |
174 | #define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags) | 182 | #define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags) |
175 | #define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags) | 183 | #define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags) |
@@ -181,37 +189,15 @@ static inline void SetPageUptodate(struct page *page) | |||
181 | #define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags) | 189 | #define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags) |
182 | #define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags) | 190 | #define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags) |
183 | 191 | ||
192 | /* | ||
193 | * Only test-and-set exist for PG_writeback. The unconditional operators are | ||
194 | * risky: they bypass page accounting. | ||
195 | */ | ||
184 | #define PageWriteback(page) test_bit(PG_writeback, &(page)->flags) | 196 | #define PageWriteback(page) test_bit(PG_writeback, &(page)->flags) |
185 | #define SetPageWriteback(page) \ | 197 | #define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \ |
186 | do { \ | 198 | &(page)->flags) |
187 | if (!test_and_set_bit(PG_writeback, \ | 199 | #define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \ |
188 | &(page)->flags)) \ | 200 | &(page)->flags) |
189 | inc_zone_page_state(page, NR_WRITEBACK); \ | ||
190 | } while (0) | ||
191 | #define TestSetPageWriteback(page) \ | ||
192 | ({ \ | ||
193 | int ret; \ | ||
194 | ret = test_and_set_bit(PG_writeback, \ | ||
195 | &(page)->flags); \ | ||
196 | if (!ret) \ | ||
197 | inc_zone_page_state(page, NR_WRITEBACK); \ | ||
198 | ret; \ | ||
199 | }) | ||
200 | #define ClearPageWriteback(page) \ | ||
201 | do { \ | ||
202 | if (test_and_clear_bit(PG_writeback, \ | ||
203 | &(page)->flags)) \ | ||
204 | dec_zone_page_state(page, NR_WRITEBACK); \ | ||
205 | } while (0) | ||
206 | #define TestClearPageWriteback(page) \ | ||
207 | ({ \ | ||
208 | int ret; \ | ||
209 | ret = test_and_clear_bit(PG_writeback, \ | ||
210 | &(page)->flags); \ | ||
211 | if (ret) \ | ||
212 | dec_zone_page_state(page, NR_WRITEBACK); \ | ||
213 | ret; \ | ||
214 | }) | ||
215 | 201 | ||
216 | #define PageBuddy(page) test_bit(PG_buddy, &(page)->flags) | 202 | #define PageBuddy(page) test_bit(PG_buddy, &(page)->flags) |
217 | #define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags) | 203 | #define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags) |
@@ -221,6 +207,10 @@ static inline void SetPageUptodate(struct page *page) | |||
221 | #define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags) | 207 | #define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags) |
222 | #define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags) | 208 | #define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags) |
223 | 209 | ||
210 | #define PageReadahead(page) test_bit(PG_readahead, &(page)->flags) | ||
211 | #define SetPageReadahead(page) set_bit(PG_readahead, &(page)->flags) | ||
212 | #define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags) | ||
213 | |||
224 | #define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags) | 214 | #define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags) |
225 | #define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags) | 215 | #define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags) |
226 | #define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags) | 216 | #define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags) |
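With the unconditional SetPageWriteback()/ClearPageWriteback() variants removed, the NR_WRITEBACK accounting those macros did implicitly now has to live with the caller of the remaining test-and-set operators. A hedged sketch of that caller-side pattern, mirroring what the removed macros used to do (the in-tree helpers live in the writeback code and may look different):

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Illustrative only: the accounting now accompanies the bit flip. */
static int example_set_page_writeback(struct page *page)
{
	int was_set = TestSetPageWriteback(page);

	if (!was_set)			/* we are the ones who set the bit */
		inc_zone_page_state(page, NR_WRITEBACK);
	return was_set;
}

static int example_clear_page_writeback(struct page *page)
{
	int was_set = TestClearPageWriteback(page);

	if (was_set)			/* we are the ones who cleared it */
		dec_zone_page_state(page, NR_WRITEBACK);
	return was_set;
}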
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2c7add169539..b15c6498fe67 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -495,6 +495,8 @@ | |||
495 | 495 | ||
496 | #define PCI_VENDOR_ID_AMD 0x1022 | 496 | #define PCI_VENDOR_ID_AMD 0x1022 |
497 | #define PCI_DEVICE_ID_AMD_K8_NB 0x1100 | 497 | #define PCI_DEVICE_ID_AMD_K8_NB 0x1100 |
498 | #define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101 | ||
499 | #define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102 | ||
498 | #define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103 | 500 | #define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103 |
499 | #define PCI_DEVICE_ID_AMD_LANCE 0x2000 | 501 | #define PCI_DEVICE_ID_AMD_LANCE 0x2000 |
500 | #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 | 502 | #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 |
@@ -2209,6 +2211,7 @@ | |||
2209 | #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 | 2211 | #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 |
2210 | #define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 | 2212 | #define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 |
2211 | #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 | 2213 | #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 |
2214 | #define PCI_DEVICE_ID_INTEL_3000_HB 0x2778 | ||
2212 | #define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0 | 2215 | #define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0 |
2213 | #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 | 2216 | #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 |
2214 | #define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 | 2217 | #define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 |
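The new AMD K8 northbridge function IDs are the sort of constant that normally ends up in a driver's pci_device_id table. A hedged sketch of such a table; the table and any driver using it are assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_k8_nb_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, example_k8_nb_ids);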
diff --git a/include/linux/pm.h b/include/linux/pm.h index 273781c82e4d..ad3cc2eb0d34 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -101,6 +101,7 @@ struct pm_dev | |||
101 | */ | 101 | */ |
102 | extern void (*pm_idle)(void); | 102 | extern void (*pm_idle)(void); |
103 | extern void (*pm_power_off)(void); | 103 | extern void (*pm_power_off)(void); |
104 | extern void (*pm_power_off_prepare)(void); | ||
104 | 105 | ||
105 | typedef int __bitwise suspend_state_t; | 106 | typedef int __bitwise suspend_state_t; |
106 | 107 | ||
@@ -284,8 +285,6 @@ extern int device_prepare_suspend(pm_message_t state); | |||
284 | #define device_may_wakeup(dev) \ | 285 | #define device_may_wakeup(dev) \ |
285 | (device_can_wakeup(dev) && (dev)->power.should_wakeup) | 286 | (device_can_wakeup(dev) && (dev)->power.should_wakeup) |
286 | 287 | ||
287 | extern int dpm_runtime_suspend(struct device *, pm_message_t); | ||
288 | extern void dpm_runtime_resume(struct device *); | ||
289 | extern void __suspend_report_result(const char *function, void *fn, int ret); | 288 | extern void __suspend_report_result(const char *function, void *fn, int ret); |
290 | 289 | ||
291 | #define suspend_report_result(fn, ret) \ | 290 | #define suspend_report_result(fn, ret) \ |
@@ -317,15 +316,6 @@ static inline int device_suspend(pm_message_t state) | |||
317 | #define device_set_wakeup_enable(dev,val) do{}while(0) | 316 | #define device_set_wakeup_enable(dev,val) do{}while(0) |
318 | #define device_may_wakeup(dev) (0) | 317 | #define device_may_wakeup(dev) (0) |
319 | 318 | ||
320 | static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state) | ||
321 | { | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | static inline void dpm_runtime_resume(struct device * dev) | ||
326 | { | ||
327 | } | ||
328 | |||
329 | #define suspend_report_result(fn, ret) do { } while (0) | 319 | #define suspend_report_result(fn, ret) do { } while (0) |
330 | 320 | ||
331 | static inline int call_platform_enable_wakeup(struct device *dev, int is_on) | 321 | static inline int call_platform_enable_wakeup(struct device *dev, int is_on) |
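pm_power_off_prepare is a new global hook alongside pm_power_off: platform code may point it at a routine that runs just before the actual power-off handler. A hedged, illustrative assignment; both function names below are made up.

#include <linux/init.h>
#include <linux/pm.h>

/* Illustrative: firmware-specific preparation before power is cut. */
static void example_power_off_prepare(void)
{
	/* e.g. notify an embedded controller that power is going away */
}

static int __init example_platform_pm_init(void)
{
	pm_power_off_prepare = example_power_off_prepare;
	return 0;
}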
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index eeb1976ef7bf..ae8146abd746 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -110,6 +110,8 @@ static inline void ptrace_unlink(struct task_struct *child) | |||
110 | __ptrace_unlink(child); | 110 | __ptrace_unlink(child); |
111 | } | 111 | } |
112 | 112 | ||
113 | int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data); | ||
114 | int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); | ||
113 | 115 | ||
114 | #ifndef force_successful_syscall_return | 116 | #ifndef force_successful_syscall_return |
115 | /* | 117 | /* |
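generic_ptrace_peekdata()/generic_ptrace_pokedata() let architectures drop their private PEEKDATA/POKEDATA handling. A hedged sketch of the dispatch an arch_ptrace() can now delegate; this is illustrative and not lifted from any particular architecture.

#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

/* Hedged sketch: arch_ptrace() is defined per architecture. */
static long example_arch_ptrace(struct task_struct *child, long request,
				long addr, long data)
{
	long ret = -EIO;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read a word at addr in the child, put_user() it to data */
		ret = generic_ptrace_peekdata(child, addr, data);
		break;
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		/* write the word data at addr in the child */
		ret = generic_ptrace_pokedata(child, addr, data);
		break;
	}
	return ret;
}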
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h index dd5a05d03d4f..75e17a05540e 100644 --- a/include/linux/raid/bitmap.h +++ b/include/linux/raid/bitmap.h | |||
@@ -262,7 +262,7 @@ int bitmap_active(struct bitmap *bitmap); | |||
262 | 262 | ||
263 | char *file_path(struct file *file, char *buf, int count); | 263 | char *file_path(struct file *file, char *buf, int count); |
264 | void bitmap_print_sb(struct bitmap *bitmap); | 264 | void bitmap_print_sb(struct bitmap *bitmap); |
265 | int bitmap_update_sb(struct bitmap *bitmap); | 265 | void bitmap_update_sb(struct bitmap *bitmap); |
266 | 266 | ||
267 | int bitmap_setallbits(struct bitmap *bitmap); | 267 | int bitmap_setallbits(struct bitmap *bitmap); |
268 | void bitmap_write_all(struct bitmap *bitmap); | 268 | void bitmap_write_all(struct bitmap *bitmap); |
@@ -278,8 +278,8 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int d | |||
278 | void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); | 278 | void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); |
279 | void bitmap_close_sync(struct bitmap *bitmap); | 279 | void bitmap_close_sync(struct bitmap *bitmap); |
280 | 280 | ||
281 | int bitmap_unplug(struct bitmap *bitmap); | 281 | void bitmap_unplug(struct bitmap *bitmap); |
282 | int bitmap_daemon_work(struct bitmap *bitmap); | 282 | void bitmap_daemon_work(struct bitmap *bitmap); |
283 | #endif | 283 | #endif |
284 | 284 | ||
285 | #endif | 285 | #endif |
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index de72c49747c8..28ac632b42dd 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h | |||
@@ -51,7 +51,7 @@ struct mdk_rdev_s | |||
51 | 51 | ||
52 | sector_t size; /* Device size (in blocks) */ | 52 | sector_t size; /* Device size (in blocks) */ |
53 | mddev_t *mddev; /* RAID array if running */ | 53 | mddev_t *mddev; /* RAID array if running */ |
54 | unsigned long last_events; /* IO event timestamp */ | 54 | long last_events; /* IO event timestamp */ |
55 | 55 | ||
56 | struct block_device *bdev; /* block device handle */ | 56 | struct block_device *bdev; /* block device handle */ |
57 | 57 | ||
diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 1dd1c707311f..85ea63f462af 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h | |||
@@ -67,6 +67,11 @@ extern void kernel_power_off(void); | |||
67 | 67 | ||
68 | void ctrl_alt_del(void); | 68 | void ctrl_alt_del(void); |
69 | 69 | ||
70 | #define POWEROFF_CMD_PATH_LEN 256 | ||
71 | extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN]; | ||
72 | |||
73 | extern int orderly_poweroff(bool force); | ||
74 | |||
70 | /* | 75 | /* |
71 | * Emergency restart, callable from an interrupt handler. | 76 | * Emergency restart, callable from an interrupt handler. |
72 | */ | 77 | */ |
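orderly_poweroff() runs the userspace helper named by poweroff_cmd (conventionally /sbin/poweroff) and, with force set, is expected to fall back to an immediate kernel power-off if that helper cannot be started. A hedged usage sketch; the calling context is invented.

#include <linux/reboot.h>

/* Illustrative: a critical over-temperature handler that prefers a
 * clean userspace shutdown but will not accept failure. */
static void example_critical_overheat(void)
{
	orderly_poweroff(true);
}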
diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h new file mode 100644 index 000000000000..e8c7c21ceb1f --- /dev/null +++ b/include/linux/rtc/m48t59.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * include/linux/rtc/m48t59.h | ||
3 | * | ||
4 | * Definitions for the platform data of m48t59 RTC chip driver. | ||
5 | * | ||
6 | * Copyright (c) 2007 Wind River Systems, Inc. | ||
7 | * | ||
8 | * Mark Zhan <rongkai.zhan@windriver.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef _LINUX_RTC_M48T59_H_ | ||
16 | #define _LINUX_RTC_M48T59_H_ | ||
17 | |||
18 | /* | ||
19 | * M48T59 Register Offset | ||
20 | */ | ||
21 | #define M48T59_YEAR 0x1fff | ||
22 | #define M48T59_MONTH 0x1ffe | ||
23 | #define M48T59_MDAY 0x1ffd /* Day of Month */ | ||
24 | #define M48T59_WDAY 0x1ffc /* Day of Week */ | ||
25 | #define M48T59_WDAY_CB 0x20 /* Century Bit */ | ||
26 | #define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */ | ||
27 | #define M48T59_HOUR 0x1ffb | ||
28 | #define M48T59_MIN 0x1ffa | ||
29 | #define M48T59_SEC 0x1ff9 | ||
30 | #define M48T59_CNTL 0x1ff8 | ||
31 | #define M48T59_CNTL_READ 0x40 | ||
32 | #define M48T59_CNTL_WRITE 0x80 | ||
33 | #define M48T59_WATCHDOG 0x1ff7 | ||
34 | #define M48T59_INTR 0x1ff6 | ||
35 | #define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */ | ||
36 | #define M48T59_INTR_ABE 0x20 | ||
37 | #define M48T59_ALARM_DATE 0x1ff5 | ||
38 | #define M48T59_ALARM_HOUR 0x1ff4 | ||
39 | #define M48T59_ALARM_MIN 0x1ff3 | ||
40 | #define M48T59_ALARM_SEC 0x1ff2 | ||
41 | #define M48T59_UNUSED 0x1ff1 | ||
42 | #define M48T59_FLAGS 0x1ff0 | ||
43 | #define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */ | ||
44 | #define M48T59_FLAGS_AF 0x40 /* alarm */ | ||
45 | #define M48T59_FLAGS_BF 0x10 /* low battery */ | ||
46 | |||
47 | #define M48T59_NVRAM_SIZE 0x1ff0 | ||
48 | |||
49 | struct m48t59_plat_data { | ||
50 | /* The method to access M48T59 registers, | ||
51 | * NOTE: The 'ofs' should be 0x00~0x1fff | ||
52 | */ | ||
53 | void (*write_byte)(struct device *dev, u32 ofs, u8 val); | ||
54 | unsigned char (*read_byte)(struct device *dev, u32 ofs); | ||
55 | }; | ||
56 | |||
57 | #endif /* _LINUX_RTC_M48T59_H_ */ | ||
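The platform data is nothing more than a pair of byte accessors; board code supplies them and keeps the offsets within the 0x00-0x1fff window the comment above requires. A hedged board-side sketch follows; the ioremap()ed base, the accessor bodies and the "rtc-m48t59" device name are assumptions, not part of this header.

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/rtc/m48t59.h>

static void __iomem *example_m48t59_base;	/* ioremap()ed chip window */

static void example_m48t59_write_byte(struct device *dev, u32 ofs, u8 val)
{
	writeb(val, example_m48t59_base + ofs);
}

static unsigned char example_m48t59_read_byte(struct device *dev, u32 ofs)
{
	return readb(example_m48t59_base + ofs);
}

static struct m48t59_plat_data example_m48t59_pdata = {
	.write_byte	= example_m48t59_write_byte,
	.read_byte	= example_m48t59_read_byte,
};

/* Board init would then register a platform device (assumed name
 * "rtc-m48t59") whose dev.platform_data points at example_m48t59_pdata. */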
diff --git a/include/linux/sched.h b/include/linux/sched.h index 731edaca8ffd..94f624aef017 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -345,6 +345,27 @@ typedef unsigned long mm_counter_t; | |||
345 | (mm)->hiwater_vm = (mm)->total_vm; \ | 345 | (mm)->hiwater_vm = (mm)->total_vm; \ |
346 | } while (0) | 346 | } while (0) |
347 | 347 | ||
348 | extern void set_dumpable(struct mm_struct *mm, int value); | ||
349 | extern int get_dumpable(struct mm_struct *mm); | ||
350 | |||
351 | /* mm flags */ | ||
352 | /* dumpable bits */ | ||
353 | #define MMF_DUMPABLE 0 /* core dump is permitted */ | ||
354 | #define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ | ||
355 | #define MMF_DUMPABLE_BITS 2 | ||
356 | |||
357 | /* coredump filter bits */ | ||
358 | #define MMF_DUMP_ANON_PRIVATE 2 | ||
359 | #define MMF_DUMP_ANON_SHARED 3 | ||
360 | #define MMF_DUMP_MAPPED_PRIVATE 4 | ||
361 | #define MMF_DUMP_MAPPED_SHARED 5 | ||
362 | #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS | ||
363 | #define MMF_DUMP_FILTER_BITS 4 | ||
364 | #define MMF_DUMP_FILTER_MASK \ | ||
365 | (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) | ||
366 | #define MMF_DUMP_FILTER_DEFAULT \ | ||
367 | ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED)) | ||
368 | |||
348 | struct mm_struct { | 369 | struct mm_struct { |
349 | struct vm_area_struct * mmap; /* list of VMAs */ | 370 | struct vm_area_struct * mmap; /* list of VMAs */ |
350 | struct rb_root mm_rb; | 371 | struct rb_root mm_rb; |
@@ -402,7 +423,7 @@ struct mm_struct { | |||
402 | unsigned int token_priority; | 423 | unsigned int token_priority; |
403 | unsigned int last_interval; | 424 | unsigned int last_interval; |
404 | 425 | ||
405 | unsigned char dumpable:2; | 426 | unsigned long flags; /* Must use atomic bitops to access the bits */ |
406 | 427 | ||
407 | /* coredumping support */ | 428 | /* coredumping support */ |
408 | int core_waiters; | 429 | int core_waiters; |
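The old two-bit mm->dumpable field becomes the low bits of a single mm->flags word, with the new coredump filter stacked directly above it; everything is now manipulated with atomic bitops through set_dumpable()/get_dumpable(). The bit layout is easy to verify in isolation; the standalone program below only reproduces the constants defined above and is not kernel code.

#include <stdio.h>

#define MMF_DUMPABLE		0
#define MMF_DUMP_SECURELY	1
#define MMF_DUMPABLE_BITS	2

#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	4
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))

int main(void)
{
	unsigned long flags = MMF_DUMP_FILTER_DEFAULT | (1UL << MMF_DUMPABLE);

	/* dumpable state: the two lowest bits (here 1, core dump permitted) */
	printf("dumpable = %lu\n", flags & ((1UL << MMF_DUMPABLE_BITS) - 1));
	/* filter mask sits just above the dumpable bits: 0x3c */
	printf("mask     = 0x%x\n", MMF_DUMP_FILTER_MASK);
	/* default filter selects anonymous private and shared vmas only */
	printf("filter   = 0x%lx\n",
	       (flags & MMF_DUMP_FILTER_MASK) >> MMF_DUMP_FILTER_SHIFT);
	return 0;
}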
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 706ee9a4c80c..8518fa2a6f89 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
@@ -60,6 +60,8 @@ void serial8250_unregister_port(int line); | |||
60 | void serial8250_suspend_port(int line); | 60 | void serial8250_suspend_port(int line); |
61 | void serial8250_resume_port(int line); | 61 | void serial8250_resume_port(int line); |
62 | 62 | ||
63 | extern int early_serial_setup(struct uart_port *port); | ||
64 | |||
63 | extern int serial8250_find_port(struct uart_port *p); | 65 | extern int serial8250_find_port(struct uart_port *p); |
64 | extern int serial8250_find_port_for_earlycon(void); | 66 | extern int serial8250_find_port_for_earlycon(void); |
65 | extern int setup_early_serial8250_console(char *cmdline); | 67 | extern int setup_early_serial8250_console(char *cmdline); |
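early_serial_setup() lets board code hand the 8250 core a fully described port before the regular probe path runs. A hedged sketch; every value below is a placeholder describing a legacy COM1-style port and is not taken from this patch.

#include <linux/init.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>

static int __init example_early_uart_init(void)
{
	struct uart_port port = {
		.line		= 0,
		.iotype		= UPIO_PORT,
		.iobase		= 0x3f8,	/* placeholder: COM1 */
		.irq		= 4,		/* placeholder IRQ */
		.uartclk	= 1843200,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
	};

	return early_serial_setup(&port);
}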
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 7f2c99d66e9d..773d8d8828ad 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -62,8 +62,9 @@ | |||
62 | /* NEC v850. */ | 62 | /* NEC v850. */ |
63 | #define PORT_V850E_UART 40 | 63 | #define PORT_V850E_UART 40 |
64 | 64 | ||
65 | /* DZ */ | 65 | /* DEC */ |
66 | #define PORT_DZ 47 | 66 | #define PORT_DZ 46 |
67 | #define PORT_ZS 47 | ||
67 | 68 | ||
68 | /* Parisc type numbers. */ | 69 | /* Parisc type numbers. */ |
69 | #define PORT_MUX 48 | 70 | #define PORT_MUX 48 |
@@ -142,6 +143,9 @@ | |||
142 | /* Micrel KS8695 */ | 143 | /* Micrel KS8695 */ |
143 | #define PORT_KS8695 76 | 144 | #define PORT_KS8695 76 |
144 | 145 | ||
146 | /* Broadcom SB1250, etc. SOC */ | ||
147 | #define PORT_SB1250_DUART 77 | ||
148 | |||
145 | 149 | ||
146 | #ifdef __KERNEL__ | 150 | #ifdef __KERNEL__ |
147 | 151 | ||
diff --git a/include/linux/slab.h b/include/linux/slab.h index 27402fea9b79..0e1d0daef6a2 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -31,6 +31,19 @@ | |||
31 | #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ | 31 | #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. | ||
35 | * | ||
36 | * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. | ||
37 | * | ||
38 | * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. | ||
39 | * Both make kfree a no-op. | ||
40 | */ | ||
41 | #define ZERO_SIZE_PTR ((void *)16) | ||
42 | |||
43 | #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) < \ | ||
44 | (unsigned long)ZERO_SIZE_PTR) | ||
45 | |||
46 | /* | ||
34 | * struct kmem_cache related prototypes | 47 | * struct kmem_cache related prototypes |
35 | */ | 48 | */ |
36 | void __init kmem_cache_init(void); | 49 | void __init kmem_cache_init(void); |
@@ -42,7 +55,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, | |||
42 | void (*)(void *, struct kmem_cache *, unsigned long)); | 55 | void (*)(void *, struct kmem_cache *, unsigned long)); |
43 | void kmem_cache_destroy(struct kmem_cache *); | 56 | void kmem_cache_destroy(struct kmem_cache *); |
44 | int kmem_cache_shrink(struct kmem_cache *); | 57 | int kmem_cache_shrink(struct kmem_cache *); |
45 | void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); | ||
46 | void kmem_cache_free(struct kmem_cache *, void *); | 58 | void kmem_cache_free(struct kmem_cache *, void *); |
47 | unsigned int kmem_cache_size(struct kmem_cache *); | 59 | unsigned int kmem_cache_size(struct kmem_cache *); |
48 | const char *kmem_cache_name(struct kmem_cache *); | 60 | const char *kmem_cache_name(struct kmem_cache *); |
@@ -78,11 +90,37 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); | |||
78 | /* | 90 | /* |
79 | * Common kmalloc functions provided by all allocators | 91 | * Common kmalloc functions provided by all allocators |
80 | */ | 92 | */ |
81 | void *__kzalloc(size_t, gfp_t); | ||
82 | void * __must_check krealloc(const void *, size_t, gfp_t); | 93 | void * __must_check krealloc(const void *, size_t, gfp_t); |
83 | void kfree(const void *); | 94 | void kfree(const void *); |
84 | size_t ksize(const void *); | 95 | size_t ksize(const void *); |
85 | 96 | ||
97 | /* | ||
98 | * Allocator specific definitions. These are mainly used to establish optimized | ||
99 | * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by | ||
100 | * selecting the appropriate general cache at compile time. | ||
101 | * | ||
102 | * Allocators must define at least: | ||
103 | * | ||
104 | * kmem_cache_alloc() | ||
105 | * __kmalloc() | ||
106 | * kmalloc() | ||
107 | * | ||
108 | * Those wishing to support NUMA must also define: | ||
109 | * | ||
110 | * kmem_cache_alloc_node() | ||
111 | * kmalloc_node() | ||
112 | * | ||
113 | * See each allocator definition file for additional comments and | ||
114 | * implementation notes. | ||
115 | */ | ||
116 | #ifdef CONFIG_SLUB | ||
117 | #include <linux/slub_def.h> | ||
118 | #elif defined(CONFIG_SLOB) | ||
119 | #include <linux/slob_def.h> | ||
120 | #else | ||
121 | #include <linux/slab_def.h> | ||
122 | #endif | ||
123 | |||
86 | /** | 124 | /** |
87 | * kcalloc - allocate memory for an array. The memory is set to zero. | 125 | * kcalloc - allocate memory for an array. The memory is set to zero. |
88 | * @n: number of elements. | 126 | * @n: number of elements. |
@@ -138,37 +176,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) | |||
138 | { | 176 | { |
139 | if (n != 0 && size > ULONG_MAX / n) | 177 | if (n != 0 && size > ULONG_MAX / n) |
140 | return NULL; | 178 | return NULL; |
141 | return __kzalloc(n * size, flags); | 179 | return __kmalloc(n * size, flags | __GFP_ZERO); |
142 | } | 180 | } |
143 | 181 | ||
144 | /* | ||
145 | * Allocator specific definitions. These are mainly used to establish optimized | ||
146 | * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by | ||
147 | * selecting the appropriate general cache at compile time. | ||
148 | * | ||
149 | * Allocators must define at least: | ||
150 | * | ||
151 | * kmem_cache_alloc() | ||
152 | * __kmalloc() | ||
153 | * kmalloc() | ||
154 | * kzalloc() | ||
155 | * | ||
156 | * Those wishing to support NUMA must also define: | ||
157 | * | ||
158 | * kmem_cache_alloc_node() | ||
159 | * kmalloc_node() | ||
160 | * | ||
161 | * See each allocator definition file for additional comments and | ||
162 | * implementation notes. | ||
163 | */ | ||
164 | #ifdef CONFIG_SLUB | ||
165 | #include <linux/slub_def.h> | ||
166 | #elif defined(CONFIG_SLOB) | ||
167 | #include <linux/slob_def.h> | ||
168 | #else | ||
169 | #include <linux/slab_def.h> | ||
170 | #endif | ||
171 | |||
172 | #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB) | 182 | #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB) |
173 | /** | 183 | /** |
174 | * kmalloc_node - allocate memory from a specific node | 184 | * kmalloc_node - allocate memory from a specific node |
@@ -242,5 +252,23 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); | |||
242 | 252 | ||
243 | #endif /* DEBUG_SLAB */ | 253 | #endif /* DEBUG_SLAB */ |
244 | 254 | ||
255 | /* | ||
256 | * Shortcuts | ||
257 | */ | ||
258 | static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) | ||
259 | { | ||
260 | return kmem_cache_alloc(k, flags | __GFP_ZERO); | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * kzalloc - allocate memory. The memory is set to zero. | ||
265 | * @size: how many bytes of memory are required. | ||
266 | * @flags: the type of memory to allocate (see kmalloc). | ||
267 | */ | ||
268 | static inline void *kzalloc(size_t size, gfp_t flags) | ||
269 | { | ||
270 | return kmalloc(size, flags | __GFP_ZERO); | ||
271 | } | ||
272 | |||
245 | #endif /* __KERNEL__ */ | 273 | #endif /* __KERNEL__ */ |
246 | #endif /* _LINUX_SLAB_H */ | 274 | #endif /* _LINUX_SLAB_H */ |
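The net effect of the slab.h changes: zeroing is now expressed through __GFP_ZERO everywhere (kzalloc(), kcalloc() and the new kmem_cache_zalloc() shortcut are thin wrappers), and a zero-sized kmalloc() yields the distinct ZERO_SIZE_PTR rather than NULL or a real object. A hedged illustration of what callers may rely on:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct example { int a, b; };

static int example_alloc(void)
{
	struct example *e;
	void *z;

	/* kzalloc() is now simply kmalloc() with __GFP_ZERO or'ed in */
	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	/* a zero-sized request returns ZERO_SIZE_PTR: not NULL, never
	 * dereferenceable, but perfectly valid to hand back to kfree() */
	z = kmalloc(0, GFP_KERNEL);
	WARN_ON(!ZERO_OR_NULL_PTR(z));

	kfree(z);	/* no-op for ZERO_SIZE_PTR, as for NULL */
	kfree(e);
	return 0;
}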
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 365d036c454a..32bdc2ffd715 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -32,6 +32,10 @@ static inline void *kmalloc(size_t size, gfp_t flags) | |||
32 | { | 32 | { |
33 | if (__builtin_constant_p(size)) { | 33 | if (__builtin_constant_p(size)) { |
34 | int i = 0; | 34 | int i = 0; |
35 | |||
36 | if (!size) | ||
37 | return ZERO_SIZE_PTR; | ||
38 | |||
35 | #define CACHE(x) \ | 39 | #define CACHE(x) \ |
36 | if (size <= x) \ | 40 | if (size <= x) \ |
37 | goto found; \ | 41 | goto found; \ |
@@ -54,32 +58,6 @@ found: | |||
54 | return __kmalloc(size, flags); | 58 | return __kmalloc(size, flags); |
55 | } | 59 | } |
56 | 60 | ||
57 | static inline void *kzalloc(size_t size, gfp_t flags) | ||
58 | { | ||
59 | if (__builtin_constant_p(size)) { | ||
60 | int i = 0; | ||
61 | #define CACHE(x) \ | ||
62 | if (size <= x) \ | ||
63 | goto found; \ | ||
64 | else \ | ||
65 | i++; | ||
66 | #include "kmalloc_sizes.h" | ||
67 | #undef CACHE | ||
68 | { | ||
69 | extern void __you_cannot_kzalloc_that_much(void); | ||
70 | __you_cannot_kzalloc_that_much(); | ||
71 | } | ||
72 | found: | ||
73 | #ifdef CONFIG_ZONE_DMA | ||
74 | if (flags & GFP_DMA) | ||
75 | return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep, | ||
76 | flags); | ||
77 | #endif | ||
78 | return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags); | ||
79 | } | ||
80 | return __kzalloc(size, flags); | ||
81 | } | ||
82 | |||
83 | #ifdef CONFIG_NUMA | 61 | #ifdef CONFIG_NUMA |
84 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); | 62 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); |
85 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 63 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
@@ -88,6 +66,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
88 | { | 66 | { |
89 | if (__builtin_constant_p(size)) { | 67 | if (__builtin_constant_p(size)) { |
90 | int i = 0; | 68 | int i = 0; |
69 | |||
70 | if (!size) | ||
71 | return ZERO_SIZE_PTR; | ||
72 | |||
91 | #define CACHE(x) \ | 73 | #define CACHE(x) \ |
92 | if (size <= x) \ | 74 | if (size <= x) \ |
93 | goto found; \ | 75 | goto found; \ |
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index a2daf2d418a9..59a3fa476ab9 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h | |||
@@ -33,14 +33,4 @@ static inline void *__kmalloc(size_t size, gfp_t flags) | |||
33 | return kmalloc(size, flags); | 33 | return kmalloc(size, flags); |
34 | } | 34 | } |
35 | 35 | ||
36 | /** | ||
37 | * kzalloc - allocate memory. The memory is set to zero. | ||
38 | * @size: how many bytes of memory are required. | ||
39 | * @flags: the type of memory to allocate (see kcalloc). | ||
40 | */ | ||
41 | static inline void *kzalloc(size_t size, gfp_t flags) | ||
42 | { | ||
43 | return __kzalloc(size, flags); | ||
44 | } | ||
45 | |||
46 | #endif /* __LINUX_SLOB_DEF_H */ | 36 | #endif /* __LINUX_SLOB_DEF_H */ |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index a582f6771525..07f7e4cbcee3 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -16,7 +16,9 @@ struct kmem_cache_node { | |||
16 | unsigned long nr_partial; | 16 | unsigned long nr_partial; |
17 | atomic_long_t nr_slabs; | 17 | atomic_long_t nr_slabs; |
18 | struct list_head partial; | 18 | struct list_head partial; |
19 | #ifdef CONFIG_SLUB_DEBUG | ||
19 | struct list_head full; | 20 | struct list_head full; |
21 | #endif | ||
20 | }; | 22 | }; |
21 | 23 | ||
22 | /* | 24 | /* |
@@ -44,7 +46,9 @@ struct kmem_cache { | |||
44 | int align; /* Alignment */ | 46 | int align; /* Alignment */ |
45 | const char *name; /* Name (only for display!) */ | 47 | const char *name; /* Name (only for display!) */ |
46 | struct list_head list; /* List of slab caches */ | 48 | struct list_head list; /* List of slab caches */ |
49 | #ifdef CONFIG_SLUB_DEBUG | ||
47 | struct kobject kobj; /* For sysfs */ | 50 | struct kobject kobj; /* For sysfs */ |
51 | #endif | ||
48 | 52 | ||
49 | #ifdef CONFIG_NUMA | 53 | #ifdef CONFIG_NUMA |
50 | int defrag_ratio; | 54 | int defrag_ratio; |
@@ -159,18 +163,6 @@ static inline struct kmem_cache *kmalloc_slab(size_t size) | |||
159 | #define SLUB_DMA 0 | 163 | #define SLUB_DMA 0 |
160 | #endif | 164 | #endif |
161 | 165 | ||
162 | |||
163 | /* | ||
164 | * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. | ||
165 | * | ||
166 | * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. | ||
167 | * | ||
168 | * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. | ||
169 | * Both make kfree a no-op. | ||
170 | */ | ||
171 | #define ZERO_SIZE_PTR ((void *)16) | ||
172 | |||
173 | |||
174 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 166 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
175 | void *__kmalloc(size_t size, gfp_t flags); | 167 | void *__kmalloc(size_t size, gfp_t flags); |
176 | 168 | ||
@@ -187,19 +179,6 @@ static inline void *kmalloc(size_t size, gfp_t flags) | |||
187 | return __kmalloc(size, flags); | 179 | return __kmalloc(size, flags); |
188 | } | 180 | } |
189 | 181 | ||
190 | static inline void *kzalloc(size_t size, gfp_t flags) | ||
191 | { | ||
192 | if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { | ||
193 | struct kmem_cache *s = kmalloc_slab(size); | ||
194 | |||
195 | if (!s) | ||
196 | return ZERO_SIZE_PTR; | ||
197 | |||
198 | return kmem_cache_zalloc(s, flags); | ||
199 | } else | ||
200 | return __kzalloc(size, flags); | ||
201 | } | ||
202 | |||
203 | #ifdef CONFIG_NUMA | 182 | #ifdef CONFIG_NUMA |
204 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 183 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
205 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 184 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
diff --git a/include/linux/smp.h b/include/linux/smp.h index 96ac21f8dd73..259a13c3bd98 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -99,11 +99,14 @@ static inline int up_smp_call_function(void) | |||
99 | static inline void smp_send_reschedule(int cpu) { } | 99 | static inline void smp_send_reschedule(int cpu) { } |
100 | #define num_booting_cpus() 1 | 100 | #define num_booting_cpus() 1 |
101 | #define smp_prepare_boot_cpu() do {} while (0) | 101 | #define smp_prepare_boot_cpu() do {} while (0) |
102 | static inline int smp_call_function_single(int cpuid, void (*func) (void *info), | 102 | #define smp_call_function_single(cpuid, func, info, retry, wait) \ |
103 | void *info, int retry, int wait) | 103 | ({ \ |
104 | { | 104 | WARN_ON(cpuid != 0); \ |
105 | return -EBUSY; | 105 | local_irq_disable(); \ |
106 | } | 106 | (func)(info); \ |
107 | local_irq_enable(); \ | ||
108 | 0; \ | ||
109 | }) | ||
107 | 110 | ||
108 | #endif /* !SMP */ | 111 | #endif /* !SMP */ |
109 | 112 | ||
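On uniprocessor builds the helper no longer pretends to fail with -EBUSY: it warns if any CPU other than 0 is named, runs the function locally with interrupts disabled, and evaluates to 0, so a caller now behaves the same on UP and SMP kernels. A hedged caller sketch using the five-argument signature in this tree:

#include <linux/smp.h>

static void example_sync_state(void *info)
{
	/* runs on the chosen CPU; on UP, locally with IRQs disabled */
}

static int example_poke_cpu0(void)
{
	/* (cpu, func, info, retry, wait) */
	return smp_call_function_single(0, example_sync_state, NULL, 0, 1);
}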
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 1be5ea059477..302b81d1d117 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -76,6 +76,7 @@ struct spi_device { | |||
76 | #define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) | 76 | #define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) |
77 | #define SPI_CS_HIGH 0x04 /* chipselect active high? */ | 77 | #define SPI_CS_HIGH 0x04 /* chipselect active high? */ |
78 | #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ | 78 | #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ |
79 | #define SPI_3WIRE 0x10 /* SI/SO signals shared */ | ||
79 | u8 bits_per_word; | 80 | u8 bits_per_word; |
80 | int irq; | 81 | int irq; |
81 | void *controller_state; | 82 | void *controller_state; |
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index 9dbca629dcfb..b8db32cea1de 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h | |||
@@ -26,6 +26,7 @@ struct spi_bitbang { | |||
26 | struct list_head queue; | 26 | struct list_head queue; |
27 | u8 busy; | 27 | u8 busy; |
28 | u8 use_dma; | 28 | u8 use_dma; |
29 | u8 flags; /* extra spi->mode support */ | ||
29 | 30 | ||
30 | struct spi_master *master; | 31 | struct spi_master *master; |
31 | 32 | ||
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h new file mode 100644 index 000000000000..60b59187e590 --- /dev/null +++ b/include/linux/spi/tle62x0.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * tle62x0.h - platform glue to Infineon TLE62x0 driver chips | ||
3 | * | ||
4 | * Copyright 2007 Simtec Electronics | ||
5 | * Ben Dooks <ben@simtec.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | struct tle62x0_pdata { | ||
22 | unsigned int init_state; | ||
23 | unsigned int gpio_count; | ||
24 | }; | ||
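tle62x0_pdata reaches the driver through the usual spi_board_info route. A hedged board-file sketch; the modalias, bus number, chip select and clock rate are assumptions rather than anything this header mandates.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/tle62x0.h>

static struct tle62x0_pdata example_tle62x0_pdata = {
	.init_state	= 0,	/* all switches off after probe */
	.gpio_count	= 16,	/* e.g. a 16-channel TLE6240 */
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "tle62x0",	/* assumed driver name */
		.platform_data	= &example_tle62x0_pdata,
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 1,
	},
};

/* board init: spi_register_board_info(example_spi_devices,
 *                                     ARRAY_SIZE(example_spi_devices)); */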
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 210549ba4ef4..f6a3a951b79e 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h | |||
@@ -9,14 +9,14 @@ | |||
9 | * Released under the General Public License (GPL). | 9 | * Released under the General Public License (GPL). |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/lockdep.h> | ||
13 | |||
14 | #if defined(CONFIG_SMP) | 12 | #if defined(CONFIG_SMP) |
15 | # include <asm/spinlock_types.h> | 13 | # include <asm/spinlock_types.h> |
16 | #else | 14 | #else |
17 | # include <linux/spinlock_types_up.h> | 15 | # include <linux/spinlock_types_up.h> |
18 | #endif | 16 | #endif |
19 | 17 | ||
18 | #include <linux/lockdep.h> | ||
19 | |||
20 | typedef struct { | 20 | typedef struct { |
21 | raw_spinlock_t raw_lock; | 21 | raw_spinlock_t raw_lock; |
22 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | 22 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) |
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 27644af20b7c..04135b0e198e 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h | |||
@@ -12,14 +12,10 @@ | |||
12 | * Released under the General Public License (GPL). | 12 | * Released under the General Public License (GPL). |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #if defined(CONFIG_DEBUG_SPINLOCK) || \ | 15 | #ifdef CONFIG_DEBUG_SPINLOCK |
16 | defined(CONFIG_DEBUG_LOCK_ALLOC) | ||
17 | 16 | ||
18 | typedef struct { | 17 | typedef struct { |
19 | volatile unsigned int slock; | 18 | volatile unsigned int slock; |
20 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
21 | struct lockdep_map dep_map; | ||
22 | #endif | ||
23 | } raw_spinlock_t; | 19 | } raw_spinlock_t; |
24 | 20 | ||
25 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 21 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } |
@@ -34,9 +30,6 @@ typedef struct { } raw_spinlock_t; | |||
34 | 30 | ||
35 | typedef struct { | 31 | typedef struct { |
36 | /* no debug version on UP */ | 32 | /* no debug version on UP */ |
37 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
38 | struct lockdep_map dep_map; | ||
39 | #endif | ||
40 | } raw_rwlock_t; | 33 | } raw_rwlock_t; |
41 | 34 | ||
42 | #define __RAW_RW_LOCK_UNLOCKED { } | 35 | #define __RAW_RW_LOCK_UNLOCKED { } |
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 1d2b084c0185..e7fa657d0c49 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h | |||
@@ -13,7 +13,7 @@ extern void save_stack_trace(struct stack_trace *trace); | |||
13 | extern void print_stack_trace(struct stack_trace *trace, int spaces); | 13 | extern void print_stack_trace(struct stack_trace *trace, int spaces); |
14 | #else | 14 | #else |
15 | # define save_stack_trace(trace) do { } while (0) | 15 | # define save_stack_trace(trace) do { } while (0) |
16 | # define print_stack_trace(trace) do { } while (0) | 16 | # define print_stack_trace(trace, spaces) do { } while (0) |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | #endif | 19 | #endif |
diff --git a/include/linux/string.h b/include/linux/string.h index 7f2eb6a477f9..836062b7582a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -105,8 +105,12 @@ extern void * memchr(const void *,int,__kernel_size_t); | |||
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | extern char *kstrdup(const char *s, gfp_t gfp); | 107 | extern char *kstrdup(const char *s, gfp_t gfp); |
108 | extern char *kstrndup(const char *s, size_t len, gfp_t gfp); | ||
108 | extern void *kmemdup(const void *src, size_t len, gfp_t gfp); | 109 | extern void *kmemdup(const void *src, size_t len, gfp_t gfp); |
109 | 110 | ||
111 | extern char **argv_split(gfp_t gfp, const char *str, int *argcp); | ||
112 | extern void argv_free(char **argv); | ||
113 | |||
110 | #ifdef __cplusplus | 114 | #ifdef __cplusplus |
111 | } | 115 | } |
112 | #endif | 116 | #endif |
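argv_split()/argv_free() give kernel code a kstrdup-style way to turn a whitespace-separated command line into an argv array; they were added together with the orderly_poweroff() helper seen in reboot.h above. A hedged usage sketch:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_split(void)
{
	int argc;
	char **argv = argv_split(GFP_KERNEL, "/sbin/poweroff -f", &argc);

	if (!argv)
		return -ENOMEM;

	/* argc is 2 here; the array is expected to be NULL-terminated so it
	 * can be passed to call_usermodehelper()-style interfaces */
	argv_free(argv);
	return 0;
}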
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index bbac101ac372..459c5fc11d51 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h | |||
@@ -58,6 +58,7 @@ u32 gss_unwrap( | |||
58 | u32 gss_delete_sec_context( | 58 | u32 gss_delete_sec_context( |
59 | struct gss_ctx **ctx_id); | 59 | struct gss_ctx **ctx_id); |
60 | 60 | ||
61 | u32 gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 service); | ||
61 | u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor); | 62 | u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor); |
62 | char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service); | 63 | char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service); |
63 | 64 | ||
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 129d50f2225c..8531a70da73d 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
@@ -212,6 +212,7 @@ struct svc_rqst { | |||
212 | struct svc_pool * rq_pool; /* thread pool */ | 212 | struct svc_pool * rq_pool; /* thread pool */ |
213 | struct svc_procedure * rq_procinfo; /* procedure info */ | 213 | struct svc_procedure * rq_procinfo; /* procedure info */ |
214 | struct auth_ops * rq_authop; /* authentication flavour */ | 214 | struct auth_ops * rq_authop; /* authentication flavour */ |
215 | u32 rq_flavor; /* pseudoflavor */ | ||
215 | struct svc_cred rq_cred; /* auth info */ | 216 | struct svc_cred rq_cred; /* auth info */ |
216 | struct sk_buff * rq_skbuff; /* fast recv inet buffer */ | 217 | struct sk_buff * rq_skbuff; /* fast recv inet buffer */ |
217 | struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ | 218 | struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ |
@@ -248,6 +249,7 @@ struct svc_rqst { | |||
248 | */ | 249 | */ |
249 | /* Catering to nfsd */ | 250 | /* Catering to nfsd */ |
250 | struct auth_domain * rq_client; /* RPC peer info */ | 251 | struct auth_domain * rq_client; /* RPC peer info */ |
252 | struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ | ||
251 | struct svc_cacherep * rq_cacherep; /* cache info */ | 253 | struct svc_cacherep * rq_cacherep; /* cache info */ |
252 | struct knfsd_fh * rq_reffh; /* Reference filehandle, used to | 254 | struct knfsd_fh * rq_reffh; /* Reference filehandle, used to |
253 | * determine what device number | 255 | * determine what device number |
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index de92619b0826..22e1ef8e200e 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h | |||
@@ -127,6 +127,7 @@ extern struct auth_domain *auth_unix_lookup(struct in_addr addr); | |||
127 | extern int auth_unix_forget_old(struct auth_domain *dom); | 127 | extern int auth_unix_forget_old(struct auth_domain *dom); |
128 | extern void svcauth_unix_purge(void); | 128 | extern void svcauth_unix_purge(void); |
129 | extern void svcauth_unix_info_release(void *); | 129 | extern void svcauth_unix_info_release(void *); |
130 | extern int svcauth_unix_set_client(struct svc_rqst *rqstp); | ||
130 | 131 | ||
131 | static inline unsigned long hash_str(char *name, int bits) | 132 | static inline unsigned long hash_str(char *name, int bits) |
132 | { | 133 | { |
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index 5a5db16ab660..417a1def56db 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h | |||
@@ -22,6 +22,7 @@ | |||
22 | int gss_svc_init(void); | 22 | int gss_svc_init(void); |
23 | void gss_svc_shutdown(void); | 23 | void gss_svc_shutdown(void); |
24 | int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); | 24 | int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); |
25 | u32 svcauth_gss_flavor(struct auth_domain *dom); | ||
25 | 26 | ||
26 | #endif /* __KERNEL__ */ | 27 | #endif /* __KERNEL__ */ |
27 | #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ | 28 | #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 9c7cb6430666..e8e6da394c92 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -43,14 +43,19 @@ static inline void pm_restore_console(void) {} | |||
43 | * @prepare: prepare system for hibernation | 43 | * @prepare: prepare system for hibernation |
44 | * @enter: shut down system after state has been saved to disk | 44 | * @enter: shut down system after state has been saved to disk |
45 | * @finish: finish/clean up after state has been reloaded | 45 | * @finish: finish/clean up after state has been reloaded |
46 | * @pre_restore: prepare system for the restoration from a hibernation image | ||
47 | * @restore_cleanup: clean up after a failing image restoration | ||
46 | */ | 48 | */ |
47 | struct hibernation_ops { | 49 | struct hibernation_ops { |
48 | int (*prepare)(void); | 50 | int (*prepare)(void); |
49 | int (*enter)(void); | 51 | int (*enter)(void); |
50 | void (*finish)(void); | 52 | void (*finish)(void); |
53 | int (*pre_restore)(void); | ||
54 | void (*restore_cleanup)(void); | ||
51 | }; | 55 | }; |
52 | 56 | ||
53 | #if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) | 57 | #ifdef CONFIG_PM |
58 | #ifdef CONFIG_SOFTWARE_SUSPEND | ||
54 | /* kernel/power/snapshot.c */ | 59 | /* kernel/power/snapshot.c */ |
55 | extern void __register_nosave_region(unsigned long b, unsigned long e, int km); | 60 | extern void __register_nosave_region(unsigned long b, unsigned long e, int km); |
56 | static inline void register_nosave_region(unsigned long b, unsigned long e) | 61 | static inline void register_nosave_region(unsigned long b, unsigned long e) |
@@ -68,16 +73,14 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); | |||
68 | 73 | ||
69 | extern void hibernation_set_ops(struct hibernation_ops *ops); | 74 | extern void hibernation_set_ops(struct hibernation_ops *ops); |
70 | extern int hibernate(void); | 75 | extern int hibernate(void); |
71 | #else | 76 | #else /* CONFIG_SOFTWARE_SUSPEND */ |
72 | static inline void register_nosave_region(unsigned long b, unsigned long e) {} | ||
73 | static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} | ||
74 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } | 77 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } |
75 | static inline void swsusp_set_page_free(struct page *p) {} | 78 | static inline void swsusp_set_page_free(struct page *p) {} |
76 | static inline void swsusp_unset_page_free(struct page *p) {} | 79 | static inline void swsusp_unset_page_free(struct page *p) {} |
77 | 80 | ||
78 | static inline void hibernation_set_ops(struct hibernation_ops *ops) {} | 81 | static inline void hibernation_set_ops(struct hibernation_ops *ops) {} |
79 | static inline int hibernate(void) { return -ENOSYS; } | 82 | static inline int hibernate(void) { return -ENOSYS; } |
80 | #endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */ | 83 | #endif /* CONFIG_SOFTWARE_SUSPEND */ |
81 | 84 | ||
82 | void save_processor_state(void); | 85 | void save_processor_state(void); |
83 | void restore_processor_state(void); | 86 | void restore_processor_state(void); |
@@ -85,4 +88,43 @@ struct saved_context; | |||
85 | void __save_processor_state(struct saved_context *ctxt); | 88 | void __save_processor_state(struct saved_context *ctxt); |
86 | void __restore_processor_state(struct saved_context *ctxt); | 89 | void __restore_processor_state(struct saved_context *ctxt); |
87 | 90 | ||
91 | /* kernel/power/main.c */ | ||
92 | extern struct blocking_notifier_head pm_chain_head; | ||
93 | |||
94 | static inline int register_pm_notifier(struct notifier_block *nb) | ||
95 | { | ||
96 | return blocking_notifier_chain_register(&pm_chain_head, nb); | ||
97 | } | ||
98 | |||
99 | static inline int unregister_pm_notifier(struct notifier_block *nb) | ||
100 | { | ||
101 | return blocking_notifier_chain_unregister(&pm_chain_head, nb); | ||
102 | } | ||
103 | |||
104 | #define pm_notifier(fn, pri) { \ | ||
105 | static struct notifier_block fn##_nb = \ | ||
106 | { .notifier_call = fn, .priority = pri }; \ | ||
107 | register_pm_notifier(&fn##_nb); \ | ||
108 | } | ||
109 | #else /* CONFIG_PM */ | ||
110 | |||
111 | static inline int register_pm_notifier(struct notifier_block *nb) | ||
112 | { | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static inline int unregister_pm_notifier(struct notifier_block *nb) | ||
117 | { | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) | ||
122 | #endif /* CONFIG_PM */ | ||
123 | |||
124 | #if !defined CONFIG_SOFTWARE_SUSPEND || !defined(CONFIG_PM) | ||
125 | static inline void register_nosave_region(unsigned long b, unsigned long e) | ||
126 | { | ||
127 | } | ||
128 | #endif | ||
129 | |||
88 | #endif /* _LINUX_SWSUSP_H */ | 130 | #endif /* _LINUX_SWSUSP_H */ |
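
With the CONFIG_PM/CONFIG_SOFTWARE_SUSPEND split above, register_pm_notifier(), unregister_pm_notifier() and the pm_notifier() convenience macro become available whenever power management is built in. A hedged sketch of a driver hooking the chain; the PM_HIBERNATION_PREPARE/PM_POST_HIBERNATION event codes are assumed to come from the matching linux/notifier.h change, which is not shown here:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

/* Sketch only: quiesce a (hypothetical) device around hibernation. */
static int foo_pm_event(struct notifier_block *nb, unsigned long event,
			void *unused)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:	/* image is about to be created */
		/* stop DMA, park the hardware, ... */
		break;
	case PM_POST_HIBERNATION:	/* back to normal operation */
		/* restart the device */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_pm_nb = {
	.notifier_call = foo_pm_event,
};

static int __init foo_init(void)
{
	return register_pm_notifier(&foo_pm_nb);
}

static void __exit foo_exit(void)
{
	unregister_pm_notifier(&foo_pm_nb);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
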
diff --git a/include/linux/swap.h b/include/linux/swap.h index 006868881346..665f85f2a3af 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -188,7 +188,8 @@ extern int rotate_reclaimable_page(struct page *page); | |||
188 | extern void swap_setup(void); | 188 | extern void swap_setup(void); |
189 | 189 | ||
190 | /* linux/mm/vmscan.c */ | 190 | /* linux/mm/vmscan.c */ |
191 | extern unsigned long try_to_free_pages(struct zone **, gfp_t); | 191 | extern unsigned long try_to_free_pages(struct zone **zones, int order, |
192 | gfp_t gfp_mask); | ||
192 | extern unsigned long shrink_all_memory(unsigned long nr_pages); | 193 | extern unsigned long shrink_all_memory(unsigned long nr_pages); |
193 | extern int vm_swappiness; | 194 | extern int vm_swappiness; |
194 | extern int remove_mapping(struct address_space *mapping, struct page *page); | 195 | extern int remove_mapping(struct address_space *mapping, struct page *page); |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 83d0ec11235e..7a8b1e3322e0 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -610,6 +610,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas | |||
610 | asmlinkage long sys_timerfd(int ufd, int clockid, int flags, | 610 | asmlinkage long sys_timerfd(int ufd, int clockid, int flags, |
611 | const struct itimerspec __user *utmr); | 611 | const struct itimerspec __user *utmr); |
612 | asmlinkage long sys_eventfd(unsigned int count); | 612 | asmlinkage long sys_eventfd(unsigned int count); |
613 | asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); | ||
613 | 614 | ||
614 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 615 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); |
615 | 616 | ||
diff --git a/include/linux/time.h b/include/linux/time.h index 4bb05a829be9..ec3b0ced0afe 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -36,7 +36,8 @@ struct timezone { | |||
36 | #define NSEC_PER_SEC 1000000000L | 36 | #define NSEC_PER_SEC 1000000000L |
37 | #define FSEC_PER_SEC 1000000000000000L | 37 | #define FSEC_PER_SEC 1000000000000000L |
38 | 38 | ||
39 | static inline int timespec_equal(struct timespec *a, struct timespec *b) | 39 | static inline int timespec_equal(const struct timespec *a, |
40 | const struct timespec *b) | ||
40 | { | 41 | { |
41 | return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); | 42 | return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); |
42 | } | 43 | } |
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h new file mode 100644 index 000000000000..44c28e94df50 --- /dev/null +++ b/include/linux/uio_driver.h | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * include/linux/uio_driver.h | ||
3 | * | ||
4 | * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> | ||
5 | * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> | ||
6 | * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de> | ||
7 | * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> | ||
8 | * | ||
9 | * Userspace IO driver. | ||
10 | * | ||
11 | * Licensed under the GPLv2 only. | ||
12 | */ | ||
13 | |||
14 | #ifndef _UIO_DRIVER_H_ | ||
15 | #define _UIO_DRIVER_H_ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | |||
21 | /** | ||
22 | * struct uio_mem - description of a UIO memory region | ||
23 | * @kobj: kobject for this mapping | ||
24 | * @addr: address of the device's memory | ||
25 | * @size: size of IO | ||
26 | * @memtype: type of memory addr points to | ||
27 | * @internal_addr: ioremap-ped version of addr, for driver internal use | ||
28 | */ | ||
29 | struct uio_mem { | ||
30 | struct kobject kobj; | ||
31 | unsigned long addr; | ||
32 | unsigned long size; | ||
33 | int memtype; | ||
34 | void __iomem *internal_addr; | ||
35 | }; | ||
36 | |||
37 | #define MAX_UIO_MAPS 5 | ||
38 | |||
39 | struct uio_device; | ||
40 | |||
41 | /** | ||
42 | * struct uio_info - UIO device capabilities | ||
43 | * @uio_dev: the UIO device this info belongs to | ||
44 | * @name: device name | ||
45 | * @version: device driver version | ||
46 | * @mem: list of mappable memory regions, size==0 for end of list | ||
47 | * @irq: interrupt number or UIO_IRQ_CUSTOM | ||
48 | * @irq_flags: flags for request_irq() | ||
49 | * @priv: optional private data | ||
50 | * @handler: the device's irq handler | ||
51 | * @mmap: mmap operation for this uio device | ||
52 | * @open: open operation for this uio device | ||
53 | * @release: release operation for this uio device | ||
54 | */ | ||
55 | struct uio_info { | ||
56 | struct uio_device *uio_dev; | ||
57 | char *name; | ||
58 | char *version; | ||
59 | struct uio_mem mem[MAX_UIO_MAPS]; | ||
60 | long irq; | ||
61 | unsigned long irq_flags; | ||
62 | void *priv; | ||
63 | irqreturn_t (*handler)(int irq, struct uio_info *dev_info); | ||
64 | int (*mmap)(struct uio_info *info, struct vm_area_struct *vma); | ||
65 | int (*open)(struct uio_info *info, struct inode *inode); | ||
66 | int (*release)(struct uio_info *info, struct inode *inode); | ||
67 | }; | ||
68 | |||
69 | extern int __must_check | ||
70 | __uio_register_device(struct module *owner, | ||
71 | struct device *parent, | ||
72 | struct uio_info *info); | ||
73 | static inline int __must_check | ||
74 | uio_register_device(struct device *parent, struct uio_info *info) | ||
75 | { | ||
76 | return __uio_register_device(THIS_MODULE, parent, info); | ||
77 | } | ||
78 | extern void uio_unregister_device(struct uio_info *info); | ||
79 | extern void uio_event_notify(struct uio_info *info); | ||
80 | |||
81 | /* defines for uio_device->irq */ | ||
82 | #define UIO_IRQ_CUSTOM -1 | ||
83 | #define UIO_IRQ_NONE -2 | ||
84 | |||
85 | /* defines for uio_device->memtype */ | ||
86 | #define UIO_MEM_NONE 0 | ||
87 | #define UIO_MEM_PHYS 1 | ||
88 | #define UIO_MEM_LOGICAL 2 | ||
89 | #define UIO_MEM_VIRTUAL 3 | ||
90 | |||
91 | #endif /* _LINUX_UIO_DRIVER_H_ */ | ||
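
The header above is the entire kernel-side contract of the new Userspace IO framework: a driver describes its mappable regions and interrupt in a struct uio_info and registers it against a parent device; everything else happens through the generic UIO character device. A hypothetical platform-driver-style sketch (device name, register base and IRQ number are invented):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static irqreturn_t my_uio_handler(int irq, struct uio_info *info)
{
	/* acknowledge/mask the interrupt in device registers here */
	return IRQ_HANDLED;
}

static struct uio_info my_uio_info = {
	.name      = "my_uio_card",	/* invented */
	.version   = "0.1",
	.irq       = 42,		/* invented IRQ line */
	.irq_flags = 0,
	.handler   = my_uio_handler,
};

static int my_uio_probe(struct platform_device *pdev)
{
	/* One mappable region; mem[1].size stays 0 and terminates the list. */
	my_uio_info.mem[0].addr    = 0xfc000000;	/* invented base */
	my_uio_info.mem[0].size    = 0x1000;
	my_uio_info.mem[0].memtype = UIO_MEM_PHYS;

	return uio_register_device(&pdev->dev, &my_uio_info);
}

static int my_uio_remove(struct platform_device *pdev)
{
	uio_unregister_device(&my_uio_info);
	return 0;
}
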
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 132b260aef1e..c2b10cae5da5 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -70,6 +70,10 @@ extern int map_vm_area(struct vm_struct *area, pgprot_t prot, | |||
70 | struct page ***pages); | 70 | struct page ***pages); |
71 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); | 71 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); |
72 | 72 | ||
73 | /* Allocate/destroy a 'vmalloc' VM area. */ | ||
74 | extern struct vm_struct *alloc_vm_area(size_t size); | ||
75 | extern void free_vm_area(struct vm_struct *area); | ||
76 | |||
73 | /* | 77 | /* |
74 | * Internals. Dont't use.. | 78 | * Internals. Dont't use.. |
75 | */ | 79 | */ |
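
alloc_vm_area()/free_vm_area() reserve a page-table-backed range of vmalloc address space without allocating any pages behind it, which is what the Xen grant-table code added later in this patch set needs for mapping foreign frames. A small usage sketch:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct vm_struct *window;

static int reserve_window(void)
{
	/* Reserve one page of kernel virtual address space; nothing is
	 * mapped yet, the caller installs PTEs at window->addr later. */
	window = alloc_vm_area(PAGE_SIZE);
	return window ? 0 : -ENOMEM;
}

static void release_window(void)
{
	free_vm_area(window);
	window = NULL;
}
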
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d9325cf8a134..75370ec0923e 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #define HIGHMEM_ZONE(xx) | 25 | #define HIGHMEM_ZONE(xx) |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) | 28 | #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE |
29 | 29 | ||
30 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | 30 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, |
31 | FOR_ALL_ZONES(PGALLOC), | 31 | FOR_ALL_ZONES(PGALLOC), |
@@ -170,7 +170,8 @@ static inline unsigned long node_page_state(int node, | |||
170 | #ifdef CONFIG_HIGHMEM | 170 | #ifdef CONFIG_HIGHMEM |
171 | zone_page_state(&zones[ZONE_HIGHMEM], item) + | 171 | zone_page_state(&zones[ZONE_HIGHMEM], item) + |
172 | #endif | 172 | #endif |
173 | zone_page_state(&zones[ZONE_NORMAL], item); | 173 | zone_page_state(&zones[ZONE_NORMAL], item) + |
174 | zone_page_state(&zones[ZONE_MOVABLE], item); | ||
174 | } | 175 | } |
175 | 176 | ||
176 | extern void zone_statistics(struct zonelist *, struct zone *); | 177 | extern void zone_statistics(struct zonelist *, struct zone *); |
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index d961635d0e61..699b7e9864fa 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h | |||
@@ -75,6 +75,8 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); | |||
75 | int vt_waitactive(int vt); | 75 | int vt_waitactive(int vt); |
76 | void change_console(struct vc_data *new_vc); | 76 | void change_console(struct vc_data *new_vc); |
77 | void reset_vc(struct vc_data *vc); | 77 | void reset_vc(struct vc_data *vc); |
78 | extern int unbind_con_driver(const struct consw *csw, int first, int last, | ||
79 | int deflt); | ||
78 | 80 | ||
79 | /* | 81 | /* |
80 | * vc_screen.c shares this temporary buffer with the console write code so that | 82 | * vc_screen.c shares this temporary buffer with the console write code so that |
diff --git a/include/media/saa7146.h b/include/media/saa7146.h index d3f4f5a38214..67703249b245 100644 --- a/include/media/saa7146.h +++ b/include/media/saa7146.h | |||
@@ -114,7 +114,7 @@ struct saa7146_dev | |||
114 | struct mutex lock; | 114 | struct mutex lock; |
115 | 115 | ||
116 | unsigned char __iomem *mem; /* pointer to mapped IO memory */ | 116 | unsigned char __iomem *mem; /* pointer to mapped IO memory */ |
117 | int revision; /* chip revision; needed for bug-workarounds*/ | 117 | u32 revision; /* chip revision; needed for bug-workarounds*/ |
118 | 118 | ||
119 | /* pci-device & irq stuff*/ | 119 | /* pci-device & irq stuff*/ |
120 | char name[32]; | 120 | char name[32]; |
@@ -157,8 +157,8 @@ struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc); | |||
157 | int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt); | 157 | int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt); |
158 | void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt); | 158 | void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt); |
159 | int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length ); | 159 | int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length ); |
160 | char *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt); | 160 | void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt); |
161 | void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, char *mem, struct saa7146_pgtable *pt); | 161 | void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt); |
162 | void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data); | 162 | void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data); |
163 | int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop); | 163 | int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop); |
164 | 164 | ||
diff --git a/include/media/tuner.h b/include/media/tuner.h index 6dcf3c45707d..160381c72e4b 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h | |||
@@ -23,8 +23,6 @@ | |||
23 | #define _TUNER_H | 23 | #define _TUNER_H |
24 | 24 | ||
25 | #include <linux/videodev2.h> | 25 | #include <linux/videodev2.h> |
26 | #include <linux/i2c.h> | ||
27 | #include <media/tuner-types.h> | ||
28 | 26 | ||
29 | extern int tuner_debug; | 27 | extern int tuner_debug; |
30 | 28 | ||
@@ -124,6 +122,7 @@ extern int tuner_debug; | |||
124 | #define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */ | 122 | #define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */ |
125 | #define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */ | 123 | #define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */ |
126 | #define TUNER_TDA9887 74 /* This tuner should be used only internally */ | 124 | #define TUNER_TDA9887 74 /* This tuner should be used only internally */ |
125 | #define TUNER_TEA5761 75 /* Only FM Radio Tuner */ | ||
127 | 126 | ||
128 | /* tv card specific */ | 127 | /* tv card specific */ |
129 | #define TDA9887_PRESENT (1<<0) | 128 | #define TDA9887_PRESENT (1<<0) |
@@ -182,74 +181,6 @@ struct tuner_setup { | |||
182 | int (*tuner_callback) (void *dev, int command,int arg); | 181 | int (*tuner_callback) (void *dev, int command,int arg); |
183 | }; | 182 | }; |
184 | 183 | ||
185 | struct tuner { | ||
186 | /* device */ | ||
187 | struct i2c_client i2c; | ||
188 | |||
189 | unsigned int type; /* chip type */ | ||
190 | |||
191 | unsigned int mode; | ||
192 | unsigned int mode_mask; /* Combination of allowable modes */ | ||
193 | |||
194 | unsigned int tv_freq; /* keep track of the current settings */ | ||
195 | unsigned int radio_freq; | ||
196 | u16 last_div; | ||
197 | unsigned int audmode; | ||
198 | v4l2_std_id std; | ||
199 | |||
200 | int using_v4l2; | ||
201 | |||
202 | /* used by tda9887 */ | ||
203 | unsigned int tda9887_config; | ||
204 | unsigned char tda9887_data[4]; | ||
205 | |||
206 | /* used by MT2032 */ | ||
207 | unsigned int xogc; | ||
208 | unsigned int radio_if2; | ||
209 | |||
210 | /* used by tda8290 */ | ||
211 | unsigned char tda8290_easy_mode; | ||
212 | unsigned char tda827x_lpsel; | ||
213 | unsigned char tda827x_addr; | ||
214 | unsigned char tda827x_ver; | ||
215 | unsigned int sgIF; | ||
216 | |||
217 | unsigned int config; | ||
218 | int (*tuner_callback) (void *dev, int command,int arg); | ||
219 | |||
220 | /* function ptrs */ | ||
221 | void (*set_tv_freq)(struct i2c_client *c, unsigned int freq); | ||
222 | void (*set_radio_freq)(struct i2c_client *c, unsigned int freq); | ||
223 | int (*has_signal)(struct i2c_client *c); | ||
224 | int (*is_stereo)(struct i2c_client *c); | ||
225 | int (*get_afc)(struct i2c_client *c); | ||
226 | void (*tuner_status)(struct i2c_client *c); | ||
227 | void (*standby)(struct i2c_client *c); | ||
228 | }; | ||
229 | |||
230 | extern unsigned const int tuner_count; | ||
231 | |||
232 | extern int microtune_init(struct i2c_client *c); | ||
233 | extern int xc3028_init(struct i2c_client *c); | ||
234 | extern int tda8290_init(struct i2c_client *c); | ||
235 | extern int tda8290_probe(struct i2c_client *c); | ||
236 | extern int tea5767_tuner_init(struct i2c_client *c); | ||
237 | extern int default_tuner_init(struct i2c_client *c); | ||
238 | extern int tea5767_autodetection(struct i2c_client *c); | ||
239 | extern int tda9887_tuner_init(struct i2c_client *c); | ||
240 | |||
241 | #define tuner_warn(fmt, arg...) do {\ | ||
242 | printk(KERN_WARNING "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \ | ||
243 | i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) | ||
244 | #define tuner_info(fmt, arg...) do {\ | ||
245 | printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \ | ||
246 | i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) | ||
247 | #define tuner_dbg(fmt, arg...) do {\ | ||
248 | extern int tuner_debug; \ | ||
249 | if (tuner_debug) \ | ||
250 | printk(KERN_DEBUG "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \ | ||
251 | i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) | ||
252 | |||
253 | #endif /* __KERNEL__ */ | 184 | #endif /* __KERNEL__ */ |
254 | 185 | ||
255 | #endif /* _TUNER_H */ | 186 | #endif /* _TUNER_H */ |
diff --git a/include/mtd/ubi-header.h b/include/mtd/ubi-header.h index fa479c71aa34..74efa7763479 100644 --- a/include/mtd/ubi-header.h +++ b/include/mtd/ubi-header.h | |||
@@ -74,42 +74,13 @@ enum { | |||
74 | UBI_COMPAT_REJECT = 5 | 74 | UBI_COMPAT_REJECT = 5 |
75 | }; | 75 | }; |
76 | 76 | ||
77 | /* | ||
78 | * ubi16_t/ubi32_t/ubi64_t - 16, 32, and 64-bit integers used in UBI on-flash | ||
79 | * data structures. | ||
80 | */ | ||
81 | typedef struct { | ||
82 | uint16_t int16; | ||
83 | } __attribute__ ((packed)) ubi16_t; | ||
84 | |||
85 | typedef struct { | ||
86 | uint32_t int32; | ||
87 | } __attribute__ ((packed)) ubi32_t; | ||
88 | |||
89 | typedef struct { | ||
90 | uint64_t int64; | ||
91 | } __attribute__ ((packed)) ubi64_t; | ||
92 | |||
93 | /* | ||
94 | * In this implementation of UBI uses the big-endian format for on-flash | ||
95 | * integers. The below are the corresponding conversion macros. | ||
96 | */ | ||
97 | #define cpu_to_ubi16(x) ((ubi16_t){__cpu_to_be16(x)}) | ||
98 | #define ubi16_to_cpu(x) ((uint16_t)__be16_to_cpu((x).int16)) | ||
99 | |||
100 | #define cpu_to_ubi32(x) ((ubi32_t){__cpu_to_be32(x)}) | ||
101 | #define ubi32_to_cpu(x) ((uint32_t)__be32_to_cpu((x).int32)) | ||
102 | |||
103 | #define cpu_to_ubi64(x) ((ubi64_t){__cpu_to_be64(x)}) | ||
104 | #define ubi64_to_cpu(x) ((uint64_t)__be64_to_cpu((x).int64)) | ||
105 | |||
106 | /* Sizes of UBI headers */ | 77 | /* Sizes of UBI headers */ |
107 | #define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr) | 78 | #define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr) |
108 | #define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr) | 79 | #define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr) |
109 | 80 | ||
110 | /* Sizes of UBI headers without the ending CRC */ | 81 | /* Sizes of UBI headers without the ending CRC */ |
111 | #define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(ubi32_t)) | 82 | #define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32)) |
112 | #define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(ubi32_t)) | 83 | #define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32)) |
113 | 84 | ||
114 | /** | 85 | /** |
115 | * struct ubi_ec_hdr - UBI erase counter header. | 86 | * struct ubi_ec_hdr - UBI erase counter header. |
@@ -137,14 +108,14 @@ typedef struct { | |||
137 | * eraseblocks. | 108 | * eraseblocks. |
138 | */ | 109 | */ |
139 | struct ubi_ec_hdr { | 110 | struct ubi_ec_hdr { |
140 | ubi32_t magic; | 111 | __be32 magic; |
141 | uint8_t version; | 112 | __u8 version; |
142 | uint8_t padding1[3]; | 113 | __u8 padding1[3]; |
143 | ubi64_t ec; /* Warning: the current limit is 31-bit anyway! */ | 114 | __be64 ec; /* Warning: the current limit is 31-bit anyway! */ |
144 | ubi32_t vid_hdr_offset; | 115 | __be32 vid_hdr_offset; |
145 | ubi32_t data_offset; | 116 | __be32 data_offset; |
146 | uint8_t padding2[36]; | 117 | __u8 padding2[36]; |
147 | ubi32_t hdr_crc; | 118 | __be32 hdr_crc; |
148 | } __attribute__ ((packed)); | 119 | } __attribute__ ((packed)); |
149 | 120 | ||
150 | /** | 121 | /** |
@@ -262,22 +233,22 @@ struct ubi_ec_hdr { | |||
262 | * software (say, cramfs) on top of the UBI volume. | 233 | * software (say, cramfs) on top of the UBI volume. |
263 | */ | 234 | */ |
264 | struct ubi_vid_hdr { | 235 | struct ubi_vid_hdr { |
265 | ubi32_t magic; | 236 | __be32 magic; |
266 | uint8_t version; | 237 | __u8 version; |
267 | uint8_t vol_type; | 238 | __u8 vol_type; |
268 | uint8_t copy_flag; | 239 | __u8 copy_flag; |
269 | uint8_t compat; | 240 | __u8 compat; |
270 | ubi32_t vol_id; | 241 | __be32 vol_id; |
271 | ubi32_t lnum; | 242 | __be32 lnum; |
272 | ubi32_t leb_ver; /* obsolete, to be removed, don't use */ | 243 | __be32 leb_ver; /* obsolete, to be removed, don't use */ |
273 | ubi32_t data_size; | 244 | __be32 data_size; |
274 | ubi32_t used_ebs; | 245 | __be32 used_ebs; |
275 | ubi32_t data_pad; | 246 | __be32 data_pad; |
276 | ubi32_t data_crc; | 247 | __be32 data_crc; |
277 | uint8_t padding1[4]; | 248 | __u8 padding1[4]; |
278 | ubi64_t sqnum; | 249 | __be64 sqnum; |
279 | uint8_t padding2[12]; | 250 | __u8 padding2[12]; |
280 | ubi32_t hdr_crc; | 251 | __be32 hdr_crc; |
281 | } __attribute__ ((packed)); | 252 | } __attribute__ ((packed)); |
282 | 253 | ||
283 | /* Internal UBI volumes count */ | 254 | /* Internal UBI volumes count */ |
@@ -306,7 +277,7 @@ struct ubi_vid_hdr { | |||
306 | #define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record) | 277 | #define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record) |
307 | 278 | ||
308 | /* Size of the volume table record without the ending CRC */ | 279 | /* Size of the volume table record without the ending CRC */ |
309 | #define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(ubi32_t)) | 280 | #define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32)) |
310 | 281 | ||
311 | /** | 282 | /** |
312 | * struct ubi_vtbl_record - a record in the volume table. | 283 | * struct ubi_vtbl_record - a record in the volume table. |
@@ -346,15 +317,15 @@ struct ubi_vid_hdr { | |||
346 | * Empty records contain all zeroes and the CRC checksum of those zeroes. | 317 | * Empty records contain all zeroes and the CRC checksum of those zeroes. |
347 | */ | 318 | */ |
348 | struct ubi_vtbl_record { | 319 | struct ubi_vtbl_record { |
349 | ubi32_t reserved_pebs; | 320 | __be32 reserved_pebs; |
350 | ubi32_t alignment; | 321 | __be32 alignment; |
351 | ubi32_t data_pad; | 322 | __be32 data_pad; |
352 | uint8_t vol_type; | 323 | __u8 vol_type; |
353 | uint8_t upd_marker; | 324 | __u8 upd_marker; |
354 | ubi16_t name_len; | 325 | __be16 name_len; |
355 | uint8_t name[UBI_VOL_NAME_MAX+1]; | 326 | __u8 name[UBI_VOL_NAME_MAX+1]; |
356 | uint8_t padding2[24]; | 327 | __u8 padding2[24]; |
357 | ubi32_t crc; | 328 | __be32 crc; |
358 | } __attribute__ ((packed)); | 329 | } __attribute__ ((packed)); |
359 | 330 | ||
360 | #endif /* !__UBI_HEADER_H__ */ | 331 | #endif /* !__UBI_HEADER_H__ */ |
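
With the private ubi16_t/ubi32_t/ubi64_t wrappers removed, on-flash UBI fields are plain __be16/__be32/__be64 and are read with the standard byte-order helpers instead of the old ubiXX_to_cpu() macros. A small sketch (validation of the magic and CRC is omitted):

#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <mtd/ubi-header.h>

/* Sketch: dump the interesting fields of an on-flash erase-counter header. */
static void show_ec_hdr(const struct ubi_ec_hdr *hdr)
{
	u32 magic    = be32_to_cpu(hdr->magic);
	u64 ec       = be64_to_cpu(hdr->ec);
	u32 vid_off  = be32_to_cpu(hdr->vid_hdr_offset);
	u32 data_off = be32_to_cpu(hdr->data_offset);

	printk(KERN_DEBUG "UBI EC hdr: magic %#x, EC %llu, VID @%u, data @%u\n",
	       magic, (unsigned long long)ec, vid_off, data_off);
}
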
diff --git a/include/net/scm.h b/include/net/scm.h index 5637d5e22d5f..423cb1d5ac25 100644 --- a/include/net/scm.h +++ b/include/net/scm.h | |||
@@ -8,7 +8,7 @@ | |||
8 | /* Well, we should have at least one descriptor open | 8 | /* Well, we should have at least one descriptor open |
9 | * to accept passed FDs 8) | 9 | * to accept passed FDs 8) |
10 | */ | 10 | */ |
11 | #define SCM_MAX_FD (OPEN_MAX-1) | 11 | #define SCM_MAX_FD 255 |
12 | 12 | ||
13 | struct scm_fp_list | 13 | struct scm_fp_list |
14 | { | 14 | { |
diff --git a/include/net/tcp.h b/include/net/tcp.h index a8af9ae00177..8b404b1ef7c8 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -652,8 +652,7 @@ struct tcp_congestion_ops { | |||
652 | /* lower bound for congestion window (optional) */ | 652 | /* lower bound for congestion window (optional) */ |
653 | u32 (*min_cwnd)(const struct sock *sk); | 653 | u32 (*min_cwnd)(const struct sock *sk); |
654 | /* do new cwnd calculation (required) */ | 654 | /* do new cwnd calculation (required) */ |
655 | void (*cong_avoid)(struct sock *sk, u32 ack, | 655 | void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight, int good_ack); |
656 | u32 rtt, u32 in_flight, int good_ack); | ||
657 | /* call before changing ca_state (optional) */ | 656 | /* call before changing ca_state (optional) */ |
658 | void (*set_state)(struct sock *sk, u8 new_state); | 657 | void (*set_state)(struct sock *sk, u8 new_state); |
659 | /* call when cwnd event occurs (optional) */ | 658 | /* call when cwnd event occurs (optional) */ |
@@ -684,8 +683,7 @@ extern void tcp_slow_start(struct tcp_sock *tp); | |||
684 | 683 | ||
685 | extern struct tcp_congestion_ops tcp_init_congestion_ops; | 684 | extern struct tcp_congestion_ops tcp_init_congestion_ops; |
686 | extern u32 tcp_reno_ssthresh(struct sock *sk); | 685 | extern u32 tcp_reno_ssthresh(struct sock *sk); |
687 | extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, | 686 | extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag); |
688 | u32 rtt, u32 in_flight, int flag); | ||
689 | extern u32 tcp_reno_min_cwnd(const struct sock *sk); | 687 | extern u32 tcp_reno_min_cwnd(const struct sock *sk); |
690 | extern struct tcp_congestion_ops tcp_reno; | 688 | extern struct tcp_congestion_ops tcp_reno; |
691 | 689 | ||
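
The cong_avoid() hook loses its rtt argument here, so congestion-control modules must be updated to the four-argument prototype. A hypothetical skeleton that simply defers to the Reno helpers declared above:

#include <linux/module.h>
#include <net/tcp.h>

static void demo_cong_avoid(struct sock *sk, u32 ack, u32 in_flight,
			    int good_ack)
{
	/* new prototype: no rtt parameter any more */
	tcp_reno_cong_avoid(sk, ack, in_flight, good_ack);
}

static struct tcp_congestion_ops demo_cong_ops = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= demo_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

static int __init demo_init(void)
{
	return tcp_register_congestion_control(&demo_cong_ops);
}

static void __exit demo_exit(void)
{
	tcp_unregister_congestion_control(&demo_cong_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
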
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index ae959e950174..a5f80bfbaaa4 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -585,7 +585,6 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct | |||
585 | struct xfrm_dst | 585 | struct xfrm_dst |
586 | { | 586 | { |
587 | union { | 587 | union { |
588 | struct xfrm_dst *next; | ||
589 | struct dst_entry dst; | 588 | struct dst_entry dst; |
590 | struct rtable rt; | 589 | struct rtable rt; |
591 | struct rt6_info rt6; | 590 | struct rt6_info rt6; |
diff --git a/include/video/tgafb.h b/include/video/tgafb.h index 03d0dbe293a8..7bc5e2c14826 100644 --- a/include/video/tgafb.h +++ b/include/video/tgafb.h | |||
@@ -216,6 +216,7 @@ struct tga_par { | |||
216 | u32 pll_freq; /* pixclock in mhz */ | 216 | u32 pll_freq; /* pixclock in mhz */ |
217 | u32 bits_per_pixel; /* bits per pixel */ | 217 | u32 bits_per_pixel; /* bits per pixel */ |
218 | u32 sync_on_green; /* set if sync is on green */ | 218 | u32 sync_on_green; /* set if sync is on green */ |
219 | u32 palette[16]; | ||
219 | }; | 220 | }; |
220 | 221 | ||
221 | 222 | ||
diff --git a/include/xen/events.h b/include/xen/events.h new file mode 100644 index 000000000000..2bde54d29be5 --- /dev/null +++ b/include/xen/events.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef _XEN_EVENTS_H | ||
2 | #define _XEN_EVENTS_H | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | |||
6 | #include <xen/interface/event_channel.h> | ||
7 | #include <asm/xen/hypercall.h> | ||
8 | |||
9 | enum ipi_vector { | ||
10 | XEN_RESCHEDULE_VECTOR, | ||
11 | XEN_CALL_FUNCTION_VECTOR, | ||
12 | |||
13 | XEN_NR_IPIS, | ||
14 | }; | ||
15 | |||
16 | int bind_evtchn_to_irq(unsigned int evtchn); | ||
17 | int bind_evtchn_to_irqhandler(unsigned int evtchn, | ||
18 | irq_handler_t handler, | ||
19 | unsigned long irqflags, const char *devname, | ||
20 | void *dev_id); | ||
21 | int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, | ||
22 | irq_handler_t handler, | ||
23 | unsigned long irqflags, const char *devname, | ||
24 | void *dev_id); | ||
25 | int bind_ipi_to_irqhandler(enum ipi_vector ipi, | ||
26 | unsigned int cpu, | ||
27 | irq_handler_t handler, | ||
28 | unsigned long irqflags, | ||
29 | const char *devname, | ||
30 | void *dev_id); | ||
31 | |||
32 | /* | ||
33 | * Common unbind function for all event sources. Takes IRQ to unbind from. | ||
34 | * Automatically closes the underlying event channel (even for bindings | ||
35 | * made with bind_evtchn_to_irqhandler()). | ||
36 | */ | ||
37 | void unbind_from_irqhandler(unsigned int irq, void *dev_id); | ||
38 | |||
39 | void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); | ||
40 | |||
41 | static inline void notify_remote_via_evtchn(int port) | ||
42 | { | ||
43 | struct evtchn_send send = { .port = port }; | ||
44 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); | ||
45 | } | ||
46 | |||
47 | extern void notify_remote_via_irq(int irq); | ||
48 | #endif /* _XEN_EVENTS_H */ | ||
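
The event-channel API above deliberately mirrors request_irq(): a frontend binds a port to an ordinary irq_handler_t and later unbinds it, which also closes the channel. A hedged sketch; the port number is assumed to have been negotiated over xenbus, and bind_evtchn_to_irqhandler() is assumed to return the bound IRQ on success:

#include <linux/interrupt.h>
#include <xen/events.h>

static int frontend_irq = -1;

static irqreturn_t frontend_interrupt(int irq, void *dev_id)
{
	/* consume responses from the shared ring here */
	return IRQ_HANDLED;
}

static int frontend_connect(unsigned int evtchn, void *priv)
{
	int err;

	err = bind_evtchn_to_irqhandler(evtchn, frontend_interrupt, 0,
					"demo-frontend", priv);
	if (err < 0)
		return err;
	frontend_irq = err;

	notify_remote_via_irq(frontend_irq);	/* kick the backend once */
	return 0;
}

static void frontend_disconnect(void *priv)
{
	if (frontend_irq >= 0)
		unbind_from_irqhandler(frontend_irq, priv);
	frontend_irq = -1;
}
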
diff --git a/include/xen/features.h b/include/xen/features.h new file mode 100644 index 000000000000..27292d4d2a6a --- /dev/null +++ b/include/xen/features.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /****************************************************************************** | ||
2 | * features.h | ||
3 | * | ||
4 | * Query the features reported by Xen. | ||
5 | * | ||
6 | * Copyright (c) 2006, Ian Campbell | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_FEATURES_H__ | ||
10 | #define __XEN_FEATURES_H__ | ||
11 | |||
12 | #include <xen/interface/features.h> | ||
13 | |||
14 | void xen_setup_features(void); | ||
15 | |||
16 | extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; | ||
17 | |||
18 | static inline int xen_feature(int flag) | ||
19 | { | ||
20 | return xen_features[flag]; | ||
21 | } | ||
22 | |||
23 | #endif /* __ASM_XEN_FEATURES_H__ */ | ||
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h new file mode 100644 index 000000000000..761c83498e03 --- /dev/null +++ b/include/xen/grant_table.h | |||
@@ -0,0 +1,107 @@ | |||
1 | /****************************************************************************** | ||
2 | * grant_table.h | ||
3 | * | ||
4 | * Two sets of functionality: | ||
5 | * 1. Granting foreign access to our memory reservation. | ||
6 | * 2. Accessing others' memory reservations via grant references. | ||
7 | * (i.e., mechanisms for both sender and recipient of grant references) | ||
8 | * | ||
9 | * Copyright (c) 2004-2005, K A Fraser | ||
10 | * Copyright (c) 2005, Christopher Clark | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License version 2 | ||
14 | * as published by the Free Software Foundation; or, when distributed | ||
15 | * separately from the Linux kernel or incorporated into other | ||
16 | * software packages, subject to the following license: | ||
17 | * | ||
18 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
19 | * of this source file (the "Software"), to deal in the Software without | ||
20 | * restriction, including without limitation the rights to use, copy, modify, | ||
21 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
22 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
23 | * the following conditions: | ||
24 | * | ||
25 | * The above copyright notice and this permission notice shall be included in | ||
26 | * all copies or substantial portions of the Software. | ||
27 | * | ||
28 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
29 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
30 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
31 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
32 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
33 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
34 | * IN THE SOFTWARE. | ||
35 | */ | ||
36 | |||
37 | #ifndef __ASM_GNTTAB_H__ | ||
38 | #define __ASM_GNTTAB_H__ | ||
39 | |||
40 | #include <asm/xen/hypervisor.h> | ||
41 | #include <xen/interface/grant_table.h> | ||
42 | |||
43 | /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ | ||
44 | #define NR_GRANT_FRAMES 4 | ||
45 | |||
46 | struct gnttab_free_callback { | ||
47 | struct gnttab_free_callback *next; | ||
48 | void (*fn)(void *); | ||
49 | void *arg; | ||
50 | u16 count; | ||
51 | }; | ||
52 | |||
53 | int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, | ||
54 | int readonly); | ||
55 | |||
56 | /* | ||
57 | * End access through the given grant reference, iff the grant entry is no | ||
58 | * longer in use. Return 1 if the grant entry was freed, 0 if it is still in | ||
59 | * use. | ||
60 | */ | ||
61 | int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); | ||
62 | |||
63 | /* | ||
64 | * Eventually end access through the given grant reference, and once that | ||
65 | * access has been ended, free the given page too. Access will be ended | ||
66 | * immediately iff the grant entry is not in use, otherwise it will happen | ||
67 | * some time later. page may be 0, in which case no freeing will occur. | ||
68 | */ | ||
69 | void gnttab_end_foreign_access(grant_ref_t ref, int readonly, | ||
70 | unsigned long page); | ||
71 | |||
72 | int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); | ||
73 | |||
74 | unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); | ||
75 | unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); | ||
76 | |||
77 | int gnttab_query_foreign_access(grant_ref_t ref); | ||
78 | |||
79 | /* | ||
80 | * operations on reserved batches of grant references | ||
81 | */ | ||
82 | int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); | ||
83 | |||
84 | void gnttab_free_grant_reference(grant_ref_t ref); | ||
85 | |||
86 | void gnttab_free_grant_references(grant_ref_t head); | ||
87 | |||
88 | int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); | ||
89 | |||
90 | int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); | ||
91 | |||
92 | void gnttab_release_grant_reference(grant_ref_t *private_head, | ||
93 | grant_ref_t release); | ||
94 | |||
95 | void gnttab_request_free_callback(struct gnttab_free_callback *callback, | ||
96 | void (*fn)(void *), void *arg, u16 count); | ||
97 | void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); | ||
98 | |||
99 | void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, | ||
100 | unsigned long frame, int readonly); | ||
101 | |||
102 | void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, | ||
103 | unsigned long pfn); | ||
104 | |||
105 | #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) | ||
106 | |||
107 | #endif /* __ASM_GNTTAB_H__ */ | ||
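
The usual producer-side pattern with this API is: grant the backend domain access to a frame, hand the resulting grant reference over (for example through a shared ring), and end the access once the I/O has completed. A hedged sketch; obtaining the machine frame number (mfn) is left to the caller:

#include <xen/grant_table.h>

/* Sketch: share one machine frame read-only with a backend domain.
 * Returns a grant reference, or a negative error. */
static int share_frame(domid_t backend, unsigned long mfn)
{
	return gnttab_grant_foreign_access(backend, mfn, 1 /* readonly */);
}

static void unshare_frame(grant_ref_t ref)
{
	/* Access ends immediately if the backend no longer maps the frame,
	 * otherwise some time later; page == 0 means nothing is freed. */
	gnttab_end_foreign_access(ref, 1 /* readonly */, 0);
}
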
diff --git a/include/xen/hvc-console.h b/include/xen/hvc-console.h new file mode 100644 index 000000000000..21c0ecfd786d --- /dev/null +++ b/include/xen/hvc-console.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef XEN_HVC_CONSOLE_H | ||
2 | #define XEN_HVC_CONSOLE_H | ||
3 | |||
4 | extern struct console xenboot_console; | ||
5 | |||
6 | #endif /* XEN_HVC_CONSOLE_H */ | ||
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h new file mode 100644 index 000000000000..a64d3df5bd95 --- /dev/null +++ b/include/xen/interface/elfnote.h | |||
@@ -0,0 +1,133 @@ | |||
1 | /****************************************************************************** | ||
2 | * elfnote.h | ||
3 | * | ||
4 | * Definitions used for the Xen ELF notes. | ||
5 | * | ||
6 | * Copyright (c) 2006, Ian Campbell, XenSource Ltd. | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_ELFNOTE_H__ | ||
10 | #define __XEN_PUBLIC_ELFNOTE_H__ | ||
11 | |||
12 | /* | ||
13 | * The notes should live in a SHT_NOTE segment and have "Xen" in the | ||
14 | * name field. | ||
15 | * | ||
16 | * Numeric types are either 4 or 8 bytes depending on the content of | ||
17 | * the desc field. | ||
18 | * | ||
19 | * LEGACY indicates the fields in the legacy __xen_guest string which | ||
20 | * this note type replaces. | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * NAME=VALUE pair (string). | ||
25 | * | ||
26 | * LEGACY: FEATURES and PAE | ||
27 | */ | ||
28 | #define XEN_ELFNOTE_INFO 0 | ||
29 | |||
30 | /* | ||
31 | * The virtual address of the entry point (numeric). | ||
32 | * | ||
33 | * LEGACY: VIRT_ENTRY | ||
34 | */ | ||
35 | #define XEN_ELFNOTE_ENTRY 1 | ||
36 | |||
37 | /* The virtual address of the hypercall transfer page (numeric). | ||
38 | * | ||
39 | * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page | ||
40 | * number not a virtual address) | ||
41 | */ | ||
42 | #define XEN_ELFNOTE_HYPERCALL_PAGE 2 | ||
43 | |||
44 | /* The virtual address where the kernel image should be mapped (numeric). | ||
45 | * | ||
46 | * Defaults to 0. | ||
47 | * | ||
48 | * LEGACY: VIRT_BASE | ||
49 | */ | ||
50 | #define XEN_ELFNOTE_VIRT_BASE 3 | ||
51 | |||
52 | /* | ||
53 | * The offset of the ELF paddr field from the actual required | ||
54 | * pseudo-physical address (numeric). | ||
55 | * | ||
56 | * This is used to maintain backwards compatibility with older kernels | ||
57 | * which wrote __PAGE_OFFSET into that field. This field defaults to 0 | ||
58 | * if not present. | ||
59 | * | ||
60 | * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) | ||
61 | */ | ||
62 | #define XEN_ELFNOTE_PADDR_OFFSET 4 | ||
63 | |||
64 | /* | ||
65 | * The version of Xen that we work with (string). | ||
66 | * | ||
67 | * LEGACY: XEN_VER | ||
68 | */ | ||
69 | #define XEN_ELFNOTE_XEN_VERSION 5 | ||
70 | |||
71 | /* | ||
72 | * The name of the guest operating system (string). | ||
73 | * | ||
74 | * LEGACY: GUEST_OS | ||
75 | */ | ||
76 | #define XEN_ELFNOTE_GUEST_OS 6 | ||
77 | |||
78 | /* | ||
79 | * The version of the guest operating system (string). | ||
80 | * | ||
81 | * LEGACY: GUEST_VER | ||
82 | */ | ||
83 | #define XEN_ELFNOTE_GUEST_VERSION 7 | ||
84 | |||
85 | /* | ||
86 | * The loader type (string). | ||
87 | * | ||
88 | * LEGACY: LOADER | ||
89 | */ | ||
90 | #define XEN_ELFNOTE_LOADER 8 | ||
91 | |||
92 | /* | ||
93 | * The kernel supports PAE (x86/32 only, string = "yes" or "no"). | ||
94 | * | ||
95 | * LEGACY: PAE (n.b. The legacy interface included a provision to | ||
96 | * indicate 'extended-cr3' support allowing L3 page tables to be | ||
97 | * placed above 4G. It is assumed that any kernel new enough to use | ||
98 | * these ELF notes will include this and therefore "yes" here is | ||
99 | * equivalent to "yes[extended-cr3]" in the __xen_guest interface.) | ||
100 | */ | ||
101 | #define XEN_ELFNOTE_PAE_MODE 9 | ||
102 | |||
103 | /* | ||
104 | * The features supported/required by this kernel (string). | ||
105 | * | ||
106 | * The string must consist of a list of feature names (as given in | ||
107 | * features.h, without the "XENFEAT_" prefix) separated by '|' | ||
108 | * characters. If a feature is required for the kernel to function | ||
109 | * then the feature name must be preceded by a '!' character. | ||
110 | * | ||
111 | * LEGACY: FEATURES | ||
112 | */ | ||
113 | #define XEN_ELFNOTE_FEATURES 10 | ||
114 | |||
115 | /* | ||
116 | * The kernel requires the symbol table to be loaded (string = "yes" or "no") | ||
117 | * LEGACY: BSD_SYMTAB (n.b. The legacy interface treated the presence or | ||
118 | * absence of this string as a boolean flag rather than requiring "yes" | ||
119 | * or "no".) | ||
120 | */ | ||
121 | #define XEN_ELFNOTE_BSD_SYMTAB 11 | ||
122 | |||
123 | #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ | ||
124 | |||
125 | /* | ||
126 | * Local variables: | ||
127 | * mode: C | ||
128 | * c-set-style: "BSD" | ||
129 | * c-basic-offset: 4 | ||
130 | * tab-width: 4 | ||
131 | * indent-tabs-mode: nil | ||
132 | * End: | ||
133 | */ | ||
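
These note types are embedded in the guest kernel image so the domain builder can discover entry points and capabilities without parsing a __xen_guest string. The kernel emits them from assembly; the sketch below only illustrates the standard ELF note record layout such an entry would have (the C representation and the ".note.Xen" section name are illustrative assumptions):

#include <stdint.h>

/* One ELF note record: Nhdr (namesz/descsz/type), then the name and the
 * descriptor, each padded to a 4-byte boundary. */
struct xen_guest_os_note {
	uint32_t namesz;	/* strlen("Xen") + 1 */
	uint32_t descsz;	/* strlen("linux") + 1 */
	uint32_t type;		/* XEN_ELFNOTE_GUEST_OS */
	char name[4];		/* "Xen", already 4-byte aligned */
	char desc[8];		/* "linux" plus padding */
};

static const struct xen_guest_os_note guest_os_note
	__attribute__((used, section(".note.Xen"), aligned(4))) = {
	.namesz = 4,
	.descsz = 6,
	.type   = 6,		/* XEN_ELFNOTE_GUEST_OS */
	.name   = "Xen",
	.desc   = "linux",
};
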
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h new file mode 100644 index 000000000000..919b5bdcb2bd --- /dev/null +++ b/include/xen/interface/event_channel.h | |||
@@ -0,0 +1,195 @@ | |||
1 | /****************************************************************************** | ||
2 | * event_channel.h | ||
3 | * | ||
4 | * Event channels between domains. | ||
5 | * | ||
6 | * Copyright (c) 2003-2004, K A Fraser. | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ | ||
10 | #define __XEN_PUBLIC_EVENT_CHANNEL_H__ | ||
11 | |||
12 | typedef uint32_t evtchn_port_t; | ||
13 | DEFINE_GUEST_HANDLE(evtchn_port_t); | ||
14 | |||
15 | /* | ||
16 | * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as | ||
17 | * accepting interdomain bindings from domain <remote_dom>. A fresh port | ||
18 | * is allocated in <dom> and returned as <port>. | ||
19 | * NOTES: | ||
20 | * 1. If the caller is unprivileged then <dom> must be DOMID_SELF. | ||
21 | * 2. <rdom> may be DOMID_SELF, allowing loopback connections. | ||
22 | */ | ||
23 | #define EVTCHNOP_alloc_unbound 6 | ||
24 | struct evtchn_alloc_unbound { | ||
25 | /* IN parameters */ | ||
26 | domid_t dom, remote_dom; | ||
27 | /* OUT parameters */ | ||
28 | evtchn_port_t port; | ||
29 | }; | ||
30 | |||
31 | /* | ||
32 | * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between | ||
33 | * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify | ||
34 | * a port that is unbound and marked as accepting bindings from the calling | ||
35 | * domain. A fresh port is allocated in the calling domain and returned as | ||
36 | * <local_port>. | ||
37 | * NOTES: | ||
38 | * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections. | ||
39 | */ | ||
40 | #define EVTCHNOP_bind_interdomain 0 | ||
41 | struct evtchn_bind_interdomain { | ||
42 | /* IN parameters. */ | ||
43 | domid_t remote_dom; | ||
44 | evtchn_port_t remote_port; | ||
45 | /* OUT parameters. */ | ||
46 | evtchn_port_t local_port; | ||
47 | }; | ||
48 | |||
49 | /* | ||
50 | * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified | ||
51 | * vcpu. | ||
52 | * NOTES: | ||
53 | * 1. A virtual IRQ may be bound to at most one event channel per vcpu. | ||
54 | * 2. The allocated event channel is bound to the specified vcpu. The binding | ||
55 | * may not be changed. | ||
56 | */ | ||
57 | #define EVTCHNOP_bind_virq 1 | ||
58 | struct evtchn_bind_virq { | ||
59 | /* IN parameters. */ | ||
60 | uint32_t virq; | ||
61 | uint32_t vcpu; | ||
62 | /* OUT parameters. */ | ||
63 | evtchn_port_t port; | ||
64 | }; | ||
65 | |||
66 | /* | ||
67 | * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>. | ||
68 | * NOTES: | ||
69 | * 1. A physical IRQ may be bound to at most one event channel per domain. | ||
70 | * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. | ||
71 | */ | ||
72 | #define EVTCHNOP_bind_pirq 2 | ||
73 | struct evtchn_bind_pirq { | ||
74 | /* IN parameters. */ | ||
75 | uint32_t pirq; | ||
76 | #define BIND_PIRQ__WILL_SHARE 1 | ||
77 | uint32_t flags; /* BIND_PIRQ__* */ | ||
78 | /* OUT parameters. */ | ||
79 | evtchn_port_t port; | ||
80 | }; | ||
81 | |||
82 | /* | ||
83 | * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. | ||
84 | * NOTES: | ||
85 | * 1. The allocated event channel is bound to the specified vcpu. The binding | ||
86 | * may not be changed. | ||
87 | */ | ||
88 | #define EVTCHNOP_bind_ipi 7 | ||
89 | struct evtchn_bind_ipi { | ||
90 | uint32_t vcpu; | ||
91 | /* OUT parameters. */ | ||
92 | evtchn_port_t port; | ||
93 | }; | ||
94 | |||
95 | /* | ||
96 | * EVTCHNOP_close: Close a local event channel <port>. If the channel is | ||
97 | * interdomain then the remote end is placed in the unbound state | ||
98 | * (EVTCHNSTAT_unbound), awaiting a new connection. | ||
99 | */ | ||
100 | #define EVTCHNOP_close 3 | ||
101 | struct evtchn_close { | ||
102 | /* IN parameters. */ | ||
103 | evtchn_port_t port; | ||
104 | }; | ||
105 | |||
106 | /* | ||
107 | * EVTCHNOP_send: Send an event to the remote end of the channel whose local | ||
108 | * endpoint is <port>. | ||
109 | */ | ||
110 | #define EVTCHNOP_send 4 | ||
111 | struct evtchn_send { | ||
112 | /* IN parameters. */ | ||
113 | evtchn_port_t port; | ||
114 | }; | ||
115 | |||
116 | /* | ||
117 | * EVTCHNOP_status: Get the current status of the communication channel which | ||
118 | * has an endpoint at <dom, port>. | ||
119 | * NOTES: | ||
120 | * 1. <dom> may be specified as DOMID_SELF. | ||
121 | * 2. Only a sufficiently-privileged domain may obtain the status of an event | ||
122 | * channel for which <dom> is not DOMID_SELF. | ||
123 | */ | ||
124 | #define EVTCHNOP_status 5 | ||
125 | struct evtchn_status { | ||
126 | /* IN parameters */ | ||
127 | domid_t dom; | ||
128 | evtchn_port_t port; | ||
129 | /* OUT parameters */ | ||
130 | #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ | ||
131 | #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ | ||
132 | #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ | ||
133 | #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ | ||
134 | #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ | ||
135 | #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ | ||
136 | uint32_t status; | ||
137 | uint32_t vcpu; /* VCPU to which this channel is bound. */ | ||
138 | union { | ||
139 | struct { | ||
140 | domid_t dom; | ||
141 | } unbound; /* EVTCHNSTAT_unbound */ | ||
142 | struct { | ||
143 | domid_t dom; | ||
144 | evtchn_port_t port; | ||
145 | } interdomain; /* EVTCHNSTAT_interdomain */ | ||
146 | uint32_t pirq; /* EVTCHNSTAT_pirq */ | ||
147 | uint32_t virq; /* EVTCHNSTAT_virq */ | ||
148 | } u; | ||
149 | }; | ||
150 | |||
151 | /* | ||
152 | * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an | ||
153 | * event is pending. | ||
154 | * NOTES: | ||
155 | * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised | ||
156 | * the binding. This binding cannot be changed. | ||
157 | * 2. All other channels notify vcpu0 by default. This default is set when | ||
158 | * the channel is allocated (a port that is freed and subsequently reused | ||
159 | * has its binding reset to vcpu0). | ||
160 | */ | ||
161 | #define EVTCHNOP_bind_vcpu 8 | ||
162 | struct evtchn_bind_vcpu { | ||
163 | /* IN parameters. */ | ||
164 | evtchn_port_t port; | ||
165 | uint32_t vcpu; | ||
166 | }; | ||
167 | |||
168 | /* | ||
169 | * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver | ||
170 | * a notification to the appropriate VCPU if an event is pending. | ||
171 | */ | ||
172 | #define EVTCHNOP_unmask 9 | ||
173 | struct evtchn_unmask { | ||
174 | /* IN parameters. */ | ||
175 | evtchn_port_t port; | ||
176 | }; | ||
177 | |||
178 | struct evtchn_op { | ||
179 | uint32_t cmd; /* EVTCHNOP_* */ | ||
180 | union { | ||
181 | struct evtchn_alloc_unbound alloc_unbound; | ||
182 | struct evtchn_bind_interdomain bind_interdomain; | ||
183 | struct evtchn_bind_virq bind_virq; | ||
184 | struct evtchn_bind_pirq bind_pirq; | ||
185 | struct evtchn_bind_ipi bind_ipi; | ||
186 | struct evtchn_close close; | ||
187 | struct evtchn_send send; | ||
188 | struct evtchn_status status; | ||
189 | struct evtchn_bind_vcpu bind_vcpu; | ||
190 | struct evtchn_unmask unmask; | ||
191 | } u; | ||
192 | }; | ||
193 | DEFINE_GUEST_HANDLE_STRUCT(evtchn_op); | ||
194 | |||
195 | #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ | ||
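
Each of these operations is issued through one hypercall that takes an EVTCHNOP_* command and the matching argument structure; notify_remote_via_evtchn() in xen/events.h above is the simplest example. A hedged sketch of allocating an unbound port to offer to another domain (DOMID_SELF and the HYPERVISOR_event_channel_op() wrapper come from other Xen headers and are assumed here):

#include <asm/xen/hypercall.h>
#include <xen/interface/event_channel.h>

/* Sketch: ask Xen for a fresh local port that <remote_dom> may later
 * connect to with EVTCHNOP_bind_interdomain. */
static int alloc_unbound_port(domid_t remote_dom, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc = {
		.dom        = DOMID_SELF,
		.remote_dom = remote_dom,
	};
	int err;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
	if (err)
		return err;

	*port = alloc.port;
	return 0;
}
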
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h new file mode 100644 index 000000000000..d73228d16488 --- /dev/null +++ b/include/xen/interface/features.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /****************************************************************************** | ||
2 | * features.h | ||
3 | * | ||
4 | * Feature flags, reported by XENVER_get_features. | ||
5 | * | ||
6 | * Copyright (c) 2006, Keir Fraser <keir@xensource.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_FEATURES_H__ | ||
10 | #define __XEN_PUBLIC_FEATURES_H__ | ||
11 | |||
12 | /* | ||
13 | * If set, the guest does not need to write-protect its pagetables, and can | ||
14 | * update them via direct writes. | ||
15 | */ | ||
16 | #define XENFEAT_writable_page_tables 0 | ||
17 | |||
18 | /* | ||
19 | * If set, the guest does not need to write-protect its segment descriptor | ||
20 | * tables, and can update them via direct writes. | ||
21 | */ | ||
22 | #define XENFEAT_writable_descriptor_tables 1 | ||
23 | |||
24 | /* | ||
25 | * If set, translation between the guest's 'pseudo-physical' address space | ||
26 | * and the host's machine address space is handled by the hypervisor. In this | ||
27 | * mode the guest does not need to perform phys-to/from-machine translations | ||
28 | * when performing page table operations. | ||
29 | */ | ||
30 | #define XENFEAT_auto_translated_physmap 2 | ||
31 | |||
32 | /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ | ||
33 | #define XENFEAT_supervisor_mode_kernel 3 | ||
34 | |||
35 | /* | ||
36 | * If set, the guest does not need to allocate x86 PAE page directories | ||
37 | * below 4GB. This flag is usually implied by auto_translated_physmap. | ||
38 | */ | ||
39 | #define XENFEAT_pae_pgdir_above_4gb 4 | ||
40 | |||
41 | #define XENFEAT_NR_SUBMAPS 1 | ||
42 | |||
43 | #endif /* __XEN_PUBLIC_FEATURES_H__ */ | ||
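
Together with xen/features.h earlier in this patch, these flags are fetched once at boot via xen_setup_features() and then tested with xen_feature(). A small sketch:

#include <linux/kernel.h>
#include <xen/features.h>
#include <xen/interface/features.h>

/* Sketch: report what the hypervisor offered us. */
static void demo_report_features(void)
{
	xen_setup_features();

	printk(KERN_INFO "Xen: writable page tables: %s\n",
	       xen_feature(XENFEAT_writable_page_tables) ? "yes" : "no");
	printk(KERN_INFO "Xen: auto-translated physmap: %s\n",
	       xen_feature(XENFEAT_auto_translated_physmap) ? "yes" : "no");
}
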
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h new file mode 100644 index 000000000000..219049802cf2 --- /dev/null +++ b/include/xen/interface/grant_table.h | |||
@@ -0,0 +1,375 @@ | |||
1 | /****************************************************************************** | ||
2 | * grant_table.h | ||
3 | * | ||
4 | * Interface for granting foreign access to page frames, and receiving | ||
5 | * page-ownership transfers. | ||
6 | * | ||
7 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
8 | * of this software and associated documentation files (the "Software"), to | ||
9 | * deal in the Software without restriction, including without limitation the | ||
10 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
11 | * sell copies of the Software, and to permit persons to whom the Software is | ||
12 | * furnished to do so, subject to the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice shall be included in | ||
15 | * all copies or substantial portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
20 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | * Copyright (c) 2004, K A Fraser | ||
26 | */ | ||
27 | |||
28 | #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ | ||
29 | #define __XEN_PUBLIC_GRANT_TABLE_H__ | ||
30 | |||
31 | |||
32 | /*********************************** | ||
33 | * GRANT TABLE REPRESENTATION | ||
34 | */ | ||
35 | |||
36 | /* Some rough guidelines on accessing and updating grant-table entries | ||
37 | * in a concurrency-safe manner. For more information, Linux contains a | ||
38 | * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). | ||
39 | * | ||
40 | * NB. WMB is a no-op on current-generation x86 processors. However, a | ||
41 | * compiler barrier will still be required. | ||
42 | * | ||
43 | * Introducing a valid entry into the grant table: | ||
44 | * 1. Write ent->domid. | ||
45 | * 2. Write ent->frame: | ||
46 | * GTF_permit_access: Frame to which access is permitted. | ||
47 | * GTF_accept_transfer: Pseudo-phys frame slot being filled by new | ||
48 | * frame, or zero if none. | ||
49 | * 3. Write memory barrier (WMB). | ||
50 | * 4. Write ent->flags, inc. valid type. | ||
51 | * | ||
52 | * Invalidating an unused GTF_permit_access entry: | ||
53 | * 1. flags = ent->flags. | ||
54 | * 2. Observe that !(flags & (GTF_reading|GTF_writing)). | ||
55 | * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). | ||
56 | * NB. No need for WMB as reuse of entry is control-dependent on success of | ||
57 | * step 3, and all architectures guarantee ordering of ctrl-dep writes. | ||
58 | * | ||
59 | * Invalidating an in-use GTF_permit_access entry: | ||
60 | * This cannot be done directly. Request assistance from the domain controller | ||
61 | * which can set a timeout on the use of a grant entry and take necessary | ||
62 | * action. (NB. This is not yet implemented!). | ||
63 | * | ||
64 | * Invalidating an unused GTF_accept_transfer entry: | ||
65 | * 1. flags = ent->flags. | ||
66 | * 2. Observe that !(flags & GTF_transfer_committed). [*] | ||
67 | * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). | ||
68 | * NB. No need for WMB as reuse of entry is control-dependent on success of | ||
69 | * step 3, and all architectures guarantee ordering of ctrl-dep writes. | ||
70 | * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. | ||
71 | * The guest must /not/ modify the grant entry until the address of the | ||
72 | * transferred frame is written. It is safe for the guest to spin waiting | ||
73 | * for this to occur (detect by observing GTF_transfer_completed in | ||
74 | * ent->flags). | ||
75 | * | ||
76 | * Invalidating a committed GTF_accept_transfer entry: | ||
77 | * 1. Wait for (ent->flags & GTF_transfer_completed). | ||
78 | * | ||
79 | * Changing a GTF_permit_access from writable to read-only: | ||
80 | * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. | ||
81 | * | ||
82 | * Changing a GTF_permit_access from read-only to writable: | ||
83 | * Use SMP-safe bit-setting instruction. | ||
84 | */ | ||
85 | |||
86 | /* | ||
87 | * A grant table comprises a packed array of grant entries in one or more | ||
88 | * page frames shared between Xen and a guest. | ||
89 | * [XEN]: This field is written by Xen and read by the sharing guest. | ||
90 | * [GST]: This field is written by the guest and read by Xen. | ||
91 | */ | ||
92 | struct grant_entry { | ||
93 | /* GTF_xxx: various type and flag information. [XEN,GST] */ | ||
94 | uint16_t flags; | ||
95 | /* The domain being granted foreign privileges. [GST] */ | ||
96 | domid_t domid; | ||
97 | /* | ||
98 | * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] | ||
99 | * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] | ||
100 | */ | ||
101 | uint32_t frame; | ||
102 | }; | ||
103 | |||
104 | /* | ||
105 | * Type of grant entry. | ||
106 | * GTF_invalid: This grant entry grants no privileges. | ||
107 | * GTF_permit_access: Allow @domid to map/access @frame. | ||
108 | * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame | ||
109 | * to this guest. Xen writes the page number to @frame. | ||
110 | */ | ||
111 | #define GTF_invalid (0U<<0) | ||
112 | #define GTF_permit_access (1U<<0) | ||
113 | #define GTF_accept_transfer (2U<<0) | ||
114 | #define GTF_type_mask (3U<<0) | ||
115 | |||
116 | /* | ||
117 | * Subflags for GTF_permit_access. | ||
118 | * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] | ||
119 | * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] | ||
120 | * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] | ||
121 | */ | ||
122 | #define _GTF_readonly (2) | ||
123 | #define GTF_readonly (1U<<_GTF_readonly) | ||
124 | #define _GTF_reading (3) | ||
125 | #define GTF_reading (1U<<_GTF_reading) | ||
126 | #define _GTF_writing (4) | ||
127 | #define GTF_writing (1U<<_GTF_writing) | ||
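[Editor's note] As a small illustration of the update rules spelled out above (introduce a valid GTF_permit_access entry, then invalidate it once it is unused), here is a hedged guest-side sketch. It uses only struct grant_entry and the GTF_* flags from this header; the GCC __sync builtins stand in for whatever write-barrier and SMP-safe CMPXCHG primitives the guest OS really provides.

    /* Steps follow the "Introducing a valid entry" recipe above. */
    static void grant_permit_access(struct grant_entry *ent, domid_t domid,
                                    uint32_t frame, int readonly)
    {
        ent->domid = domid;                         /* step 1 */
        ent->frame = frame;                         /* step 2 */
        __sync_synchronize();                       /* step 3: WMB stand-in */
        ent->flags = GTF_permit_access |            /* step 4: entry goes live */
                     (readonly ? GTF_readonly : 0);
    }

    /* Steps follow the "Invalidating an unused GTF_permit_access entry"
     * recipe above. Returns non-zero if the entry may now be reused. */
    static int grant_end_access(struct grant_entry *ent)
    {
        uint16_t flags = ent->flags;                /* step 1 */

        if (flags & (GTF_reading | GTF_writing))    /* step 2: still mapped */
            return 0;
        /* Step 3: SMP-safe CMPXCHG stand-in. */
        return __sync_bool_compare_and_swap(&ent->flags, flags, 0);
    }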
128 | |||
129 | /* | ||
130 | * Subflags for GTF_accept_transfer: | ||
131 | * GTF_transfer_committed: Xen sets this flag to indicate that it is committed | ||
132 | * to transferring ownership of a page frame. When a guest sees this flag | ||
133 | * it must /not/ modify the grant entry until GTF_transfer_completed is | ||
134 | * set by Xen. | ||
135 | * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag | ||
136 | * after reading GTF_transfer_committed. Xen will always write the frame | ||
137 | * address, followed by ORing this flag, in a timely manner. | ||
138 | */ | ||
139 | #define _GTF_transfer_committed (2) | ||
140 | #define GTF_transfer_committed (1U<<_GTF_transfer_committed) | ||
141 | #define _GTF_transfer_completed (3) | ||
142 | #define GTF_transfer_completed (1U<<_GTF_transfer_completed) | ||
143 | |||
144 | |||
145 | /*********************************** | ||
146 | * GRANT TABLE QUERIES AND USES | ||
147 | */ | ||
148 | |||
149 | /* | ||
150 | * Reference to a grant entry in a specified domain's grant table. | ||
151 | */ | ||
152 | typedef uint32_t grant_ref_t; | ||
153 | |||
154 | /* | ||
155 | * Handle to track a mapping created via a grant reference. | ||
156 | */ | ||
157 | typedef uint32_t grant_handle_t; | ||
158 | |||
159 | /* | ||
160 | * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access | ||
161 | * by devices and/or host CPUs. If successful, <handle> is a tracking number | ||
162 | * that must be presented later to destroy the mapping(s). On error, <handle> | ||
163 | * is a negative status code. | ||
164 | * NOTES: | ||
165 | * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address | ||
166 | * via which I/O devices may access the granted frame. | ||
167 | * 2. If GNTMAP_host_map is specified then a mapping will be added at | ||
168 | * either a host virtual address in the current address space, or at | ||
169 | * a PTE at the specified machine address. The type of mapping to | ||
170 | * perform is selected through the GNTMAP_contains_pte flag, and the | ||
171 | * address is specified in <host_addr>. | ||
172 | * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a | ||
173 | * host mapping is destroyed by other means then it is *NOT* guaranteed | ||
174 | * to be accounted to the correct grant reference! | ||
175 | */ | ||
176 | #define GNTTABOP_map_grant_ref 0 | ||
177 | struct gnttab_map_grant_ref { | ||
178 | /* IN parameters. */ | ||
179 | uint64_t host_addr; | ||
180 | uint32_t flags; /* GNTMAP_* */ | ||
181 | grant_ref_t ref; | ||
182 | domid_t dom; | ||
183 | /* OUT parameters. */ | ||
184 | int16_t status; /* GNTST_* */ | ||
185 | grant_handle_t handle; | ||
186 | uint64_t dev_bus_addr; | ||
187 | }; | ||
188 | |||
189 | /* | ||
190 | * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings | ||
191 | * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that | ||
192 | * field is ignored. If non-zero, it must refer to a device/host mapping | ||
193 | * that is tracked by <handle>. | ||
194 | * NOTES: | ||
195 | * 1. The call may fail in an undefined manner if either mapping is not | ||
196 | * tracked by <handle>. | ||
197 | * 2. After executing a batch of unmaps, it is guaranteed that no stale | ||
198 | * mappings will remain in the device or host TLBs. | ||
199 | */ | ||
200 | #define GNTTABOP_unmap_grant_ref 1 | ||
201 | struct gnttab_unmap_grant_ref { | ||
202 | /* IN parameters. */ | ||
203 | uint64_t host_addr; | ||
204 | uint64_t dev_bus_addr; | ||
205 | grant_handle_t handle; | ||
206 | /* OUT parameters. */ | ||
207 | int16_t status; /* GNTST_* */ | ||
208 | }; | ||
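[Editor's note] To make the IN/OUT split of these two structures concrete, here is a hedged sketch that maps a foreign grant at a host virtual address and then tears the mapping down again. HYPERVISOR_grant_table_op(cmd, uop, count) is assumed to be the wrapper exported by the guest's hypercall layer, and the GNTMAP_*/GNTST_* values used here are defined further down in this header.

    extern int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
                                         unsigned int count); /* assumed wrapper */

    static int map_then_unmap(domid_t dom, grant_ref_t ref, unsigned long vaddr)
    {
        struct gnttab_map_grant_ref map = {
            .host_addr = vaddr,            /* map at this host virtual address */
            .flags     = GNTMAP_host_map,  /* CPU mapping; no GNTMAP_contains_pte */
            .ref       = ref,
            .dom       = dom,
        };
        struct gnttab_unmap_grant_ref unmap;

        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map, 1))
            return -1;                     /* hypercall itself failed */
        if (map.status != GNTST_okay)
            return map.status;             /* negative GNTST_* code */

        unmap.host_addr    = vaddr;
        unmap.dev_bus_addr = 0;            /* no device mapping to destroy */
        unmap.handle       = map.handle;   /* tracking number from the map op */

        HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap, 1);
        return unmap.status;
    }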
209 | |||
210 | /* | ||
211 | * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least | ||
212 | * <nr_frames> pages. The frame addresses are written to the <frame_list>. | ||
213 | * Only <nr_frames> addresses are written, even if the table is larger. | ||
214 | * NOTES: | ||
215 | * 1. <dom> may be specified as DOMID_SELF. | ||
216 | * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF. | ||
217 | * 3. Xen may not support more than a single grant-table page per domain. | ||
218 | */ | ||
219 | #define GNTTABOP_setup_table 2 | ||
220 | struct gnttab_setup_table { | ||
221 | /* IN parameters. */ | ||
222 | domid_t dom; | ||
223 | uint32_t nr_frames; | ||
224 | /* OUT parameters. */ | ||
225 | int16_t status; /* GNTST_* */ | ||
226 | ulong *frame_list; | ||
227 | }; | ||
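[Editor's note] A hedged sketch of how a guest might use this operation to learn which machine frames back its own grant table, reusing the assumed HYPERVISOR_grant_table_op wrapper from the previous sketch; DOMID_SELF comes from the main Xen header.

    static int get_grant_frames(ulong *frames, uint32_t nr)
    {
        struct gnttab_setup_table setup = {
            .dom        = DOMID_SELF,      /* our own grant table */
            .nr_frames  = nr,
            .frame_list = frames,          /* Xen writes the frame addresses here */
        };

        if (HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1))
            return -1;
        return setup.status;               /* GNTST_okay (0) on success */
    }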
228 | |||
229 | /* | ||
230 | * GNTTABOP_dump_table: Dump the contents of the grant table to the | ||
231 | * xen console. Debugging use only. | ||
232 | */ | ||
233 | #define GNTTABOP_dump_table 3 | ||
234 | struct gnttab_dump_table { | ||
235 | /* IN parameters. */ | ||
236 | domid_t dom; | ||
237 | /* OUT parameters. */ | ||
238 | int16_t status; /* GNTST_* */ | ||
239 | }; | ||
240 | |||
241 | /* | ||
242 | * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The | ||
243 | * foreign domain has previously registered its interest in the transfer via | ||
244 | * <domid, ref>. | ||
245 | * | ||
246 | * Note that, even if the transfer fails, the specified page no longer belongs | ||
247 | * to the calling domain *unless* the error is GNTST_bad_page. | ||
248 | */ | ||
249 | #define GNTTABOP_transfer 4 | ||
250 | struct gnttab_transfer { | ||
251 | /* IN parameters. */ | ||
252 | unsigned long mfn; | ||
253 | domid_t domid; | ||
254 | grant_ref_t ref; | ||
255 | /* OUT parameters. */ | ||
256 | int16_t status; | ||
257 | }; | ||
258 | |||
259 | |||
260 | /* | ||
261 | * GNTTABOP_copy: Hypervisor-based copy. | ||
262 | * The source and destination can each be either an MFN or, for foreign | ||
263 | * domains, a grant reference. The foreign domain has to grant read/write | ||
264 | * access in its grant table. | ||
265 | * | ||
266 | * The flags specify whether the source and destination are MFNs or | ||
267 | * grant references. | ||
268 | * | ||
269 | * Note that this can also be used to copy data between two domains | ||
270 | * via a third party if the source and destination domains have previously | ||
271 | * granted appropriate access to their pages to the third party. | ||
272 | * | ||
273 | * source_offset specifies an offset in the source frame, dest_offset | ||
274 | * the offset in the target frame and len specifies the number of | ||
275 | * bytes to be copied. | ||
276 | */ | ||
277 | |||
278 | #define _GNTCOPY_source_gref (0) | ||
279 | #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) | ||
280 | #define _GNTCOPY_dest_gref (1) | ||
281 | #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) | ||
282 | |||
283 | #define GNTTABOP_copy 5 | ||
284 | struct gnttab_copy { | ||
285 | /* IN parameters. */ | ||
286 | struct { | ||
287 | union { | ||
288 | grant_ref_t ref; | ||
289 | unsigned long gmfn; | ||
290 | } u; | ||
291 | domid_t domid; | ||
292 | uint16_t offset; | ||
293 | } source, dest; | ||
294 | uint16_t len; | ||
295 | uint16_t flags; /* GNTCOPY_* */ | ||
296 | /* OUT parameters. */ | ||
297 | int16_t status; | ||
298 | }; | ||
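[Editor's note] A hedged sketch of a single copy from a foreign domain's granted page into a local frame, using the structure above. DOMID_SELF again comes from the main Xen header, and the hypercall wrapper is the same assumption as in the earlier sketches.

    static int copy_from_foreign(domid_t src_dom, grant_ref_t src_ref,
                                 unsigned long local_gmfn, uint16_t len)
    {
        struct gnttab_copy op = {
            .source.u.ref  = src_ref,      /* source named by grant reference */
            .source.domid  = src_dom,
            .source.offset = 0,
            .dest.u.gmfn   = local_gmfn,   /* destination named by frame number */
            .dest.domid    = DOMID_SELF,
            .dest.offset   = 0,
            .len           = len,          /* must not cross a page boundary */
            .flags         = GNTCOPY_source_gref, /* dest is a GMFN, not a gref */
        };

        HYPERVISOR_grant_table_op(GNTTABOP_copy, &op, 1);
        return op.status;
    }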
299 | |||
300 | /* | ||
301 | * GNTTABOP_query_size: Query the current and maximum sizes of the shared | ||
302 | * grant table. | ||
303 | * NOTES: | ||
304 | * 1. <dom> may be specified as DOMID_SELF. | ||
305 | * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF. | ||
306 | */ | ||
307 | #define GNTTABOP_query_size 6 | ||
308 | struct gnttab_query_size { | ||
309 | /* IN parameters. */ | ||
310 | domid_t dom; | ||
311 | /* OUT parameters. */ | ||
312 | uint32_t nr_frames; | ||
313 | uint32_t max_nr_frames; | ||
314 | int16_t status; /* GNTST_* */ | ||
315 | }; | ||
316 | |||
317 | |||
318 | /* | ||
319 | * Bitfield values for update_pin_status.flags. | ||
320 | */ | ||
321 | /* Map the grant entry for access by I/O devices. */ | ||
322 | #define _GNTMAP_device_map (0) | ||
323 | #define GNTMAP_device_map (1<<_GNTMAP_device_map) | ||
324 | /* Map the grant entry for access by host CPUs. */ | ||
325 | #define _GNTMAP_host_map (1) | ||
326 | #define GNTMAP_host_map (1<<_GNTMAP_host_map) | ||
327 | /* Accesses to the granted frame will be restricted to read-only access. */ | ||
328 | #define _GNTMAP_readonly (2) | ||
329 | #define GNTMAP_readonly (1<<_GNTMAP_readonly) | ||
330 | /* | ||
331 | * GNTMAP_host_map subflag: | ||
332 | * 0 => The host mapping is usable only by the guest OS. | ||
333 | * 1 => The host mapping is usable by guest OS + current application. | ||
334 | */ | ||
335 | #define _GNTMAP_application_map (3) | ||
336 | #define GNTMAP_application_map (1<<_GNTMAP_application_map) | ||
337 | |||
338 | /* | ||
339 | * GNTMAP_contains_pte subflag: | ||
340 | * 0 => This map request contains a host virtual address. | ||
341 | * 1 => This map request contains the machine address of the PTE to update. | ||
342 | */ | ||
343 | #define _GNTMAP_contains_pte (4) | ||
344 | #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) | ||
345 | |||
346 | /* | ||
347 | * Values for error status returns. All errors are -ve. | ||
348 | */ | ||
349 | #define GNTST_okay (0) /* Normal return. */ | ||
350 | #define GNTST_general_error (-1) /* General undefined error. */ | ||
351 | #define GNTST_bad_domain (-2) /* Unrecognised domain id. */ | ||
352 | #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ | ||
353 | #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ | ||
354 | #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ | ||
355 | #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ | ||
356 | #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ | ||
357 | #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ | ||
358 | #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ | ||
359 | #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */ | ||
360 | |||
361 | #define GNTTABOP_error_msgs { \ | ||
362 | "okay", \ | ||
363 | "undefined error", \ | ||
364 | "unrecognised domain id", \ | ||
365 | "invalid grant reference", \ | ||
366 | "invalid mapping handle", \ | ||
367 | "invalid virtual address", \ | ||
368 | "invalid device address", \ | ||
369 | "no spare translation slot in the I/O MMU", \ | ||
370 | "permission denied", \ | ||
371 | "bad page", \ | ||
372 | "copy arguments cross page boundary" \ | ||
373 | } | ||
374 | |||
375 | #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ | ||
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h new file mode 100644 index 000000000000..c2d1fa4dc1ee --- /dev/null +++ b/include/xen/interface/io/blkif.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /****************************************************************************** | ||
2 | * blkif.h | ||
3 | * | ||
4 | * Unified block-device I/O interface for Xen guest OSes. | ||
5 | * | ||
6 | * Copyright (c) 2003-2004, Keir Fraser | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_IO_BLKIF_H__ | ||
10 | #define __XEN_PUBLIC_IO_BLKIF_H__ | ||
11 | |||
12 | #include "ring.h" | ||
13 | #include "../grant_table.h" | ||
14 | |||
15 | /* | ||
16 | * Front->back notifications: When enqueuing a new request, sending a | ||
17 | * notification can be made conditional on req_event (i.e., the generic | ||
18 | * hold-off mechanism provided by the ring macros). Backends must set | ||
19 | * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). | ||
20 | * | ||
21 | * Back->front notifications: When enqueuing a new response, sending a | ||
22 | * notification can be made conditional on rsp_event (i.e., the generic | ||
23 | * hold-off mechanism provided by the ring macros). Frontends must set | ||
24 | * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). | ||
25 | */ | ||
26 | |||
27 | typedef uint16_t blkif_vdev_t; | ||
28 | typedef uint64_t blkif_sector_t; | ||
29 | |||
30 | /* | ||
31 | * REQUEST CODES. | ||
32 | */ | ||
33 | #define BLKIF_OP_READ 0 | ||
34 | #define BLKIF_OP_WRITE 1 | ||
35 | /* | ||
36 | * Recognised only if "feature-barrier" is present in backend xenbus info. | ||
37 | * The "feature-barrier" node contains a boolean indicating whether barrier | ||
38 | * requests are likely to succeed or fail. Either way, a barrier request | ||
39 | * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by | ||
40 | * the underlying block-device hardware. The boolean simply indicates whether | ||
41 | * or not it is worthwhile for the frontend to attempt barrier requests. | ||
42 | * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* | ||
43 | * create the "feature-barrier" node! | ||
44 | */ | ||
45 | #define BLKIF_OP_WRITE_BARRIER 2 | ||
46 | |||
47 | /* | ||
48 | * Maximum scatter/gather segments per request. | ||
49 | * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. | ||
50 | * NB. This could be 12 if the ring indexes weren't stored in the same page. | ||
51 | */ | ||
52 | #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 | ||
53 | |||
54 | struct blkif_request { | ||
55 | uint8_t operation; /* BLKIF_OP_??? */ | ||
56 | uint8_t nr_segments; /* number of segments */ | ||
57 | blkif_vdev_t handle; /* only for read/write requests */ | ||
58 | uint64_t id; /* private guest value, echoed in resp */ | ||
59 | blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ | ||
60 | struct blkif_request_segment { | ||
61 | grant_ref_t gref; /* reference to I/O buffer frame */ | ||
62 | /* @first_sect: first sector in frame to transfer (inclusive). */ | ||
63 | /* @last_sect: last sector in frame to transfer (inclusive). */ | ||
64 | uint8_t first_sect, last_sect; | ||
65 | } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
66 | }; | ||
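[Editor's note] To show how the request fields fit together, here is a hedged sketch that fills in a two-segment read covering 16 sectors (8 per granted frame). The grant references and the id value are assumptions supplied by the caller; the ring machinery that actually queues the request lives in ring.h.

    static void fill_read_request(struct blkif_request *req, blkif_vdev_t handle,
                                  uint64_t id, blkif_sector_t sector,
                                  grant_ref_t gref0, grant_ref_t gref1)
    {
        req->operation     = BLKIF_OP_READ;
        req->nr_segments   = 2;            /* <= BLKIF_MAX_SEGMENTS_PER_REQUEST */
        req->handle        = handle;
        req->id            = id;           /* echoed in blkif_response.id */
        req->sector_number = sector;

        req->seg[0].gref       = gref0;
        req->seg[0].first_sect = 0;        /* whole first frame: sectors 0..7 */
        req->seg[0].last_sect  = 7;

        req->seg[1].gref       = gref1;
        req->seg[1].first_sect = 0;        /* whole second frame: sectors 0..7 */
        req->seg[1].last_sect  = 7;
    }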
67 | |||
68 | struct blkif_response { | ||
69 | uint64_t id; /* copied from request */ | ||
70 | uint8_t operation; /* copied from request */ | ||
71 | int16_t status; /* BLKIF_RSP_??? */ | ||
72 | }; | ||
73 | |||
74 | /* | ||
75 | * STATUS RETURN CODES. | ||
76 | */ | ||
77 | /* Operation not supported (only happens on barrier writes). */ | ||
78 | #define BLKIF_RSP_EOPNOTSUPP -2 | ||
79 | /* Operation failed for some unspecified reason (-EIO). */ | ||
80 | #define BLKIF_RSP_ERROR -1 | ||
81 | /* Operation completed successfully. */ | ||
82 | #define BLKIF_RSP_OKAY 0 | ||
83 | |||
84 | /* | ||
85 | * Generate blkif ring structures and types. | ||
86 | */ | ||
87 | |||
88 | DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); | ||
89 | |||
90 | #define VDISK_CDROM 0x1 | ||
91 | #define VDISK_REMOVABLE 0x2 | ||
92 | #define VDISK_READONLY 0x4 | ||
93 | |||
94 | #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ | ||
diff --git a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h new file mode 100644 index 000000000000..e563de70f784 --- /dev/null +++ b/include/xen/interface/io/console.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /****************************************************************************** | ||
2 | * console.h | ||
3 | * | ||
4 | * Console I/O interface for Xen guest OSes. | ||
5 | * | ||
6 | * Copyright (c) 2005, Keir Fraser | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ | ||
10 | #define __XEN_PUBLIC_IO_CONSOLE_H__ | ||
11 | |||
12 | typedef uint32_t XENCONS_RING_IDX; | ||
13 | |||
14 | #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) | ||
15 | |||
16 | struct xencons_interface { | ||
17 | char in[1024]; | ||
18 | char out[2048]; | ||
19 | XENCONS_RING_IDX in_cons, in_prod; | ||
20 | XENCONS_RING_IDX out_cons, out_prod; | ||
21 | }; | ||
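[Editor's note] A hedged sketch of the producer side of the 'out' ring above: copy what fits, then publish the new producer index. __sync_synchronize() stands in for the guest's wmb(); notifying the backend (via the console event channel) is left to the caller.

    #include <stddef.h>

    static size_t xencons_put(struct xencons_interface *intf,
                              const char *data, size_t len)
    {
        XENCONS_RING_IDX cons = intf->out_cons;
        XENCONS_RING_IDX prod = intf->out_prod;
        size_t sent = 0;

        /* Stop when the ring is full (prod may run at most sizeof(out) ahead). */
        while (sent < len && (prod - cons) < sizeof(intf->out)) {
            intf->out[MASK_XENCONS_IDX(prod, intf->out)] = data[sent++];
            prod++;
        }
        __sync_synchronize();          /* data visible before the index update */
        intf->out_prod = prod;
        return sent;
    }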
22 | |||
23 | #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ | ||
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h new file mode 100644 index 000000000000..518481c95f18 --- /dev/null +++ b/include/xen/interface/io/netif.h | |||
@@ -0,0 +1,158 @@ | |||
1 | /****************************************************************************** | ||
2 | * netif.h | ||
3 | * | ||
4 | * Unified network-device I/O interface for Xen guest OSes. | ||
5 | * | ||
6 | * Copyright (c) 2003-2004, Keir Fraser | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_IO_NETIF_H__ | ||
10 | #define __XEN_PUBLIC_IO_NETIF_H__ | ||
11 | |||
12 | #include "ring.h" | ||
13 | #include "../grant_table.h" | ||
14 | |||
15 | /* | ||
16 | * Notifications after enqueuing any type of message should be conditional on | ||
17 | * the appropriate req_event or rsp_event field in the shared ring. | ||
18 | * If the client sends notification for rx requests then it should specify | ||
19 | * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume | ||
20 | * that it cannot safely queue packets (as it may not be kicked to send them). | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This is the 'wire' format for packets: | ||
25 | * Request 1: netif_tx_request -- NETTXF_* (any flags) | ||
26 | * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) | ||
27 | * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) | ||
28 | * Request 4: netif_tx_request -- NETTXF_more_data | ||
29 | * Request 5: netif_tx_request -- NETTXF_more_data | ||
30 | * ... | ||
31 | * Request N: netif_tx_request -- 0 | ||
32 | */ | ||
33 | |||
34 | /* Protocol checksum field is blank in the packet (hardware offload)? */ | ||
35 | #define _NETTXF_csum_blank (0) | ||
36 | #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) | ||
37 | |||
38 | /* Packet data has been validated against protocol checksum. */ | ||
39 | #define _NETTXF_data_validated (1) | ||
40 | #define NETTXF_data_validated (1U<<_NETTXF_data_validated) | ||
41 | |||
42 | /* Packet continues in the next request descriptor. */ | ||
43 | #define _NETTXF_more_data (2) | ||
44 | #define NETTXF_more_data (1U<<_NETTXF_more_data) | ||
45 | |||
46 | /* Packet to be followed by extra descriptor(s). */ | ||
47 | #define _NETTXF_extra_info (3) | ||
48 | #define NETTXF_extra_info (1U<<_NETTXF_extra_info) | ||
49 | |||
50 | struct xen_netif_tx_request { | ||
51 | grant_ref_t gref; /* Reference to buffer page */ | ||
52 | uint16_t offset; /* Offset within buffer page */ | ||
53 | uint16_t flags; /* NETTXF_* */ | ||
54 | uint16_t id; /* Echoed in response message. */ | ||
55 | uint16_t size; /* Packet size in bytes. */ | ||
56 | }; | ||
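[Editor's note] A hedged sketch of filling in a single-slot transmit request for a packet that lives entirely within one granted frame, asking the backend to compute the protocol checksum; all values are illustrative.

    static void fill_tx_request(struct xen_netif_tx_request *tx,
                                grant_ref_t gref, uint16_t offset,
                                uint16_t id, uint16_t size)
    {
        tx->gref   = gref;                  /* granted frame holding the packet */
        tx->offset = offset;                /* start of packet within the frame */
        tx->flags  = NETTXF_csum_blank | NETTXF_data_validated;
        tx->id     = id;                    /* echoed in xen_netif_tx_response */
        tx->size   = size;                  /* total packet size in bytes */
    }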
57 | |||
58 | /* Types of netif_extra_info descriptors. */ | ||
59 | #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ | ||
60 | #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ | ||
61 | #define XEN_NETIF_EXTRA_TYPE_MAX (2) | ||
62 | |||
63 | /* netif_extra_info flags. */ | ||
64 | #define _XEN_NETIF_EXTRA_FLAG_MORE (0) | ||
65 | #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) | ||
66 | |||
67 | /* GSO types - only TCPv4 currently supported. */ | ||
68 | #define XEN_NETIF_GSO_TYPE_TCPV4 (1) | ||
69 | |||
70 | /* | ||
71 | * This structure needs to fit within both netif_tx_request and | ||
72 | * netif_rx_response for compatibility. | ||
73 | */ | ||
74 | struct xen_netif_extra_info { | ||
75 | uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ | ||
76 | uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ | ||
77 | |||
78 | union { | ||
79 | struct { | ||
80 | /* | ||
81 | * Maximum payload size of each segment. For | ||
82 | * example, for TCP this is just the path MSS. | ||
83 | */ | ||
84 | uint16_t size; | ||
85 | |||
86 | /* | ||
87 | * GSO type. This determines the protocol of | ||
88 | * the packet and any extra features required | ||
89 | * to segment the packet properly. | ||
90 | */ | ||
91 | uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ | ||
92 | |||
93 | /* Future expansion. */ | ||
94 | uint8_t pad; | ||
95 | |||
96 | /* | ||
97 | * GSO features. This specifies any extra GSO | ||
98 | * features required to process this packet, | ||
99 | * such as ECN support for TCPv4. | ||
100 | */ | ||
101 | uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ | ||
102 | } gso; | ||
103 | |||
104 | uint16_t pad[3]; | ||
105 | } u; | ||
106 | }; | ||
107 | |||
108 | struct xen_netif_tx_response { | ||
109 | uint16_t id; | ||
110 | int16_t status; /* NETIF_RSP_* */ | ||
111 | }; | ||
112 | |||
113 | struct xen_netif_rx_request { | ||
114 | uint16_t id; /* Echoed in response message. */ | ||
115 | grant_ref_t gref; /* Reference to incoming granted frame */ | ||
116 | }; | ||
117 | |||
118 | /* Packet data has been validated against protocol checksum. */ | ||
119 | #define _NETRXF_data_validated (0) | ||
120 | #define NETRXF_data_validated (1U<<_NETRXF_data_validated) | ||
121 | |||
122 | /* Protocol checksum field is blank in the packet (hardware offload)? */ | ||
123 | #define _NETRXF_csum_blank (1) | ||
124 | #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) | ||
125 | |||
126 | /* Packet continues in the next request descriptor. */ | ||
127 | #define _NETRXF_more_data (2) | ||
128 | #define NETRXF_more_data (1U<<_NETRXF_more_data) | ||
129 | |||
130 | /* Packet to be followed by extra descriptor(s). */ | ||
131 | #define _NETRXF_extra_info (3) | ||
132 | #define NETRXF_extra_info (1U<<_NETRXF_extra_info) | ||
133 | |||
134 | struct xen_netif_rx_response { | ||
135 | uint16_t id; | ||
136 | uint16_t offset; /* Offset in page of start of received packet */ | ||
137 | uint16_t flags; /* NETRXF_* */ | ||
138 | int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ | ||
139 | }; | ||
140 | |||
141 | /* | ||
142 | * Generate netif ring structures and types. | ||
143 | */ | ||
144 | |||
145 | DEFINE_RING_TYPES(xen_netif_tx, | ||
146 | struct xen_netif_tx_request, | ||
147 | struct xen_netif_tx_response); | ||
148 | DEFINE_RING_TYPES(xen_netif_rx, | ||
149 | struct xen_netif_rx_request, | ||
150 | struct xen_netif_rx_response); | ||
151 | |||
152 | #define NETIF_RSP_DROPPED -2 | ||
153 | #define NETIF_RSP_ERROR -1 | ||
154 | #define NETIF_RSP_OKAY 0 | ||
155 | /* No response: used for auxiliary requests (e.g., netif_tx_extra). */ | ||
156 | #define NETIF_RSP_NULL 1 | ||
157 | |||
158 | #endif | ||
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h new file mode 100644 index 000000000000..e8cbf431c8cc --- /dev/null +++ b/include/xen/interface/io/ring.h | |||
@@ -0,0 +1,260 @@ | |||
1 | /****************************************************************************** | ||
2 | * ring.h | ||
3 | * | ||
4 | * Shared producer-consumer ring macros. | ||
5 | * | ||
6 | * Tim Deegan and Andrew Warfield November 2004. | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_IO_RING_H__ | ||
10 | #define __XEN_PUBLIC_IO_RING_H__ | ||
11 | |||
12 | typedef unsigned int RING_IDX; | ||
13 | |||
14 | /* Round a 32-bit unsigned constant down to the nearest power of two. */ | ||
15 | #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) | ||
16 | #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) | ||
17 | #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) | ||
18 | #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) | ||
19 | #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) | ||
20 | |||
21 | /* | ||
22 | * Calculate size of a shared ring, given the total available space for the | ||
23 | * ring and indexes (_sz), and the name tag of the request/response structure. | ||
24 | * A ring contains as many entries as will fit, rounded down to the nearest | ||
25 | * power of two (so we can mask with (size-1) to loop around). | ||
26 | */ | ||
27 | #define __RING_SIZE(_s, _sz) \ | ||
28 | (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) | ||
29 | |||
30 | /* | ||
31 | * Macros to make the correct C datatypes for a new kind of ring. | ||
32 | * | ||
33 | * To make a new ring datatype, you need to have two message structures, | ||
34 | * let's say struct request, and struct response already defined. | ||
35 | * | ||
36 | * In a header where you want the ring datatype declared, you then do: | ||
37 | * | ||
38 | * DEFINE_RING_TYPES(mytag, struct request, struct response); | ||
39 | * | ||
40 | * These expand out to give you a set of types, as you can see below. | ||
41 | * The most important of these are: | ||
42 | * | ||
43 | * struct mytag_sring - The shared ring. | ||
44 | * struct mytag_front_ring - The 'front' half of the ring. | ||
45 | * struct mytag_back_ring - The 'back' half of the ring. | ||
46 | * | ||
47 | * To initialize a ring in your code you need to know the location and size | ||
48 | * of the shared memory area (PAGE_SIZE, for instance). To initialise | ||
49 | * the front half: | ||
50 | * | ||
51 | * struct mytag_front_ring front_ring; | ||
52 | * SHARED_RING_INIT((struct mytag_sring *)shared_page); | ||
53 | * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page, | ||
54 | * PAGE_SIZE); | ||
55 | * | ||
56 | * Initializing the back follows similarly (note that only the front | ||
57 | * initializes the shared ring): | ||
58 | * | ||
59 | * struct mytag_back_ring back_ring; | ||
60 | * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page, | ||
61 | * PAGE_SIZE); | ||
62 | */ | ||
63 | |||
64 | #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ | ||
65 | \ | ||
66 | /* Shared ring entry */ \ | ||
67 | union __name##_sring_entry { \ | ||
68 | __req_t req; \ | ||
69 | __rsp_t rsp; \ | ||
70 | }; \ | ||
71 | \ | ||
72 | /* Shared ring page */ \ | ||
73 | struct __name##_sring { \ | ||
74 | RING_IDX req_prod, req_event; \ | ||
75 | RING_IDX rsp_prod, rsp_event; \ | ||
76 | uint8_t pad[48]; \ | ||
77 | union __name##_sring_entry ring[1]; /* variable-length */ \ | ||
78 | }; \ | ||
79 | \ | ||
80 | /* "Front" end's private variables */ \ | ||
81 | struct __name##_front_ring { \ | ||
82 | RING_IDX req_prod_pvt; \ | ||
83 | RING_IDX rsp_cons; \ | ||
84 | unsigned int nr_ents; \ | ||
85 | struct __name##_sring *sring; \ | ||
86 | }; \ | ||
87 | \ | ||
88 | /* "Back" end's private variables */ \ | ||
89 | struct __name##_back_ring { \ | ||
90 | RING_IDX rsp_prod_pvt; \ | ||
91 | RING_IDX req_cons; \ | ||
92 | unsigned int nr_ents; \ | ||
93 | struct __name##_sring *sring; \ | ||
94 | }; | ||
95 | |||
96 | /* | ||
97 | * Macros for manipulating rings. | ||
98 | * | ||
99 | * FRONT_RING_whatever works on the "front end" of a ring: here | ||
100 | * requests are pushed on to the ring and responses taken off it. | ||
101 | * | ||
102 | * BACK_RING_whatever works on the "back end" of a ring: here | ||
103 | * requests are taken off the ring and responses put on. | ||
104 | * | ||
105 | * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. | ||
106 | * This is OK in 1-for-1 request-response situations where the | ||
107 | * requestor (front end) never has more than RING_SIZE()-1 | ||
108 | * outstanding requests. | ||
109 | */ | ||
110 | |||
111 | /* Initialising empty rings */ | ||
112 | #define SHARED_RING_INIT(_s) do { \ | ||
113 | (_s)->req_prod = (_s)->rsp_prod = 0; \ | ||
114 | (_s)->req_event = (_s)->rsp_event = 1; \ | ||
115 | memset((_s)->pad, 0, sizeof((_s)->pad)); \ | ||
116 | } while(0) | ||
117 | |||
118 | #define FRONT_RING_INIT(_r, _s, __size) do { \ | ||
119 | (_r)->req_prod_pvt = 0; \ | ||
120 | (_r)->rsp_cons = 0; \ | ||
121 | (_r)->nr_ents = __RING_SIZE(_s, __size); \ | ||
122 | (_r)->sring = (_s); \ | ||
123 | } while (0) | ||
124 | |||
125 | #define BACK_RING_INIT(_r, _s, __size) do { \ | ||
126 | (_r)->rsp_prod_pvt = 0; \ | ||
127 | (_r)->req_cons = 0; \ | ||
128 | (_r)->nr_ents = __RING_SIZE(_s, __size); \ | ||
129 | (_r)->sring = (_s); \ | ||
130 | } while (0) | ||
131 | |||
132 | /* Initialize to existing shared indexes -- for recovery */ | ||
133 | #define FRONT_RING_ATTACH(_r, _s, __size) do { \ | ||
134 | (_r)->sring = (_s); \ | ||
135 | (_r)->req_prod_pvt = (_s)->req_prod; \ | ||
136 | (_r)->rsp_cons = (_s)->rsp_prod; \ | ||
137 | (_r)->nr_ents = __RING_SIZE(_s, __size); \ | ||
138 | } while (0) | ||
139 | |||
140 | #define BACK_RING_ATTACH(_r, _s, __size) do { \ | ||
141 | (_r)->sring = (_s); \ | ||
142 | (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ | ||
143 | (_r)->req_cons = (_s)->req_prod; \ | ||
144 | (_r)->nr_ents = __RING_SIZE(_s, __size); \ | ||
145 | } while (0) | ||
146 | |||
147 | /* How big is this ring? */ | ||
148 | #define RING_SIZE(_r) \ | ||
149 | ((_r)->nr_ents) | ||
150 | |||
151 | /* Number of free requests (for use on front side only). */ | ||
152 | #define RING_FREE_REQUESTS(_r) \ | ||
153 | (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) | ||
154 | |||
155 | /* Test if there is an empty slot available on the front ring. | ||
156 | * (This is only meaningful from the front.) | ||
157 | */ | ||
158 | #define RING_FULL(_r) \ | ||
159 | (RING_FREE_REQUESTS(_r) == 0) | ||
160 | |||
161 | /* Test if there are outstanding messages to be processed on a ring. */ | ||
162 | #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ | ||
163 | ((_r)->sring->rsp_prod - (_r)->rsp_cons) | ||
164 | |||
165 | #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ | ||
166 | ({ \ | ||
167 | unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ | ||
168 | unsigned int rsp = RING_SIZE(_r) - \ | ||
169 | ((_r)->req_cons - (_r)->rsp_prod_pvt); \ | ||
170 | req < rsp ? req : rsp; \ | ||
171 | }) | ||
172 | |||
173 | /* Direct access to individual ring elements, by index. */ | ||
174 | #define RING_GET_REQUEST(_r, _idx) \ | ||
175 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) | ||
176 | |||
177 | #define RING_GET_RESPONSE(_r, _idx) \ | ||
178 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) | ||
179 | |||
180 | /* Loop termination condition: Would the specified index overflow the ring? */ | ||
181 | #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ | ||
182 | (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) | ||
183 | |||
184 | #define RING_PUSH_REQUESTS(_r) do { \ | ||
185 | wmb(); /* back sees requests /before/ updated producer index */ \ | ||
186 | (_r)->sring->req_prod = (_r)->req_prod_pvt; \ | ||
187 | } while (0) | ||
188 | |||
189 | #define RING_PUSH_RESPONSES(_r) do { \ | ||
190 | wmb(); /* front sees responses /before/ updated producer index */ \ | ||
191 | (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ | ||
192 | } while (0) | ||
193 | |||
194 | /* | ||
195 | * Notification hold-off (req_event and rsp_event): | ||
196 | * | ||
197 | * When queueing requests or responses on a shared ring, it may not always be | ||
198 | * necessary to notify the remote end. For example, if requests are in flight | ||
199 | * in a backend, the front may be able to queue further requests without | ||
200 | * notifying the back (if the back checks for new requests when it queues | ||
201 | * responses). | ||
202 | * | ||
203 | * When enqueuing requests or responses: | ||
204 | * | ||
205 | * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument | ||
206 | * is a boolean return value. True indicates that the receiver requires an | ||
207 | * asynchronous notification. | ||
208 | * | ||
209 | * After dequeuing requests or responses (before sleeping the connection): | ||
210 | * | ||
211 | * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). | ||
212 | * The second argument is a boolean return value. True indicates that there | ||
213 | * are pending messages on the ring (i.e., the connection should not be put | ||
214 | * to sleep). | ||
215 | * | ||
216 | * These macros will set the req_event/rsp_event field to trigger a | ||
217 | * notification on the very next message that is enqueued. If you want to | ||
218 | * create batches of work (i.e., only receive a notification after several | ||
219 | * messages have been enqueued) then you will need to create a customised | ||
220 | * version of the FINAL_CHECK macro in your own code, which sets the event | ||
221 | * field appropriately. | ||
222 | */ | ||
223 | |||
224 | #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ | ||
225 | RING_IDX __old = (_r)->sring->req_prod; \ | ||
226 | RING_IDX __new = (_r)->req_prod_pvt; \ | ||
227 | wmb(); /* back sees requests /before/ updated producer index */ \ | ||
228 | (_r)->sring->req_prod = __new; \ | ||
229 | mb(); /* back sees new requests /before/ we check req_event */ \ | ||
230 | (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ | ||
231 | (RING_IDX)(__new - __old)); \ | ||
232 | } while (0) | ||
233 | |||
234 | #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ | ||
235 | RING_IDX __old = (_r)->sring->rsp_prod; \ | ||
236 | RING_IDX __new = (_r)->rsp_prod_pvt; \ | ||
237 | wmb(); /* front sees responses /before/ updated producer index */ \ | ||
238 | (_r)->sring->rsp_prod = __new; \ | ||
239 | mb(); /* front sees new responses /before/ we check rsp_event */ \ | ||
240 | (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ | ||
241 | (RING_IDX)(__new - __old)); \ | ||
242 | } while (0) | ||
243 | |||
244 | #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ | ||
245 | (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ | ||
246 | if (_work_to_do) break; \ | ||
247 | (_r)->sring->req_event = (_r)->req_cons + 1; \ | ||
248 | mb(); \ | ||
249 | (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ | ||
250 | } while (0) | ||
251 | |||
252 | #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ | ||
253 | (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ | ||
254 | if (_work_to_do) break; \ | ||
255 | (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ | ||
256 | mb(); \ | ||
257 | (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ | ||
258 | } while (0) | ||
259 | |||
260 | #endif /* __XEN_PUBLIC_IO_RING_H__ */ | ||
diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h new file mode 100644 index 000000000000..46508c7fa399 --- /dev/null +++ b/include/xen/interface/io/xenbus.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /***************************************************************************** | ||
2 | * xenbus.h | ||
3 | * | ||
4 | * Xenbus protocol details. | ||
5 | * | ||
6 | * Copyright (C) 2005 XenSource Ltd. | ||
7 | */ | ||
8 | |||
9 | #ifndef _XEN_PUBLIC_IO_XENBUS_H | ||
10 | #define _XEN_PUBLIC_IO_XENBUS_H | ||
11 | |||
12 | /* The state of either end of the Xenbus, i.e. the current communication | ||
13 | status of initialisation across the bus. States here imply nothing about | ||
14 | the state of the connection between the driver and the kernel's device | ||
15 | layers. */ | ||
16 | enum xenbus_state | ||
17 | { | ||
18 | XenbusStateUnknown = 0, | ||
19 | XenbusStateInitialising = 1, | ||
20 | XenbusStateInitWait = 2, /* Finished early | ||
21 | initialisation, but waiting | ||
22 | for information from the peer | ||
23 | or hotplug scripts. */ | ||
24 | XenbusStateInitialised = 3, /* Initialised and waiting for a | ||
25 | connection from the peer. */ | ||
26 | XenbusStateConnected = 4, | ||
27 | XenbusStateClosing = 5, /* The device is being closed | ||
28 | due to an error or an unplug | ||
29 | event. */ | ||
30 | XenbusStateClosed = 6 | ||
31 | |||
32 | }; | ||
33 | |||
34 | #endif /* _XEN_PUBLIC_IO_XENBUS_H */ | ||
35 | |||
36 | /* | ||
37 | * Local variables: | ||
38 | * c-file-style: "linux" | ||
39 | * indent-tabs-mode: t | ||
40 | * c-indent-level: 8 | ||
41 | * c-basic-offset: 8 | ||
42 | * tab-width: 8 | ||
43 | * End: | ||
44 | */ | ||
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h new file mode 100644 index 000000000000..99fcffb372d1 --- /dev/null +++ b/include/xen/interface/io/xs_wire.h | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * Details of the "wire" protocol between Xen Store Daemon and client | ||
3 | * library or guest kernel. | ||
4 | * Copyright (C) 2005 Rusty Russell IBM Corporation | ||
5 | */ | ||
6 | |||
7 | #ifndef _XS_WIRE_H | ||
8 | #define _XS_WIRE_H | ||
9 | |||
10 | enum xsd_sockmsg_type | ||
11 | { | ||
12 | XS_DEBUG, | ||
13 | XS_DIRECTORY, | ||
14 | XS_READ, | ||
15 | XS_GET_PERMS, | ||
16 | XS_WATCH, | ||
17 | XS_UNWATCH, | ||
18 | XS_TRANSACTION_START, | ||
19 | XS_TRANSACTION_END, | ||
20 | XS_INTRODUCE, | ||
21 | XS_RELEASE, | ||
22 | XS_GET_DOMAIN_PATH, | ||
23 | XS_WRITE, | ||
24 | XS_MKDIR, | ||
25 | XS_RM, | ||
26 | XS_SET_PERMS, | ||
27 | XS_WATCH_EVENT, | ||
28 | XS_ERROR, | ||
29 | XS_IS_DOMAIN_INTRODUCED | ||
30 | }; | ||
31 | |||
32 | #define XS_WRITE_NONE "NONE" | ||
33 | #define XS_WRITE_CREATE "CREATE" | ||
34 | #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" | ||
35 | |||
36 | /* We hand errors as strings, for portability. */ | ||
37 | struct xsd_errors | ||
38 | { | ||
39 | int errnum; | ||
40 | const char *errstring; | ||
41 | }; | ||
42 | #define XSD_ERROR(x) { x, #x } | ||
43 | static struct xsd_errors xsd_errors[] __attribute__((unused)) = { | ||
44 | XSD_ERROR(EINVAL), | ||
45 | XSD_ERROR(EACCES), | ||
46 | XSD_ERROR(EEXIST), | ||
47 | XSD_ERROR(EISDIR), | ||
48 | XSD_ERROR(ENOENT), | ||
49 | XSD_ERROR(ENOMEM), | ||
50 | XSD_ERROR(ENOSPC), | ||
51 | XSD_ERROR(EIO), | ||
52 | XSD_ERROR(ENOTEMPTY), | ||
53 | XSD_ERROR(ENOSYS), | ||
54 | XSD_ERROR(EROFS), | ||
55 | XSD_ERROR(EBUSY), | ||
56 | XSD_ERROR(EAGAIN), | ||
57 | XSD_ERROR(EISCONN) | ||
58 | }; | ||
59 | |||
60 | struct xsd_sockmsg | ||
61 | { | ||
62 | uint32_t type; /* XS_??? */ | ||
63 | uint32_t req_id;/* Request identifier, echoed in daemon's response. */ | ||
64 | uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ | ||
65 | uint32_t len; /* Length of data following this. */ | ||
66 | |||
67 | /* Generally followed by nul-terminated string(s). */ | ||
68 | }; | ||
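[Editor's note] As a hedged sketch of the wire layout, this helper serialises an XS_READ request into a caller-supplied buffer: the fixed header above, immediately followed by the nul-terminated path as the payload.

    #include <string.h>

    static size_t build_xs_read(char *buf, size_t buflen, const char *path,
                                uint32_t req_id, uint32_t tx_id)
    {
        struct xsd_sockmsg msg = {
            .type   = XS_READ,
            .req_id = req_id,               /* echoed in the daemon's reply */
            .tx_id  = tx_id,                /* 0 if not inside a transaction */
            .len    = strlen(path) + 1,     /* payload includes the nul */
        };

        if (sizeof(msg) + msg.len > buflen)
            return 0;                       /* caller's buffer is too small */
        memcpy(buf, &msg, sizeof(msg));
        memcpy(buf + sizeof(msg), path, msg.len);
        return sizeof(msg) + msg.len;       /* bytes to push into the req ring */
    }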
69 | |||
70 | enum xs_watch_type | ||
71 | { | ||
72 | XS_WATCH_PATH = 0, | ||
73 | XS_WATCH_TOKEN | ||
74 | }; | ||
75 | |||
76 | /* Inter-domain shared memory communications. */ | ||
77 | #define XENSTORE_RING_SIZE 1024 | ||
78 | typedef uint32_t XENSTORE_RING_IDX; | ||
79 | #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) | ||
80 | struct xenstore_domain_interface { | ||
81 | char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ | ||
82 | char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ | ||
83 | XENSTORE_RING_IDX req_cons, req_prod; | ||
84 | XENSTORE_RING_IDX rsp_cons, rsp_prod; | ||
85 | }; | ||
86 | |||
87 | #endif /* _XS_WIRE_H */ | ||
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h new file mode 100644 index 000000000000..af36ead16817 --- /dev/null +++ b/include/xen/interface/memory.h | |||
@@ -0,0 +1,145 @@ | |||
1 | /****************************************************************************** | ||
2 | * memory.h | ||
3 | * | ||
4 | * Memory reservation and information. | ||
5 | * | ||
6 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_MEMORY_H__ | ||
10 | #define __XEN_PUBLIC_MEMORY_H__ | ||
11 | |||
12 | /* | ||
13 | * Increase or decrease the specified domain's memory reservation. Returns a | ||
14 | * -ve errcode on failure, or the # extents successfully allocated or freed. | ||
15 | * arg == addr of struct xen_memory_reservation. | ||
16 | */ | ||
17 | #define XENMEM_increase_reservation 0 | ||
18 | #define XENMEM_decrease_reservation 1 | ||
19 | #define XENMEM_populate_physmap 6 | ||
20 | struct xen_memory_reservation { | ||
21 | |||
22 | /* | ||
23 | * XENMEM_increase_reservation: | ||
24 | * OUT: MFN (*not* GMFN) bases of extents that were allocated | ||
25 | * XENMEM_decrease_reservation: | ||
26 | * IN: GMFN bases of extents to free | ||
27 | * XENMEM_populate_physmap: | ||
28 | * IN: GPFN bases of extents to populate with memory | ||
29 | * OUT: GMFN bases of extents that were allocated | ||
30 | * (NB. This command also updates the mach_to_phys translation table) | ||
31 | */ | ||
32 | GUEST_HANDLE(ulong) extent_start; | ||
33 | |||
34 | /* Number of extents, and size/alignment of each (2^extent_order pages). */ | ||
35 | unsigned long nr_extents; | ||
36 | unsigned int extent_order; | ||
37 | |||
38 | /* | ||
39 | * Maximum # bits addressable by the user of the allocated region (e.g., | ||
40 | * I/O devices often have a 32-bit limitation even in 64-bit systems). If | ||
41 | * zero then the user has no addressing restriction. | ||
42 | * This field is not used by XENMEM_decrease_reservation. | ||
43 | */ | ||
44 | unsigned int address_bits; | ||
45 | |||
46 | /* | ||
47 | * Domain whose reservation is being changed. | ||
48 | * Unprivileged domains can specify only DOMID_SELF. | ||
49 | */ | ||
50 | domid_t domid; | ||
51 | |||
52 | }; | ||
53 | DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); | ||
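[Editor's note] A hedged sketch of the classic balloon-driver use of this structure: hand one page (identified by its GMFN) back to Xen. set_xen_guest_handle() and HYPERVISOR_memory_op() are assumed to come from the guest's Xen support headers; on success the hypercall returns the number of extents actually freed.

    extern int HYPERVISOR_memory_op(unsigned int cmd, void *arg); /* assumed */

    static int give_back_one_page(unsigned long gmfn)
    {
        struct xen_memory_reservation reservation = {
            .nr_extents   = 1,
            .extent_order = 0,              /* a single page-sized extent */
            .domid        = DOMID_SELF,
        };

        set_xen_guest_handle(reservation.extent_start, &gmfn);
        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
    }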
54 | |||
55 | /* | ||
56 | * Returns the maximum machine frame number of mapped RAM in this system. | ||
57 | * This command always succeeds (it never returns an error code). | ||
58 | * arg == NULL. | ||
59 | */ | ||
60 | #define XENMEM_maximum_ram_page 2 | ||
61 | |||
62 | /* | ||
63 | * Returns the current or maximum memory reservation, in pages, of the | ||
64 | * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. | ||
65 | * arg == addr of domid_t. | ||
66 | */ | ||
67 | #define XENMEM_current_reservation 3 | ||
68 | #define XENMEM_maximum_reservation 4 | ||
69 | |||
70 | /* | ||
71 | * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys | ||
72 | * mapping table. Architectures which do not have a m2p table do not implement | ||
73 | * this command. | ||
74 | * arg == addr of xen_machphys_mfn_list_t. | ||
75 | */ | ||
76 | #define XENMEM_machphys_mfn_list 5 | ||
77 | struct xen_machphys_mfn_list { | ||
78 | /* | ||
79 | * Size of the 'extent_start' array. Fewer entries will be filled if the | ||
80 | * machphys table is smaller than max_extents * 2MB. | ||
81 | */ | ||
82 | unsigned int max_extents; | ||
83 | |||
84 | /* | ||
85 | * Pointer to buffer to fill with list of extent starts. If there are | ||
86 | * any large discontiguities in the machine address space, 2MB gaps in | ||
87 | * the machphys table will be represented by an MFN base of zero. | ||
88 | */ | ||
89 | GUEST_HANDLE(ulong) extent_start; | ||
90 | |||
91 | /* | ||
92 | * Number of extents written to the above array. This will be smaller | ||
93 | * than 'max_extents' if the machphys table is smaller than max_e * 2MB. | ||
94 | */ | ||
95 | unsigned int nr_extents; | ||
96 | }; | ||
97 | DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); | ||
98 | |||
99 | /* | ||
100 | * Sets the GPFN at which a particular page appears in the specified guest's | ||
101 | * pseudophysical address space. | ||
102 | * arg == addr of xen_add_to_physmap_t. | ||
103 | */ | ||
104 | #define XENMEM_add_to_physmap 7 | ||
105 | struct xen_add_to_physmap { | ||
106 | /* Which domain to change the mapping for. */ | ||
107 | domid_t domid; | ||
108 | |||
109 | /* Source mapping space. */ | ||
110 | #define XENMAPSPACE_shared_info 0 /* shared info page */ | ||
111 | #define XENMAPSPACE_grant_table 1 /* grant table page */ | ||
112 | unsigned int space; | ||
113 | |||
114 | /* Index into source mapping space. */ | ||
115 | unsigned long idx; | ||
116 | |||
117 | /* GPFN where the source mapping page should appear. */ | ||
118 | unsigned long gpfn; | ||
119 | }; | ||
120 | DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); | ||
121 | |||
122 | /* | ||
123 | * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error | ||
124 | * code on failure. This call only works for auto-translated guests. | ||
125 | */ | ||
126 | #define XENMEM_translate_gpfn_list 8 | ||
127 | struct xen_translate_gpfn_list { | ||
128 | /* Which domain to translate for? */ | ||
129 | domid_t domid; | ||
130 | |||
131 | /* Length of list. */ | ||
132 | unsigned long nr_gpfns; | ||
133 | |||
134 | /* List of GPFNs to translate. */ | ||
135 | GUEST_HANDLE(ulong) gpfn_list; | ||
136 | |||
137 | /* | ||
138 | * Output list to contain MFN translations. May be the same as the input | ||
139 | * list (in which case each input GPFN is overwritten with the output MFN). | ||
140 | */ | ||
141 | GUEST_HANDLE(ulong) mfn_list; | ||
142 | }; | ||
143 | DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list); | ||
144 | |||
145 | #endif /* __XEN_PUBLIC_MEMORY_H__ */ | ||
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h new file mode 100644 index 000000000000..cd6939147cb6 --- /dev/null +++ b/include/xen/interface/physdev.h | |||
@@ -0,0 +1,145 @@ | |||
1 | /* | ||
2 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
3 | * of this software and associated documentation files (the "Software"), to | ||
4 | * deal in the Software without restriction, including without limitation the | ||
5 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
6 | * sell copies of the Software, and to permit persons to whom the Software is | ||
7 | * furnished to do so, subject to the following conditions: | ||
8 | * | ||
9 | * The above copyright notice and this permission notice shall be included in | ||
10 | * all copies or substantial portions of the Software. | ||
11 | * | ||
12 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
13 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
15 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
16 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
17 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
18 | * DEALINGS IN THE SOFTWARE. | ||
19 | */ | ||
20 | |||
21 | #ifndef __XEN_PUBLIC_PHYSDEV_H__ | ||
22 | #define __XEN_PUBLIC_PHYSDEV_H__ | ||
23 | |||
24 | /* | ||
25 | * Prototype for this hypercall is: | ||
26 | * int physdev_op(int cmd, void *args) | ||
27 | * @cmd == PHYSDEVOP_??? (physdev operation). | ||
28 | * @args == Operation-specific extra arguments (NULL if none). | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * Notify end-of-interrupt (EOI) for the specified IRQ. | ||
33 | * @arg == pointer to physdev_eoi structure. | ||
34 | */ | ||
35 | #define PHYSDEVOP_eoi 12 | ||
36 | struct physdev_eoi { | ||
37 | /* IN */ | ||
38 | uint32_t irq; | ||
39 | }; | ||
40 | |||
41 | /* | ||
42 | * Query the status of an IRQ line. | ||
43 | * @arg == pointer to physdev_irq_status_query structure. | ||
44 | */ | ||
45 | #define PHYSDEVOP_irq_status_query 5 | ||
46 | struct physdev_irq_status_query { | ||
47 | /* IN */ | ||
48 | uint32_t irq; | ||
49 | /* OUT */ | ||
50 | uint32_t flags; /* XENIRQSTAT_* */ | ||
51 | }; | ||
52 | |||
53 | /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ | ||
54 | #define _XENIRQSTAT_needs_eoi (0) | ||
55 | #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) | ||
56 | |||
57 | /* IRQ shared by multiple guests? */ | ||
58 | #define _XENIRQSTAT_shared (1) | ||
59 | #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) | ||
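[Editor's note] A hedged sketch tying the two operations above together: query whether an IRQ needs an explicit end-of-interrupt and, if so, issue one. HYPERVISOR_physdev_op(cmd, arg) is assumed to be the guest wrapper for the hypercall prototype described at the top of this header.

    extern int HYPERVISOR_physdev_op(int cmd, void *arg);   /* assumed wrapper */

    static void ack_pirq(uint32_t irq)
    {
        struct physdev_irq_status_query query = { .irq = irq };

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &query))
            return;                          /* query failed; nothing to do */

        if (query.flags & XENIRQSTAT_needs_eoi) {
            struct physdev_eoi eoi = { .irq = irq };
            HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        }
    }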
60 | |||
61 | /* | ||
62 | * Set the current VCPU's I/O privilege level. | ||
63 | * @arg == pointer to physdev_set_iopl structure. | ||
64 | */ | ||
65 | #define PHYSDEVOP_set_iopl 6 | ||
66 | struct physdev_set_iopl { | ||
67 | /* IN */ | ||
68 | uint32_t iopl; | ||
69 | }; | ||
70 | |||
71 | /* | ||
72 | * Set the current VCPU's I/O-port permissions bitmap. | ||
73 | * @arg == pointer to physdev_set_iobitmap structure. | ||
74 | */ | ||
75 | #define PHYSDEVOP_set_iobitmap 7 | ||
76 | struct physdev_set_iobitmap { | ||
77 | /* IN */ | ||
78 | uint8_t * bitmap; | ||
79 | uint32_t nr_ports; | ||
80 | }; | ||
81 | |||
82 | /* | ||
83 | * Read or write an IO-APIC register. | ||
84 | * @arg == pointer to physdev_apic structure. | ||
85 | */ | ||
86 | #define PHYSDEVOP_apic_read 8 | ||
87 | #define PHYSDEVOP_apic_write 9 | ||
88 | struct physdev_apic { | ||
89 | /* IN */ | ||
90 | unsigned long apic_physbase; | ||
91 | uint32_t reg; | ||
92 | /* IN or OUT */ | ||
93 | uint32_t value; | ||
94 | }; | ||
95 | |||
96 | /* | ||
97 | * Allocate or free a physical upcall vector for the specified IRQ line. | ||
98 | * @arg == pointer to physdev_irq structure. | ||
99 | */ | ||
100 | #define PHYSDEVOP_alloc_irq_vector 10 | ||
101 | #define PHYSDEVOP_free_irq_vector 11 | ||
102 | struct physdev_irq { | ||
103 | /* IN */ | ||
104 | uint32_t irq; | ||
105 | /* IN or OUT */ | ||
106 | uint32_t vector; | ||
107 | }; | ||
108 | |||
109 | /* | ||
110 | * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op() | ||
111 | * hypercall since 0x00030202. | ||
112 | */ | ||
113 | struct physdev_op { | ||
114 | uint32_t cmd; | ||
115 | union { | ||
116 | struct physdev_irq_status_query irq_status_query; | ||
117 | struct physdev_set_iopl set_iopl; | ||
118 | struct physdev_set_iobitmap set_iobitmap; | ||
119 | struct physdev_apic apic_op; | ||
120 | struct physdev_irq irq_op; | ||
121 | } u; | ||
122 | }; | ||
123 | |||
124 | /* | ||
125 | * Notify that some PIRQ-bound event channels have been unmasked. | ||
126 | * ** This command is obsolete since interface version 0x00030202 and is ** | ||
127 | * ** unsupported by newer versions of Xen. ** | ||
128 | */ | ||
129 | #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 | ||
130 | |||
131 | /* | ||
132 | * These all-capitals physdev operation names are superseded by the new names | ||
133 | * (defined above) since interface version 0x00030202. | ||
134 | */ | ||
135 | #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query | ||
136 | #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl | ||
137 | #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap | ||
138 | #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read | ||
139 | #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write | ||
140 | #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector | ||
141 | #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector | ||
142 | #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi | ||
143 | #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared | ||
144 | |||
145 | #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ | ||
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h new file mode 100644 index 000000000000..5fec575a800a --- /dev/null +++ b/include/xen/interface/sched.h | |||
@@ -0,0 +1,77 @@ | |||
1 | /****************************************************************************** | ||
2 | * sched.h | ||
3 | * | ||
4 | * Scheduler state interactions | ||
5 | * | ||
6 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_SCHED_H__ | ||
10 | #define __XEN_PUBLIC_SCHED_H__ | ||
11 | |||
12 | #include "event_channel.h" | ||
13 | |||
14 | /* | ||
15 | * The prototype for this hypercall is: | ||
16 | * long sched_op_new(int cmd, void *arg) | ||
17 | * @cmd == SCHEDOP_??? (scheduler operation). | ||
18 | * @arg == Operation-specific extra argument(s), as described below. | ||
19 | * | ||
20 | * **NOTE**: | ||
21 | * Versions of Xen prior to 3.0.2 provide only the following legacy version | ||
22 | * of this hypercall, supporting only the commands yield, block and shutdown: | ||
23 | * long sched_op(int cmd, unsigned long arg) | ||
24 | * @cmd == SCHEDOP_??? (scheduler operation). | ||
25 | * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) | ||
26 | * == SHUTDOWN_* code (SCHEDOP_shutdown) | ||
27 | */ | ||
28 | |||
29 | /* | ||
30 | * Voluntarily yield the CPU. | ||
31 | * @arg == NULL. | ||
32 | */ | ||
33 | #define SCHEDOP_yield 0 | ||
34 | |||
35 | /* | ||
36 | * Block execution of this VCPU until an event is received for processing. | ||
37 | * If called with event upcalls masked, this operation will atomically | ||
38 | * reenable event delivery and check for pending events before blocking the | ||
39 | * VCPU. This avoids a "wakeup waiting" race. | ||
40 | * @arg == NULL. | ||
41 | */ | ||
42 | #define SCHEDOP_block 1 | ||
43 | |||
44 | /* | ||
45 | * Halt execution of this domain (all VCPUs) and notify the system controller. | ||
46 | * @arg == pointer to sched_shutdown structure. | ||
47 | */ | ||
48 | #define SCHEDOP_shutdown 2 | ||
49 | struct sched_shutdown { | ||
50 | unsigned int reason; /* SHUTDOWN_* */ | ||
51 | }; | ||
52 | DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); | ||
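[Editor's note] A hedged sketch of a clean reboot request using the structure above; the SHUTDOWN_* reason codes are listed at the end of this header, and the sched_op wrapper declared here is an assumption standing in for whatever the guest's hypercall layer provides.

    extern long HYPERVISOR_sched_op(int cmd, void *arg);    /* assumed wrapper */

    static void request_reboot(void)
    {
        struct sched_shutdown shutdown = { .reason = SHUTDOWN_reboot };

        /* Halts all VCPUs and asks the system controller to restart us. */
        HYPERVISOR_sched_op(SCHEDOP_shutdown, &shutdown);
    }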
53 | |||
54 | /* | ||
55 | * Poll a set of event-channel ports. Return when one or more are pending. An | ||
56 | * optional timeout may be specified. | ||
57 | * @arg == pointer to sched_poll structure. | ||
58 | */ | ||
59 | #define SCHEDOP_poll 3 | ||
60 | struct sched_poll { | ||
61 | GUEST_HANDLE(evtchn_port_t) ports; | ||
62 | unsigned int nr_ports; | ||
63 | uint64_t timeout; | ||
64 | }; | ||
65 | DEFINE_GUEST_HANDLE_STRUCT(sched_poll); | ||
66 | |||
67 | /* | ||
68 | * Reason codes for SCHEDOP_shutdown. These may be interpreted by control | ||
69 | * software to determine the appropriate action. For the most part, Xen does | ||
70 | * not care about the shutdown code. | ||
71 | */ | ||
72 | #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ | ||
73 | #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ | ||
74 | #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ | ||
75 | #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ | ||
76 | |||
77 | #endif /* __XEN_PUBLIC_SCHED_H__ */ | ||
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h new file mode 100644 index 000000000000..ff61ea365997 --- /dev/null +++ b/include/xen/interface/vcpu.h | |||
@@ -0,0 +1,167 @@ | |||
1 | /****************************************************************************** | ||
2 | * vcpu.h | ||
3 | * | ||
4 | * VCPU initialisation, query, and hotplug. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
7 | * of this software and associated documentation files (the "Software"), to | ||
8 | * deal in the Software without restriction, including without limitation the | ||
9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
10 | * sell copies of the Software, and to permit persons to whom the Software is | ||
11 | * furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> | ||
25 | */ | ||
26 | |||
27 | #ifndef __XEN_PUBLIC_VCPU_H__ | ||
28 | #define __XEN_PUBLIC_VCPU_H__ | ||
29 | |||
30 | /* | ||
31 | * Prototype for this hypercall is: | ||
32 | * int vcpu_op(int cmd, int vcpuid, void *extra_args) | ||
33 | * @cmd == VCPUOP_??? (VCPU operation). | ||
34 | * @vcpuid == VCPU to operate on. | ||
35 | * @extra_args == Operation-specific extra arguments (NULL if none). | ||
36 | */ | ||
37 | |||
38 | /* | ||
39 | * Initialise a VCPU. Each VCPU can be initialised only once. A | ||
40 | * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. | ||
41 | * | ||
42 | * @extra_arg == pointer to vcpu_guest_context structure containing initial | ||
43 | * state for the VCPU. | ||
44 | */ | ||
45 | #define VCPUOP_initialise 0 | ||
46 | |||
47 | /* | ||
48 | * Bring up a VCPU. This makes the VCPU runnable. This operation will fail | ||
49 | * if the VCPU has not been initialised (VCPUOP_initialise). | ||
50 | */ | ||
51 | #define VCPUOP_up 1 | ||
52 | |||
53 | /* | ||
54 | * Bring down a VCPU (i.e., make it non-runnable). | ||
55 | * There are a few caveats that callers should observe: | ||
56 |  * 1. This operation may return, and VCPUOP_is_up may return false, before the | ||
57 | * VCPU stops running (i.e., the command is asynchronous). It is a good | ||
58 | * idea to ensure that the VCPU has entered a non-critical loop before | ||
59 | * bringing it down. Alternatively, this operation is guaranteed | ||
60 | * synchronous if invoked by the VCPU itself. | ||
61 | * 2. After a VCPU is initialised, there is currently no way to drop all its | ||
62 | * references to domain memory. Even a VCPU that is down still holds | ||
63 | * memory references via its pagetable base pointer and GDT. It is good | ||
64 |  * practice to move a VCPU onto an 'idle' or default page table, LDT and | ||
65 | * GDT before bringing it down. | ||
66 | */ | ||
67 | #define VCPUOP_down 2 | ||
68 | |||
69 | /* Returns 1 if the given VCPU is up. */ | ||
70 | #define VCPUOP_is_up 3 | ||
71 | |||
72 | /* | ||
73 | * Return information about the state and running time of a VCPU. | ||
74 | * @extra_arg == pointer to vcpu_runstate_info structure. | ||
75 | */ | ||
76 | #define VCPUOP_get_runstate_info 4 | ||
77 | struct vcpu_runstate_info { | ||
78 | /* VCPU's current state (RUNSTATE_*). */ | ||
79 | int state; | ||
80 | /* When was current state entered (system time, ns)? */ | ||
81 | uint64_t state_entry_time; | ||
82 | /* | ||
83 | * Time spent in each RUNSTATE_* (ns). The sum of these times is | ||
84 | * guaranteed not to drift from system time. | ||
85 | */ | ||
86 | uint64_t time[4]; | ||
87 | }; | ||
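[Illustrative sketch, not code from this patch: querying the current VCPU's runstate. The HYPERVISOR_vcpu_op() hypercall wrapper is assumed from the hypercall header, and smp_processor_id() is assumed to name the calling VCPU.]

    /* Hedged sketch: how long this VCPU has spent waiting rather than running. */
    static uint64_t vcpu_waiting_ns(void)
    {
        struct vcpu_runstate_info info;

        if (HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info,
                               smp_processor_id(), &info) < 0)
            return 0;

        return info.time[RUNSTATE_runnable] + info.time[RUNSTATE_offline];
    }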
88 | |||
89 | /* VCPU is currently running on a physical CPU. */ | ||
90 | #define RUNSTATE_running 0 | ||
91 | |||
92 | /* VCPU is runnable, but not currently scheduled on any physical CPU. */ | ||
93 | #define RUNSTATE_runnable 1 | ||
94 | |||
95 | /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ | ||
96 | #define RUNSTATE_blocked 2 | ||
97 | |||
98 | /* | ||
99 | * VCPU is not runnable, but it is not blocked. | ||
100 | * This is a 'catch all' state for things like hotplug and pauses by the | ||
101 | * system administrator (or for critical sections in the hypervisor). | ||
102 | * RUNSTATE_blocked dominates this state (it is the preferred state). | ||
103 | */ | ||
104 | #define RUNSTATE_offline 3 | ||
105 | |||
106 | /* | ||
107 | * Register a shared memory area from which the guest may obtain its own | ||
108 | * runstate information without needing to execute a hypercall. | ||
109 | * Notes: | ||
110 | * 1. The registered address may be virtual or physical, depending on the | ||
111 | * platform. The virtual address should be registered on x86 systems. | ||
112 | * 2. Only one shared area may be registered per VCPU. The shared area is | ||
113 | * updated by the hypervisor each time the VCPU is scheduled. Thus | ||
114 | * runstate.state will always be RUNSTATE_running and | ||
115 | * runstate.state_entry_time will indicate the system time at which the | ||
116 | * VCPU was last scheduled to run. | ||
117 | * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. | ||
118 | */ | ||
119 | #define VCPUOP_register_runstate_memory_area 5 | ||
120 | struct vcpu_register_runstate_memory_area { | ||
121 | union { | ||
122 | struct vcpu_runstate_info *v; | ||
123 | uint64_t p; | ||
124 | } addr; | ||
125 | }; | ||
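[Illustrative sketch, not code from this patch: registering a per-VCPU runstate area in its virtual-address form, as the note above suggests for x86. The HYPERVISOR_vcpu_op() wrapper is assumed, and the static per-CPU array is purely illustrative.]

    static struct vcpu_runstate_info demo_runstate[NR_CPUS];    /* illustrative */

    static void register_runstate_area(int cpu)
    {
        struct vcpu_register_runstate_memory_area area;

        area.addr.v = &demo_runstate[cpu];
        if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
                               cpu, &area))
            printk(KERN_WARNING "runstate area registration failed on VCPU %d\n",
                   cpu);
    }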
126 | |||
127 | /* | ||
128 | * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer | ||
129 | * which can be set via these commands. Periods smaller than one millisecond | ||
130 | * may not be supported. | ||
131 | */ | ||
132 | #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ | ||
133 | #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ | ||
134 | struct vcpu_set_periodic_timer { | ||
135 | uint64_t period_ns; | ||
136 | }; | ||
137 | |||
138 | /* | ||
139 | * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot | ||
140 | * timer which can be set via these commands. | ||
141 | */ | ||
142 | #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ | ||
143 | #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ | ||
144 | struct vcpu_set_singleshot_timer { | ||
145 | uint64_t timeout_abs_ns; | ||
146 | uint32_t flags; /* VCPU_SSHOTTMR_??? */ | ||
147 | }; | ||
148 | |||
149 | /* Flags to VCPUOP_set_singleshot_timer. */ | ||
150 | /* Require the timeout to be in the future (return -ETIME if it has already passed). */ | ||
151 | #define _VCPU_SSHOTTMR_future (0) | ||
152 | #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) | ||
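[Illustrative sketch, not code from this patch: arming the single-shot timer for an absolute deadline, again assuming the HYPERVISOR_vcpu_op() wrapper and smp_processor_id().]

    /* Hedged sketch: -ETIME from Xen means the deadline had already passed. */
    static int set_oneshot_timer(uint64_t deadline_abs_ns)
    {
        struct vcpu_set_singleshot_timer single = {
            .timeout_abs_ns = deadline_abs_ns,
            .flags          = VCPU_SSHOTTMR_future,
        };

        return HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer,
                                  smp_processor_id(), &single);
    }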
153 | |||
154 | /* | ||
155 | * Register a memory location in the guest address space for the | ||
156 | * vcpu_info structure. This allows the guest to place the vcpu_info | ||
157 | * structure in a convenient place, such as in a per-cpu data area. | ||
158 | * The pointer need not be page aligned, but the structure must not | ||
159 | * cross a page boundary. | ||
160 | */ | ||
161 | #define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */ | ||
162 | struct vcpu_register_vcpu_info { | ||
163 | uint32_t mfn; /* mfn of page to place vcpu_info */ | ||
164 | uint32_t offset; /* offset within page */ | ||
165 | }; | ||
166 | |||
167 | #endif /* __XEN_PUBLIC_VCPU_H__ */ | ||
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h new file mode 100644 index 000000000000..453235e923f0 --- /dev/null +++ b/include/xen/interface/version.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /****************************************************************************** | ||
2 | * version.h | ||
3 | * | ||
4 | * Xen version, type, and compile information. | ||
5 | * | ||
6 | * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com> | ||
7 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> | ||
8 | */ | ||
9 | |||
10 | #ifndef __XEN_PUBLIC_VERSION_H__ | ||
11 | #define __XEN_PUBLIC_VERSION_H__ | ||
12 | |||
13 | /* NB. All ops return zero on success, except XENVER_version. */ | ||
14 | |||
15 | /* arg == NULL; returns major:minor (16:16). */ | ||
16 | #define XENVER_version 0 | ||
17 | |||
18 | /* arg == xen_extraversion_t. */ | ||
19 | #define XENVER_extraversion 1 | ||
20 | struct xen_extraversion { | ||
21 | char extraversion[16]; | ||
22 | }; | ||
23 | #define XEN_EXTRAVERSION_LEN (sizeof(struct xen_extraversion)) | ||
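[Illustrative sketch, not code from this patch: decoding the 16:16 major:minor value returned by XENVER_version, assuming the usual HYPERVISOR_xen_version() hypercall wrapper.]

    static void report_xen_version(void)
    {
        int ver = HYPERVISOR_xen_version(XENVER_version, NULL);
        struct xen_extraversion extra;

        HYPERVISOR_xen_version(XENVER_extraversion, &extra);
        printk(KERN_INFO "running on Xen %d.%d%.16s\n",
               ver >> 16, ver & 0xffff, extra.extraversion);
    }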
24 | |||
25 | /* arg == xen_compile_info_t. */ | ||
26 | #define XENVER_compile_info 2 | ||
27 | struct xen_compile_info { | ||
28 | char compiler[64]; | ||
29 | char compile_by[16]; | ||
30 | char compile_domain[32]; | ||
31 | char compile_date[32]; | ||
32 | }; | ||
33 | |||
34 | #define XENVER_capabilities 3 | ||
35 | struct xen_capabilities_info { | ||
36 | char info[1024]; | ||
37 | }; | ||
38 | #define XEN_CAPABILITIES_INFO_LEN (sizeof(struct xen_capabilities_info)) | ||
39 | |||
40 | #define XENVER_changeset 4 | ||
41 | struct xen_changeset_info { | ||
42 | char info[64]; | ||
43 | }; | ||
44 | #define XEN_CHANGESET_INFO_LEN (sizeof(struct xen_changeset_info)) | ||
45 | |||
46 | #define XENVER_platform_parameters 5 | ||
47 | struct xen_platform_parameters { | ||
48 | unsigned long virt_start; | ||
49 | }; | ||
50 | |||
51 | #define XENVER_get_features 6 | ||
52 | struct xen_feature_info { | ||
53 | unsigned int submap_idx; /* IN: which 32-bit submap to return */ | ||
54 | uint32_t submap; /* OUT: 32-bit submap */ | ||
55 | }; | ||
56 | |||
57 | /* Declares the features reported by XENVER_get_features. */ | ||
58 | #include "features.h" | ||
59 | |||
60 | #endif /* __XEN_PUBLIC_VERSION_H__ */ | ||
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h new file mode 100644 index 000000000000..518a5bf79ed3 --- /dev/null +++ b/include/xen/interface/xen.h | |||
@@ -0,0 +1,447 @@ | |||
1 | /****************************************************************************** | ||
2 | * xen.h | ||
3 | * | ||
4 | * Guest OS interface to Xen. | ||
5 | * | ||
6 | * Copyright (c) 2004, K A Fraser | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_XEN_H__ | ||
10 | #define __XEN_PUBLIC_XEN_H__ | ||
11 | |||
12 | #include <asm/xen/interface.h> | ||
13 | |||
14 | /* | ||
15 | * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS). | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. | ||
20 | * EAX = return value | ||
21 | * (argument registers may be clobbered on return) | ||
22 | * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6. | ||
23 | * RAX = return value | ||
24 | * (argument registers not clobbered on return; RCX, R11 are) | ||
25 | */ | ||
26 | #define __HYPERVISOR_set_trap_table 0 | ||
27 | #define __HYPERVISOR_mmu_update 1 | ||
28 | #define __HYPERVISOR_set_gdt 2 | ||
29 | #define __HYPERVISOR_stack_switch 3 | ||
30 | #define __HYPERVISOR_set_callbacks 4 | ||
31 | #define __HYPERVISOR_fpu_taskswitch 5 | ||
32 | #define __HYPERVISOR_sched_op 6 | ||
33 | #define __HYPERVISOR_dom0_op 7 | ||
34 | #define __HYPERVISOR_set_debugreg 8 | ||
35 | #define __HYPERVISOR_get_debugreg 9 | ||
36 | #define __HYPERVISOR_update_descriptor 10 | ||
37 | #define __HYPERVISOR_memory_op 12 | ||
38 | #define __HYPERVISOR_multicall 13 | ||
39 | #define __HYPERVISOR_update_va_mapping 14 | ||
40 | #define __HYPERVISOR_set_timer_op 15 | ||
41 | #define __HYPERVISOR_event_channel_op_compat 16 | ||
42 | #define __HYPERVISOR_xen_version 17 | ||
43 | #define __HYPERVISOR_console_io 18 | ||
44 | #define __HYPERVISOR_physdev_op_compat 19 | ||
45 | #define __HYPERVISOR_grant_table_op 20 | ||
46 | #define __HYPERVISOR_vm_assist 21 | ||
47 | #define __HYPERVISOR_update_va_mapping_otherdomain 22 | ||
48 | #define __HYPERVISOR_iret 23 /* x86 only */ | ||
49 | #define __HYPERVISOR_vcpu_op 24 | ||
50 | #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ | ||
51 | #define __HYPERVISOR_mmuext_op 26 | ||
52 | #define __HYPERVISOR_acm_op 27 | ||
53 | #define __HYPERVISOR_nmi_op 28 | ||
54 | #define __HYPERVISOR_sched_op_new 29 | ||
55 | #define __HYPERVISOR_callback_op 30 | ||
56 | #define __HYPERVISOR_xenoprof_op 31 | ||
57 | #define __HYPERVISOR_event_channel_op 32 | ||
58 | #define __HYPERVISOR_physdev_op 33 | ||
59 | #define __HYPERVISOR_hvm_op 34 | ||
60 | |||
61 | /* | ||
62 | * VIRTUAL INTERRUPTS | ||
63 | * | ||
64 | * Virtual interrupts that a guest OS may receive from Xen. | ||
65 | */ | ||
66 | #define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */ | ||
67 | #define VIRQ_DEBUG 1 /* Request guest to dump debug info. */ | ||
68 | #define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */ | ||
69 | #define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */ | ||
70 | #define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */ | ||
71 | #define NR_VIRQS 8 | ||
72 | |||
73 | /* | ||
74 | * MMU-UPDATE REQUESTS | ||
75 | * | ||
76 | * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. | ||
77 | * A foreigndom (FD) can be specified (or DOMID_SELF for none). | ||
78 | * Where the FD has some effect, it is described below. | ||
79 | * ptr[1:0] specifies the appropriate MMU_* command. | ||
80 | * | ||
81 | * ptr[1:0] == MMU_NORMAL_PT_UPDATE: | ||
82 | * Updates an entry in a page table. If updating an L1 table, and the new | ||
83 | * table entry is valid/present, the mapped frame must belong to the FD, if | ||
84 | * an FD has been specified. If attempting to map an I/O page then the | ||
85 | * caller assumes the privilege of the FD. | ||
86 | * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. | ||
87 | * FD == DOMID_XEN: Map restricted areas of Xen's heap space. | ||
88 | * ptr[:2] -- Machine address of the page-table entry to modify. | ||
89 | * val -- Value to write. | ||
90 | * | ||
91 | * ptr[1:0] == MMU_MACHPHYS_UPDATE: | ||
92 | * Updates an entry in the machine->pseudo-physical mapping table. | ||
93 | * ptr[:2] -- Machine address within the frame whose mapping to modify. | ||
94 | * The frame must belong to the FD, if one is specified. | ||
95 | * val -- Value to write into the mapping entry. | ||
96 | */ | ||
97 | #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ | ||
98 | #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ | ||
99 | |||
100 | /* | ||
101 | * MMU EXTENDED OPERATIONS | ||
102 | * | ||
103 | * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. | ||
104 | * A foreigndom (FD) can be specified (or DOMID_SELF for none). | ||
105 | * Where the FD has some effect, it is described below. | ||
106 | * | ||
107 | * cmd: MMUEXT_(UN)PIN_*_TABLE | ||
108 | * mfn: Machine frame number to be (un)pinned as a p.t. page. | ||
109 | * The frame must belong to the FD, if one is specified. | ||
110 | * | ||
111 | * cmd: MMUEXT_NEW_BASEPTR | ||
112 | * mfn: Machine frame number of new page-table base to install in MMU. | ||
113 | * | ||
114 | * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] | ||
115 | * mfn: Machine frame number of new page-table base to install in MMU | ||
116 | * when in user space. | ||
117 | * | ||
118 | * cmd: MMUEXT_TLB_FLUSH_LOCAL | ||
119 | * No additional arguments. Flushes local TLB. | ||
120 | * | ||
121 | * cmd: MMUEXT_INVLPG_LOCAL | ||
122 | * linear_addr: Linear address to be flushed from the local TLB. | ||
123 | * | ||
124 | * cmd: MMUEXT_TLB_FLUSH_MULTI | ||
125 | * vcpumask: Pointer to bitmap of VCPUs to be flushed. | ||
126 | * | ||
127 | * cmd: MMUEXT_INVLPG_MULTI | ||
128 | * linear_addr: Linear address to be flushed. | ||
129 | * vcpumask: Pointer to bitmap of VCPUs to be flushed. | ||
130 | * | ||
131 | * cmd: MMUEXT_TLB_FLUSH_ALL | ||
132 | * No additional arguments. Flushes all VCPUs' TLBs. | ||
133 | * | ||
134 | * cmd: MMUEXT_INVLPG_ALL | ||
135 | * linear_addr: Linear address to be flushed from all VCPUs' TLBs. | ||
136 | * | ||
137 | * cmd: MMUEXT_FLUSH_CACHE | ||
138 | * No additional arguments. Writes back and flushes cache contents. | ||
139 | * | ||
140 | * cmd: MMUEXT_SET_LDT | ||
141 | * linear_addr: Linear address of LDT base (NB. must be page-aligned). | ||
142 | * nr_ents: Number of entries in LDT. | ||
143 | */ | ||
144 | #define MMUEXT_PIN_L1_TABLE 0 | ||
145 | #define MMUEXT_PIN_L2_TABLE 1 | ||
146 | #define MMUEXT_PIN_L3_TABLE 2 | ||
147 | #define MMUEXT_PIN_L4_TABLE 3 | ||
148 | #define MMUEXT_UNPIN_TABLE 4 | ||
149 | #define MMUEXT_NEW_BASEPTR 5 | ||
150 | #define MMUEXT_TLB_FLUSH_LOCAL 6 | ||
151 | #define MMUEXT_INVLPG_LOCAL 7 | ||
152 | #define MMUEXT_TLB_FLUSH_MULTI 8 | ||
153 | #define MMUEXT_INVLPG_MULTI 9 | ||
154 | #define MMUEXT_TLB_FLUSH_ALL 10 | ||
155 | #define MMUEXT_INVLPG_ALL 11 | ||
156 | #define MMUEXT_FLUSH_CACHE 12 | ||
157 | #define MMUEXT_SET_LDT 13 | ||
158 | #define MMUEXT_NEW_USER_BASEPTR 15 | ||
159 | |||
160 | #ifndef __ASSEMBLY__ | ||
161 | struct mmuext_op { | ||
162 | unsigned int cmd; | ||
163 | union { | ||
164 | /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */ | ||
165 | unsigned long mfn; | ||
166 | /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ | ||
167 | unsigned long linear_addr; | ||
168 | } arg1; | ||
169 | union { | ||
170 | /* SET_LDT */ | ||
171 | unsigned int nr_ents; | ||
172 | /* TLB_FLUSH_MULTI, INVLPG_MULTI */ | ||
173 | void *vcpumask; | ||
174 | } arg2; | ||
175 | }; | ||
176 | DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); | ||
177 | #endif | ||
178 | |||
179 | /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ | ||
180 | /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ | ||
181 | /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ | ||
182 | #define UVMF_NONE (0UL<<0) /* No flushing at all. */ | ||
183 | #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ | ||
184 | #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ | ||
185 | #define UVMF_FLUSHTYPE_MASK (3UL<<0) | ||
186 | #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ | ||
187 | #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ | ||
188 | #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ | ||
189 | |||
190 | /* | ||
191 | * Commands to HYPERVISOR_console_io(). | ||
192 | */ | ||
193 | #define CONSOLEIO_write 0 | ||
194 | #define CONSOLEIO_read 1 | ||
195 | |||
196 | /* | ||
197 | * Commands to HYPERVISOR_vm_assist(). | ||
198 | */ | ||
199 | #define VMASST_CMD_enable 0 | ||
200 | #define VMASST_CMD_disable 1 | ||
201 | #define VMASST_TYPE_4gb_segments 0 | ||
202 | #define VMASST_TYPE_4gb_segments_notify 1 | ||
203 | #define VMASST_TYPE_writable_pagetables 2 | ||
204 | #define VMASST_TYPE_pae_extended_cr3 3 | ||
205 | #define MAX_VMASST_TYPE 3 | ||
206 | |||
207 | #ifndef __ASSEMBLY__ | ||
208 | |||
209 | typedef uint16_t domid_t; | ||
210 | |||
211 | /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ | ||
212 | #define DOMID_FIRST_RESERVED (0x7FF0U) | ||
213 | |||
214 | /* DOMID_SELF is used in certain contexts to refer to oneself. */ | ||
215 | #define DOMID_SELF (0x7FF0U) | ||
216 | |||
217 | /* | ||
218 | * DOMID_IO is used to restrict page-table updates to mapping I/O memory. | ||
219 | * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO | ||
220 | * is useful to ensure that no mappings to the OS's own heap are accidentally | ||
221 | * installed. (e.g., in Linux this could cause havoc as reference counts | ||
222 | * aren't adjusted on the I/O-mapping code path). | ||
223 | * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can | ||
224 | * be specified by any calling domain. | ||
225 | */ | ||
226 | #define DOMID_IO (0x7FF1U) | ||
227 | |||
228 | /* | ||
229 | * DOMID_XEN is used to allow privileged domains to map restricted parts of | ||
230 | * Xen's heap space (e.g., the machine_to_phys table). | ||
231 | * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if | ||
232 | * the caller is privileged. | ||
233 | */ | ||
234 | #define DOMID_XEN (0x7FF2U) | ||
235 | |||
236 | /* | ||
237 | * Send an array of these to HYPERVISOR_mmu_update(). | ||
238 | * NB. The fields are natural pointer/address size for this architecture. | ||
239 | */ | ||
240 | struct mmu_update { | ||
241 | uint64_t ptr; /* Machine address of PTE. */ | ||
242 | uint64_t val; /* New contents of PTE. */ | ||
243 | }; | ||
244 | DEFINE_GUEST_HANDLE_STRUCT(mmu_update); | ||
245 | |||
246 | /* | ||
247 | * Send an array of these to HYPERVISOR_multicall(). | ||
248 | * NB. The fields are natural register size for this architecture. | ||
249 | */ | ||
250 | struct multicall_entry { | ||
251 | unsigned long op; | ||
252 | long result; | ||
253 | unsigned long args[6]; | ||
254 | }; | ||
255 | DEFINE_GUEST_HANDLE_STRUCT(multicall_entry); | ||
256 | |||
257 | /* | ||
258 | * Event channel endpoints per domain: | ||
259 | * 1024 if a long is 32 bits; 4096 if a long is 64 bits. | ||
260 | */ | ||
261 | #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) | ||
262 | |||
263 | struct vcpu_time_info { | ||
264 | /* | ||
265 | * Updates to the following values are preceded and followed | ||
266 | * by an increment of 'version'. The guest can therefore | ||
267 | * detect updates by looking for changes to 'version'. If the | ||
268 | * least-significant bit of the version number is set then an | ||
269 | * update is in progress and the guest must wait to read a | ||
270 | * consistent set of values. The correct way to interact with | ||
271 | * the version number is similar to Linux's seqlock: see the | ||
272 | * implementations of read_seqbegin/read_seqretry. | ||
273 | */ | ||
274 | uint32_t version; | ||
275 | uint32_t pad0; | ||
276 | uint64_t tsc_timestamp; /* TSC at last update of time vals. */ | ||
277 | uint64_t system_time; /* Time, in nanosecs, since boot. */ | ||
278 | /* | ||
279 | * Current system time: | ||
280 | * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul | ||
281 | * CPU frequency (Hz): | ||
282 | * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift | ||
283 | */ | ||
284 | uint32_t tsc_to_system_mul; | ||
285 | int8_t tsc_shift; | ||
286 | int8_t pad1[3]; | ||
287 | }; /* 32 bytes */ | ||
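[Illustrative sketch, not code from this patch: the seqlock-style read protocol described above. rmb() is assumed from the kernel's barrier primitives; the final multiply treats tsc_to_system_mul as a 32.32 fixed-point factor, and real implementations use a wider multiply to avoid overflow on large TSC deltas.]

    static uint64_t xen_system_time_ns(const volatile struct vcpu_time_info *t,
                                       uint64_t tsc)
    {
        uint32_t version, mul;
        uint64_t base, stamp, delta;
        int8_t shift;

        do {
            version = t->version;
            rmb();              /* read the snapshot after the version */
            base  = t->system_time;
            stamp = t->tsc_timestamp;
            mul   = t->tsc_to_system_mul;
            shift = t->tsc_shift;
            rmb();              /* re-check the version after the snapshot */
        } while ((version & 1) || version != t->version);

        delta = tsc - stamp;
        if (shift >= 0)
            delta <<= shift;
        else
            delta >>= -shift;

        /* 32.32 fixed point; may overflow for very large deltas (see above). */
        return base + ((delta * mul) >> 32);
    }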
288 | |||
289 | struct vcpu_info { | ||
290 | /* | ||
291 | * 'evtchn_upcall_pending' is written non-zero by Xen to indicate | ||
292 | * a pending notification for a particular VCPU. It is then cleared | ||
293 | * by the guest OS /before/ checking for pending work, thus avoiding | ||
294 | * a set-and-check race. Note that the mask is only accessed by Xen | ||
295 | * on the CPU that is currently hosting the VCPU. This means that the | ||
296 | * pending and mask flags can be updated by the guest without special | ||
297 | * synchronisation (i.e., no need for the x86 LOCK prefix). | ||
298 | * This may seem suboptimal because if the pending flag is set by | ||
299 | * a different CPU then an IPI may be scheduled even when the mask | ||
300 | * is set. However, note: | ||
301 | * 1. The task of 'interrupt holdoff' is covered by the per-event- | ||
302 | * channel mask bits. A 'noisy' event that is continually being | ||
303 | * triggered can be masked at source at this very precise | ||
304 | * granularity. | ||
305 | * 2. The main purpose of the per-VCPU mask is therefore to restrict | ||
306 | * reentrant execution: whether for concurrency control, or to | ||
307 | * prevent unbounded stack usage. Whatever the purpose, we expect | ||
308 | * that the mask will be asserted only for short periods at a time, | ||
309 | * and so the likelihood of a 'spurious' IPI is suitably small. | ||
310 | * The mask is read before making an event upcall to the guest: a | ||
311 | * non-zero mask therefore guarantees that the VCPU will not receive | ||
312 | * an upcall activation. The mask is cleared when the VCPU requests | ||
313 | * to block: this avoids wakeup-waiting races. | ||
314 | */ | ||
315 | uint8_t evtchn_upcall_pending; | ||
316 | uint8_t evtchn_upcall_mask; | ||
317 | unsigned long evtchn_pending_sel; | ||
318 | struct arch_vcpu_info arch; | ||
319 | struct vcpu_time_info time; | ||
320 | }; /* 64 bytes (x86) */ | ||
321 | |||
322 | /* | ||
323 | * Xen/kernel shared data -- pointer provided in start_info. | ||
324 | * NB. We expect that this struct is smaller than a page. | ||
325 | */ | ||
326 | struct shared_info { | ||
327 | struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; | ||
328 | |||
329 | /* | ||
330 | * A domain can create "event channels" on which it can send and receive | ||
331 | * asynchronous event notifications. There are three classes of event that | ||
332 | * are delivered by this mechanism: | ||
333 | * 1. Bi-directional inter- and intra-domain connections. Domains must | ||
334 | * arrange out-of-band to set up a connection (usually by allocating | ||
335 | * an unbound 'listener' port and avertising that via a storage service | ||
336 | * such as xenstore). | ||
337 | * 2. Physical interrupts. A domain with suitable hardware-access | ||
338 | * privileges can bind an event-channel port to a physical interrupt | ||
339 | * source. | ||
340 | * 3. Virtual interrupts ('events'). A domain can bind an event-channel | ||
341 | * port to a virtual interrupt source, such as the virtual-timer | ||
342 | * device or the emergency console. | ||
343 | * | ||
344 | * Event channels are addressed by a "port index". Each channel is | ||
345 | * associated with two bits of information: | ||
346 | * 1. PENDING -- notifies the domain that there is a pending notification | ||
347 | * to be processed. This bit is cleared by the guest. | ||
348 | * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING | ||
349 | * will cause an asynchronous upcall to be scheduled. This bit is only | ||
350 | * updated by the guest. It is read-only within Xen. If a channel | ||
351 | * becomes pending while the channel is masked then the 'edge' is lost | ||
352 | * (i.e., when the channel is unmasked, the guest must manually handle | ||
353 | * pending notifications as no upcall will be scheduled by Xen). | ||
354 | * | ||
355 | * To expedite scanning of pending notifications, any 0->1 pending | ||
356 | * transition on an unmasked channel causes a corresponding bit in a | ||
357 | * per-vcpu selector word to be set. Each bit in the selector covers a | ||
358 | * 'C long' in the PENDING bitfield array. | ||
359 | */ | ||
360 | unsigned long evtchn_pending[sizeof(unsigned long) * 8]; | ||
361 | unsigned long evtchn_mask[sizeof(unsigned long) * 8]; | ||
362 | |||
363 | /* | ||
364 | * Wallclock time: updated only by control software. Guests should base | ||
365 | * their gettimeofday() syscall on this wallclock-base value. | ||
366 | */ | ||
367 | uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ | ||
368 | uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ | ||
369 | uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ | ||
370 | |||
371 | struct arch_shared_info arch; | ||
372 | |||
373 | }; | ||
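[Illustrative sketch, not code from this patch: the two-level scan described in the comment above, selector word first, then the matching word of the PENDING bitfield. xchg(), __ffs(), clear_bit() and BITS_PER_LONG are assumed from the usual kernel headers; handle_port() is a hypothetical dispatch hook.]

    extern void handle_port(unsigned int port);     /* hypothetical dispatch hook */

    static void scan_pending_events(struct shared_info *s, struct vcpu_info *v)
    {
        /* Snapshot and clear this VCPU's selector word. */
        unsigned long sel = xchg(&v->evtchn_pending_sel, 0);

        while (sel) {
            unsigned int word = __ffs(sel);
            unsigned long pending;

            sel &= ~(1UL << word);
            pending = s->evtchn_pending[word] & ~s->evtchn_mask[word];
            while (pending) {
                unsigned int bit = __ffs(pending);

                pending &= ~(1UL << bit);
                /* PENDING is cleared by the guest, per the comment above. */
                clear_bit(bit, &s->evtchn_pending[word]);
                handle_port(word * BITS_PER_LONG + bit);
            }
        }
    }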
374 | |||
375 | /* | ||
376 | * Start-of-day memory layout for the initial domain (DOM0): | ||
377 |  * 1. The domain is started within a contiguous virtual-memory region. | ||
378 | * 2. The contiguous region begins and ends on an aligned 4MB boundary. | ||
379 | * 3. The region start corresponds to the load address of the OS image. | ||
380 | * If the load address is not 4MB aligned then the address is rounded down. | ||
381 |  * 4. This is the order of bootstrap elements in the initial virtual region: | ||
382 | * a. relocated kernel image | ||
383 | * b. initial ram disk [mod_start, mod_len] | ||
384 | * c. list of allocated page frames [mfn_list, nr_pages] | ||
385 | * d. start_info_t structure [register ESI (x86)] | ||
386 | * e. bootstrap page tables [pt_base, CR3 (x86)] | ||
387 | * f. bootstrap stack [register ESP (x86)] | ||
388 | * 5. Bootstrap elements are packed together, but each is 4kB-aligned. | ||
389 | * 6. The initial ram disk may be omitted. | ||
390 | * 7. The list of page frames forms a contiguous 'pseudo-physical' memory | ||
391 | * layout for the domain. In particular, the bootstrap virtual-memory | ||
392 | * region is a 1:1 mapping to the first section of the pseudo-physical map. | ||
393 | * 8. All bootstrap elements are mapped read-writable for the guest OS. The | ||
394 | * only exception is the bootstrap page table, which is mapped read-only. | ||
395 | * 9. There is guaranteed to be at least 512kB padding after the final | ||
396 | * bootstrap element. If necessary, the bootstrap virtual region is | ||
397 | * extended by an extra 4MB to ensure this. | ||
398 | */ | ||
399 | |||
400 | #define MAX_GUEST_CMDLINE 1024 | ||
401 | struct start_info { | ||
402 | /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ | ||
403 | char magic[32]; /* "xen-<version>-<platform>". */ | ||
404 | unsigned long nr_pages; /* Total pages allocated to this domain. */ | ||
405 | unsigned long shared_info; /* MACHINE address of shared info struct. */ | ||
406 | uint32_t flags; /* SIF_xxx flags. */ | ||
407 | unsigned long store_mfn; /* MACHINE page number of shared page. */ | ||
408 | uint32_t store_evtchn; /* Event channel for store communication. */ | ||
409 | union { | ||
410 | struct { | ||
411 | unsigned long mfn; /* MACHINE page number of console page. */ | ||
412 | uint32_t evtchn; /* Event channel for console page. */ | ||
413 | } domU; | ||
414 | struct { | ||
415 | uint32_t info_off; /* Offset of console_info struct. */ | ||
416 | uint32_t info_size; /* Size of console_info struct from start.*/ | ||
417 | } dom0; | ||
418 | } console; | ||
419 | /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ | ||
420 | unsigned long pt_base; /* VIRTUAL address of page directory. */ | ||
421 | unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ | ||
422 | unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ | ||
423 | unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ | ||
424 | unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ | ||
425 | int8_t cmd_line[MAX_GUEST_CMDLINE]; | ||
426 | }; | ||
427 | |||
428 | /* These flags are passed in the 'flags' field of start_info_t. */ | ||
429 | #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ | ||
430 | #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ | ||
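[Illustrative sketch, not code from this patch: consuming start_info early in boot. xen_start_info is assumed to be a pointer that the entry code saved from register ESI, per the layout notes above.]

    extern struct start_info *xen_start_info;   /* assumed saved by entry code */

    static void report_boot_info(void)
    {
        printk(KERN_INFO "booted via %.32s: %lu pages, %s domain\n",
               xen_start_info->magic, xen_start_info->nr_pages,
               (xen_start_info->flags & SIF_INITDOMAIN) ? "initial" : "unprivileged");
    }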
431 | |||
432 | typedef uint64_t cpumap_t; | ||
433 | |||
434 | typedef uint8_t xen_domain_handle_t[16]; | ||
435 | |||
436 | /* Turn a plain number into a C unsigned long constant. */ | ||
437 | #define __mk_unsigned_long(x) x ## UL | ||
438 | #define mk_unsigned_long(x) __mk_unsigned_long(x) | ||
439 | |||
440 | #else /* __ASSEMBLY__ */ | ||
441 | |||
442 | /* In assembly code we cannot use C numeric constant suffixes. */ | ||
443 | #define mk_unsigned_long(x) x | ||
444 | |||
445 | #endif /* !__ASSEMBLY__ */ | ||
446 | |||
447 | #endif /* __XEN_PUBLIC_XEN_H__ */ | ||
diff --git a/include/xen/page.h b/include/xen/page.h new file mode 100644 index 000000000000..1df6c1930578 --- /dev/null +++ b/include/xen/page.h | |||
@@ -0,0 +1,179 @@ | |||
1 | #ifndef __XEN_PAGE_H | ||
2 | #define __XEN_PAGE_H | ||
3 | |||
4 | #include <linux/pfn.h> | ||
5 | |||
6 | #include <asm/uaccess.h> | ||
7 | |||
8 | #include <xen/features.h> | ||
9 | |||
10 | #ifdef CONFIG_X86_PAE | ||
11 | /* Xen machine address */ | ||
12 | typedef struct xmaddr { | ||
13 | unsigned long long maddr; | ||
14 | } xmaddr_t; | ||
15 | |||
16 | /* Xen pseudo-physical address */ | ||
17 | typedef struct xpaddr { | ||
18 | unsigned long long paddr; | ||
19 | } xpaddr_t; | ||
20 | #else | ||
21 | /* Xen machine address */ | ||
22 | typedef struct xmaddr { | ||
23 | unsigned long maddr; | ||
24 | } xmaddr_t; | ||
25 | |||
26 | /* Xen pseudo-physical address */ | ||
27 | typedef struct xpaddr { | ||
28 | unsigned long paddr; | ||
29 | } xpaddr_t; | ||
30 | #endif | ||
31 | |||
32 | #define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) | ||
33 | #define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) | ||
34 | |||
35 | /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ | ||
36 | #define INVALID_P2M_ENTRY (~0UL) | ||
37 | #define FOREIGN_FRAME_BIT (1UL<<31) | ||
38 | #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) | ||
39 | |||
40 | extern unsigned long *phys_to_machine_mapping; | ||
41 | |||
42 | static inline unsigned long pfn_to_mfn(unsigned long pfn) | ||
43 | { | ||
44 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
45 | return pfn; | ||
46 | |||
47 | return phys_to_machine_mapping[(unsigned int)(pfn)] & | ||
48 | ~FOREIGN_FRAME_BIT; | ||
49 | } | ||
50 | |||
51 | static inline int phys_to_machine_mapping_valid(unsigned long pfn) | ||
52 | { | ||
53 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
54 | return 1; | ||
55 | |||
56 | return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); | ||
57 | } | ||
58 | |||
59 | static inline unsigned long mfn_to_pfn(unsigned long mfn) | ||
60 | { | ||
61 | unsigned long pfn; | ||
62 | |||
63 | if (xen_feature(XENFEAT_auto_translated_physmap)) | ||
64 | return mfn; | ||
65 | |||
66 | #if 0 | ||
67 | if (unlikely((mfn >> machine_to_phys_order) != 0)) | ||
68 | return max_mapnr; | ||
69 | #endif | ||
70 | |||
71 | pfn = 0; | ||
72 | /* | ||
73 | * The array access can fail (e.g., device space beyond end of RAM). | ||
74 | * In such cases it doesn't matter what we return (we return garbage), | ||
75 | * but we must handle the fault without crashing! | ||
76 | */ | ||
77 | __get_user(pfn, &machine_to_phys_mapping[mfn]); | ||
78 | |||
79 | return pfn; | ||
80 | } | ||
81 | |||
82 | static inline xmaddr_t phys_to_machine(xpaddr_t phys) | ||
83 | { | ||
84 | unsigned offset = phys.paddr & ~PAGE_MASK; | ||
85 | return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset); | ||
86 | } | ||
87 | |||
88 | static inline xpaddr_t machine_to_phys(xmaddr_t machine) | ||
89 | { | ||
90 | unsigned offset = machine.maddr & ~PAGE_MASK; | ||
91 | return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * We detect special mappings in one of two ways: | ||
96 | * 1. If the MFN is an I/O page then Xen will set the m2p entry | ||
97 | * to be outside our maximum possible pseudophys range. | ||
98 | * 2. If the MFN belongs to a different domain then we will certainly | ||
99 | * not have MFN in our p2m table. Conversely, if the page is ours, | ||
100 | * then we'll have p2m(m2p(MFN))==MFN. | ||
101 | * If we detect a special mapping then it doesn't have a 'struct page'. | ||
102 | * We force !pfn_valid() by returning an out-of-range pointer. | ||
103 | * | ||
104 | * NB. These checks require that, for any MFN that is not in our reservation, | ||
105 | * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if | ||
106 |  * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN. | ||
107 | * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. | ||
108 | * | ||
109 | * NB2. When deliberately mapping foreign pages into the p2m table, you *must* | ||
110 | * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we | ||
111 | * require. In all the cases we care about, the FOREIGN_FRAME bit is | ||
112 | * masked (e.g., pfn_to_mfn()) so behaviour there is correct. | ||
113 | */ | ||
114 | static inline unsigned long mfn_to_local_pfn(unsigned long mfn) | ||
115 | { | ||
116 | extern unsigned long max_mapnr; | ||
117 | unsigned long pfn = mfn_to_pfn(mfn); | ||
118 | if ((pfn < max_mapnr) | ||
119 | && !xen_feature(XENFEAT_auto_translated_physmap) | ||
120 | && (phys_to_machine_mapping[pfn] != mfn)) | ||
121 | return max_mapnr; /* force !pfn_valid() */ | ||
122 | return pfn; | ||
123 | } | ||
124 | |||
125 | static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | ||
126 | { | ||
127 | if (xen_feature(XENFEAT_auto_translated_physmap)) { | ||
128 | BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); | ||
129 | return; | ||
130 | } | ||
131 | phys_to_machine_mapping[pfn] = mfn; | ||
132 | } | ||
133 | |||
134 | /* VIRT <-> MACHINE conversion */ | ||
135 | #define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) | ||
136 | #define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v)))) | ||
137 | #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) | ||
138 | |||
139 | #ifdef CONFIG_X86_PAE | ||
140 | #define pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \ | ||
141 | (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT))) | ||
142 | |||
143 | static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot) | ||
144 | { | ||
145 | pte_t pte; | ||
146 | |||
147 | pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | | ||
148 | (pgprot_val(pgprot) >> 32); | ||
149 | pte.pte_high &= (__supported_pte_mask >> 32); | ||
150 | pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)); | ||
151 | pte.pte_low &= __supported_pte_mask; | ||
152 | |||
153 | return pte; | ||
154 | } | ||
155 | |||
156 | static inline unsigned long long pte_val_ma(pte_t x) | ||
157 | { | ||
158 | return ((unsigned long long)x.pte_high << 32) | x.pte_low; | ||
159 | } | ||
160 | #define pmd_val_ma(v) ((v).pmd) | ||
161 | #define pud_val_ma(v) ((v).pgd.pgd) | ||
162 | #define __pte_ma(x) ((pte_t) { .pte_low = (x), .pte_high = (x)>>32 } ) | ||
163 | #define __pmd_ma(x) ((pmd_t) { (x) } ) | ||
164 | #else /* !X86_PAE */ | ||
165 | #define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT) | ||
166 | #define mfn_pte(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
167 | #define pte_val_ma(x) ((x).pte_low) | ||
168 | #define pmd_val_ma(v) ((v).pud.pgd.pgd) | ||
169 | #define __pte_ma(x) ((pte_t) { (x) } ) | ||
170 | #endif /* CONFIG_X86_PAE */ | ||
171 | |||
172 | #define pgd_val_ma(x) ((x).pgd) | ||
173 | |||
174 | |||
175 | xmaddr_t arbitrary_virt_to_machine(unsigned long address); | ||
176 | void make_lowmem_page_readonly(void *vaddr); | ||
177 | void make_lowmem_page_readwrite(void *vaddr); | ||
178 | |||
179 | #endif /* __XEN_PAGE_H */ | ||
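[Illustrative sketch, not code from this patch: tying the conversion helpers above to struct mmu_update from xen.h. The HYPERVISOR_mmu_update() wrapper and its (req, count, success_count, domid) signature are assumed from the hypercall header.]

    /* Hedged sketch: rewrite an existing PTE to point at pseudo-physical
     * frame `pfn`, translating both the PTE's own location and the target
     * frame into machine terms. */
    static int remap_pte(pte_t *ptep, unsigned long pfn, pgprot_t prot)
    {
        struct mmu_update u;

        u.ptr = arbitrary_virt_to_machine((unsigned long)ptep).maddr;
        u.val = pte_val_ma(mfn_pte(pfn_to_mfn(pfn), prot));

        /* One update, no foreign domain. */
        return HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
    }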
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h new file mode 100644 index 000000000000..6f7c290651ae --- /dev/null +++ b/include/xen/xenbus.h | |||
@@ -0,0 +1,234 @@ | |||
1 | /****************************************************************************** | ||
2 | * xenbus.h | ||
3 | * | ||
4 | * Talks to Xen Store to figure out what devices we have. | ||
5 | * | ||
6 | * Copyright (C) 2005 Rusty Russell, IBM Corporation | ||
7 | * Copyright (C) 2005 XenSource Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version 2 | ||
11 | * as published by the Free Software Foundation; or, when distributed | ||
12 | * separately from the Linux kernel or incorporated into other | ||
13 | * software packages, subject to the following license: | ||
14 | * | ||
15 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
16 | * of this source file (the "Software"), to deal in the Software without | ||
17 | * restriction, including without limitation the rights to use, copy, modify, | ||
18 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
19 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
20 | * the following conditions: | ||
21 | * | ||
22 | * The above copyright notice and this permission notice shall be included in | ||
23 | * all copies or substantial portions of the Software. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
26 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
27 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
28 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
29 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
30 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
31 | * IN THE SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #ifndef _XEN_XENBUS_H | ||
35 | #define _XEN_XENBUS_H | ||
36 | |||
37 | #include <linux/device.h> | ||
38 | #include <linux/notifier.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include <linux/completion.h> | ||
41 | #include <linux/init.h> | ||
42 | #include <xen/interface/xen.h> | ||
43 | #include <xen/interface/grant_table.h> | ||
44 | #include <xen/interface/io/xenbus.h> | ||
45 | #include <xen/interface/io/xs_wire.h> | ||
46 | |||
47 | /* Register callback to watch this node. */ | ||
48 | struct xenbus_watch | ||
49 | { | ||
50 | struct list_head list; | ||
51 | |||
52 | /* Path being watched. */ | ||
53 | const char *node; | ||
54 | |||
55 | /* Callback (executed in a process context with no locks held). */ | ||
56 | void (*callback)(struct xenbus_watch *, | ||
57 | const char **vec, unsigned int len); | ||
58 | }; | ||
59 | |||
60 | |||
61 | /* A xenbus device. */ | ||
62 | struct xenbus_device { | ||
63 | const char *devicetype; | ||
64 | const char *nodename; | ||
65 | const char *otherend; | ||
66 | int otherend_id; | ||
67 | struct xenbus_watch otherend_watch; | ||
68 | struct device dev; | ||
69 | enum xenbus_state state; | ||
70 | struct completion down; | ||
71 | }; | ||
72 | |||
73 | static inline struct xenbus_device *to_xenbus_device(struct device *dev) | ||
74 | { | ||
75 | return container_of(dev, struct xenbus_device, dev); | ||
76 | } | ||
77 | |||
78 | struct xenbus_device_id | ||
79 | { | ||
80 | /* .../device/<device_type>/<identifier> */ | ||
81 | char devicetype[32]; /* General class of device. */ | ||
82 | }; | ||
83 | |||
84 | /* A xenbus driver. */ | ||
85 | struct xenbus_driver { | ||
86 | char *name; | ||
87 | struct module *owner; | ||
88 | const struct xenbus_device_id *ids; | ||
89 | int (*probe)(struct xenbus_device *dev, | ||
90 | const struct xenbus_device_id *id); | ||
91 | void (*otherend_changed)(struct xenbus_device *dev, | ||
92 | enum xenbus_state backend_state); | ||
93 | int (*remove)(struct xenbus_device *dev); | ||
94 | int (*suspend)(struct xenbus_device *dev); | ||
95 | int (*suspend_cancel)(struct xenbus_device *dev); | ||
96 | int (*resume)(struct xenbus_device *dev); | ||
97 | int (*uevent)(struct xenbus_device *, char **, int, char *, int); | ||
98 | struct device_driver driver; | ||
99 | int (*read_otherend_details)(struct xenbus_device *dev); | ||
100 | }; | ||
101 | |||
102 | static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) | ||
103 | { | ||
104 | return container_of(drv, struct xenbus_driver, driver); | ||
105 | } | ||
106 | |||
107 | int __must_check __xenbus_register_frontend(struct xenbus_driver *drv, | ||
108 | struct module *owner, | ||
109 | const char *mod_name); | ||
110 | |||
111 | static inline int __must_check | ||
112 | xenbus_register_frontend(struct xenbus_driver *drv) | ||
113 | { | ||
114 | WARN_ON(drv->owner != THIS_MODULE); | ||
115 | return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); | ||
116 | } | ||
117 | |||
118 | int __must_check __xenbus_register_backend(struct xenbus_driver *drv, | ||
119 | struct module *owner, | ||
120 | const char *mod_name); | ||
121 | static inline int __must_check | ||
122 | xenbus_register_backend(struct xenbus_driver *drv) | ||
123 | { | ||
124 | WARN_ON(drv->owner != THIS_MODULE); | ||
125 | return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); | ||
126 | } | ||
127 | |||
128 | void xenbus_unregister_driver(struct xenbus_driver *drv); | ||
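[Illustrative sketch, not code from this patch: a skeletal frontend driver built on the registration interfaces above. The "demo" device type, all names, and the callback bodies are purely illustrative, and the usual module boilerplate headers are assumed.]

    static const struct xenbus_device_id demofront_ids[] = {
        { "demo" },                     /* illustrative device type */
        { "" }
    };

    static int demofront_probe(struct xenbus_device *dev,
                               const struct xenbus_device_id *id)
    {
        dev_info(&dev->dev, "probed %s (other end: %s)\n",
                 dev->nodename, dev->otherend);
        return 0;
    }

    static void demofront_otherend_changed(struct xenbus_device *dev,
                                           enum xenbus_state backend_state)
    {
        if (backend_state == XenbusStateClosing)
            xenbus_frontend_closed(dev);
    }

    static struct xenbus_driver demofront_driver = {
        .name             = "demofront",
        .owner            = THIS_MODULE,
        .ids              = demofront_ids,
        .probe            = demofront_probe,
        .otherend_changed = demofront_otherend_changed,
    };

    static int __init demofront_init(void)
    {
        return xenbus_register_frontend(&demofront_driver);
    }
    module_init(demofront_init);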
129 | |||
130 | struct xenbus_transaction | ||
131 | { | ||
132 | u32 id; | ||
133 | }; | ||
134 | |||
135 | /* Nil transaction ID. */ | ||
136 | #define XBT_NIL ((struct xenbus_transaction) { 0 }) | ||
137 | |||
138 | int __init xenbus_dev_init(void); | ||
139 | |||
140 | char **xenbus_directory(struct xenbus_transaction t, | ||
141 | const char *dir, const char *node, unsigned int *num); | ||
142 | void *xenbus_read(struct xenbus_transaction t, | ||
143 | const char *dir, const char *node, unsigned int *len); | ||
144 | int xenbus_write(struct xenbus_transaction t, | ||
145 | const char *dir, const char *node, const char *string); | ||
146 | int xenbus_mkdir(struct xenbus_transaction t, | ||
147 | const char *dir, const char *node); | ||
148 | int xenbus_exists(struct xenbus_transaction t, | ||
149 | const char *dir, const char *node); | ||
150 | int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); | ||
151 | int xenbus_transaction_start(struct xenbus_transaction *t); | ||
152 | int xenbus_transaction_end(struct xenbus_transaction t, int abort); | ||
153 | |||
154 | /* Single read and scanf: returns -errno or num scanned if > 0. */ | ||
155 | int xenbus_scanf(struct xenbus_transaction t, | ||
156 | const char *dir, const char *node, const char *fmt, ...) | ||
157 | __attribute__((format(scanf, 4, 5))); | ||
158 | |||
159 | /* Single printf and write: returns -errno or 0. */ | ||
160 | int xenbus_printf(struct xenbus_transaction t, | ||
161 | const char *dir, const char *node, const char *fmt, ...) | ||
162 | __attribute__((format(printf, 4, 5))); | ||
163 | |||
164 | /* Generic read function: NULL-terminated triples of name, | ||
165 |  * sprintf-style type string, and pointer. Returns 0 or errno. */ | ||
166 | int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); | ||
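[Illustrative sketch, not code from this patch: the typical transaction pattern for the read/write helpers above, in which the whole sequence is retried when the commit fails with -EAGAIN. The "feature-demo" node name is illustrative.]

    static int publish_feature(struct xenbus_device *dev, int value)
    {
        struct xenbus_transaction xbt;
        int err;

    again:
        err = xenbus_transaction_start(&xbt);
        if (err)
            return err;

        err = xenbus_printf(xbt, dev->nodename, "feature-demo", "%d", value);
        if (err) {
            xenbus_transaction_end(xbt, 1);     /* abort */
            return err;
        }

        err = xenbus_transaction_end(xbt, 0);   /* commit */
        if (err == -EAGAIN)
            goto again;
        return err;
    }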
167 | |||
168 | /* notifier routines for when the xenstore comes up */ | ||
169 | extern int xenstored_ready; | ||
170 | int register_xenstore_notifier(struct notifier_block *nb); | ||
171 | void unregister_xenstore_notifier(struct notifier_block *nb); | ||
172 | |||
173 | int register_xenbus_watch(struct xenbus_watch *watch); | ||
174 | void unregister_xenbus_watch(struct xenbus_watch *watch); | ||
175 | void xs_suspend(void); | ||
176 | void xs_resume(void); | ||
177 | void xs_suspend_cancel(void); | ||
178 | |||
179 | /* Used by xenbus_dev to borrow kernel's store connection. */ | ||
180 | void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); | ||
181 | |||
182 | struct work_struct; | ||
183 | |||
184 | /* Prepare for domain suspend: then resume or cancel the suspend. */ | ||
185 | void xenbus_suspend(void); | ||
186 | void xenbus_resume(void); | ||
187 | void xenbus_probe(struct work_struct *); | ||
188 | void xenbus_suspend_cancel(void); | ||
189 | |||
190 | #define XENBUS_IS_ERR_READ(str) ({ \ | ||
191 | if (!IS_ERR(str) && strlen(str) == 0) { \ | ||
192 | kfree(str); \ | ||
193 | str = ERR_PTR(-ERANGE); \ | ||
194 | } \ | ||
195 | IS_ERR(str); \ | ||
196 | }) | ||
197 | |||
198 | #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) | ||
199 | |||
200 | int xenbus_watch_path(struct xenbus_device *dev, const char *path, | ||
201 | struct xenbus_watch *watch, | ||
202 | void (*callback)(struct xenbus_watch *, | ||
203 | const char **, unsigned int)); | ||
204 | int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, | ||
205 | void (*callback)(struct xenbus_watch *, | ||
206 | const char **, unsigned int), | ||
207 | const char *pathfmt, ...) | ||
208 | __attribute__ ((format (printf, 4, 5))); | ||
209 | |||
210 | int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); | ||
211 | int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); | ||
212 | int xenbus_map_ring_valloc(struct xenbus_device *dev, | ||
213 | int gnt_ref, void **vaddr); | ||
214 | int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, | ||
215 | grant_handle_t *handle, void *vaddr); | ||
216 | |||
217 | int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr); | ||
218 | int xenbus_unmap_ring(struct xenbus_device *dev, | ||
219 | grant_handle_t handle, void *vaddr); | ||
220 | |||
221 | int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); | ||
222 | int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port); | ||
223 | int xenbus_free_evtchn(struct xenbus_device *dev, int port); | ||
224 | |||
225 | enum xenbus_state xenbus_read_driver_state(const char *path); | ||
226 | |||
227 | void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); | ||
228 | void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); | ||
229 | |||
230 | const char *xenbus_strstate(enum xenbus_state state); | ||
231 | int xenbus_dev_is_online(struct xenbus_device *dev); | ||
232 | int xenbus_frontend_closed(struct xenbus_device *dev); | ||
233 | |||
234 | #endif /* _XEN_XENBUS_H */ | ||