Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/bug.h | 16
-rw-r--r--  include/asm-alpha/byteorder.h | 2
-rw-r--r--  include/asm-alpha/pgtable.h | 2
-rw-r--r--  include/asm-arm/arch-sa1100/ide.h | 6
-rw-r--r--  include/asm-arm/pgtable.h | 3
-rw-r--r--  include/asm-avr32/pgtable.h | 8
-rw-r--r--  include/asm-cris/arch-v10/ide.h | 11
-rw-r--r--  include/asm-cris/pgtable.h | 4
-rw-r--r--  include/asm-frv/pgtable.h | 2
-rw-r--r--  include/asm-generic/gpio.h | 15
-rw-r--r--  include/asm-ia64/gcc_intrin.h | 12
-rw-r--r--  include/asm-ia64/hugetlb.h | 79
-rw-r--r--  include/asm-ia64/kvm.h | 205
-rw-r--r--  include/asm-ia64/kvm_host.h | 524
-rw-r--r--  include/asm-ia64/kvm_para.h | 29
-rw-r--r--  include/asm-ia64/page.h | 6
-rw-r--r--  include/asm-ia64/pgtable.h | 3
-rw-r--r--  include/asm-ia64/processor.h | 63
-rw-r--r--  include/asm-m32r/pgtable.h | 10
-rw-r--r--  include/asm-m68k/motorola_pgtable.h | 2
-rw-r--r--  include/asm-m68k/sun3_pgtable.h | 2
-rw-r--r--  include/asm-mips/pgtable.h | 2
-rw-r--r--  include/asm-mips/vr41xx/siu.h | 8
-rw-r--r--  include/asm-mips/vr41xx/vr41xx.h | 8
-rw-r--r--  include/asm-mn10300/pgtable.h | 3
-rw-r--r--  include/asm-parisc/pgtable.h | 2
-rw-r--r--  include/asm-powerpc/hugetlb.h | 79
-rw-r--r--  include/asm-powerpc/kvm.h | 53
-rw-r--r--  include/asm-powerpc/kvm_asm.h | 55
-rw-r--r--  include/asm-powerpc/kvm_host.h | 152
-rw-r--r--  include/asm-powerpc/kvm_para.h | 37
-rw-r--r--  include/asm-powerpc/kvm_ppc.h | 88
-rw-r--r--  include/asm-powerpc/mmu-44x.h | 2
-rw-r--r--  include/asm-powerpc/page_64.h | 7
-rw-r--r--  include/asm-powerpc/pgtable-ppc32.h | 3
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h | 3
-rw-r--r--  include/asm-ppc/pgtable.h | 3
-rw-r--r--  include/asm-s390/Kbuild | 1
-rw-r--r--  include/asm-s390/kvm.h | 41
-rw-r--r--  include/asm-s390/kvm_host.h | 234
-rw-r--r--  include/asm-s390/kvm_para.h | 150
-rw-r--r--  include/asm-s390/kvm_virtio.h | 53
-rw-r--r--  include/asm-s390/lowcore.h | 15
-rw-r--r--  include/asm-s390/mmu.h | 1
-rw-r--r--  include/asm-s390/mmu_context.h | 8
-rw-r--r--  include/asm-s390/pgtable.h | 106
-rw-r--r--  include/asm-s390/setup.h | 1
-rw-r--r--  include/asm-sh/hugetlb.h | 91
-rw-r--r--  include/asm-sh/pgtable_32.h | 3
-rw-r--r--  include/asm-sh/pgtable_64.h | 10
-rw-r--r--  include/asm-sparc/pgtable.h | 7
-rw-r--r--  include/asm-sparc64/hugetlb.h | 84
-rw-r--r--  include/asm-sparc64/page.h | 2
-rw-r--r--  include/asm-sparc64/pgtable.h | 10
-rw-r--r--  include/asm-um/pgtable.h | 10
-rw-r--r--  include/asm-x86/geode.h | 38
-rw-r--r--  include/asm-x86/hugetlb.h | 91
-rw-r--r--  include/asm-x86/kvm.h | 41
-rw-r--r--  include/asm-x86/kvm_host.h | 99
-rw-r--r--  include/asm-x86/kvm_para.h | 55
-rw-r--r--  include/asm-x86/pgtable.h | 10
-rw-r--r--  include/asm-x86/processor.h | 1
-rw-r--r--  include/asm-x86/reboot.h | 2
-rw-r--r--  include/asm-xtensa/pgtable.h | 4
-rw-r--r--  include/linux/bitmap.h | 6
-rw-r--r--  include/linux/bootmem.h | 2
-rw-r--r--  include/linux/bsg.h | 14
-rw-r--r--  include/linux/cache.h | 4
-rw-r--r--  include/linux/capability.h | 3
-rw-r--r--  include/linux/cpumask.h | 22
-rw-r--r--  include/linux/cpuset.h | 4
-rw-r--r--  include/linux/dmi.h | 1
-rw-r--r--  include/linux/fb.h | 44
-rw-r--r--  include/linux/fs.h | 7
-rw-r--r--  include/linux/gfp.h | 44
-rw-r--r--  include/linux/hugetlb.h | 46
-rw-r--r--  include/linux/i2o.h | 5
-rw-r--r--  include/linux/ide.h | 95
-rw-r--r--  include/linux/init_task.h | 3
-rw-r--r--  include/linux/kprobes.h | 34
-rw-r--r--  include/linux/kvm.h | 130
-rw-r--r--  include/linux/kvm_host.h | 59
-rw-r--r--  include/linux/kvm_para.h | 11
-rw-r--r--  include/linux/kvm_types.h | 2
-rw-r--r--  include/linux/list.h | 9
-rw-r--r--  include/linux/memory_hotplug.h | 33
-rw-r--r--  include/linux/mempolicy.h | 156
-rw-r--r--  include/linux/mlx4/device.h | 40
-rw-r--r--  include/linux/mlx4/qp.h | 4
-rw-r--r--  include/linux/mm.h | 57
-rw-r--r--  include/linux/mm_types.h | 4
-rw-r--r--  include/linux/mmzone.h | 183
-rw-r--r--  include/linux/msdos_fs.h | 10
-rw-r--r--  include/linux/ncp_fs.h | 7
-rw-r--r--  include/linux/nodemask.h | 22
-rw-r--r--  include/linux/notifier.h | 1
-rw-r--r--  include/linux/oom.h | 4
-rw-r--r--  include/linux/page-flags.h | 319
-rw-r--r--  include/linux/prctl.h | 9
-rw-r--r--  include/linux/quota.h | 21
-rw-r--r--  include/linux/quotaops.h | 137
-rw-r--r--  include/linux/raid/raid5.h | 7
-rw-r--r--  include/linux/reiserfs_fs.h | 1
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/linux/securebits.h | 25
-rw-r--r--  include/linux/security.h | 16
-rw-r--r--  include/linux/serial_8250.h | 1
-rw-r--r--  include/linux/shmem_fs.h | 3
-rw-r--r--  include/linux/suspend.h | 15
-rw-r--r--  include/linux/swap.h | 4
-rw-r--r--  include/linux/synclink.h | 4
-rw-r--r--  include/linux/sysfs.h | 4
-rw-r--r--  include/linux/vmalloc.h | 5
-rw-r--r--  include/linux/vmstat.h | 6
-rw-r--r--  include/net/compat.h | 3
-rw-r--r--  include/scsi/scsi_device.h | 3
-rw-r--r--  include/video/atmel_lcdc.h | 11
-rw-r--r--  include/video/hecubafb.h | 51
-rw-r--r--  include/video/metronomefb.h | 62
119 files changed, 3882 insertions, 573 deletions
diff --git a/include/asm-alpha/bug.h b/include/asm-alpha/bug.h
index 39a3e2a5017d..695a5ee4b5d3 100644
--- a/include/asm-alpha/bug.h
+++ b/include/asm-alpha/bug.h
@@ -1,14 +1,24 @@
 #ifndef _ALPHA_BUG_H
 #define _ALPHA_BUG_H
 
+#include <linux/linkage.h>
+
 #ifdef CONFIG_BUG
 #include <asm/pal.h>
 
 /* ??? Would be nice to use .gprel32 here, but we can't be sure that the
    function loaded the GP, so this could fail in modules. */
-#define BUG() \
-	__asm__ __volatile__("call_pal %0 # bugchk\n\t"".long %1\n\t.8byte %2" \
-			     : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__))
+static inline void ATTRIB_NORET __BUG(const char *file, int line)
+{
+	__asm__ __volatile__(
+		"call_pal %0 # bugchk\n\t"
+		".long %1\n\t.8byte %2"
+		: : "i" (PAL_bugchk), "i"(line), "i"(file));
+	for ( ; ; )
+		;
+}
+
+#define BUG() __BUG(__FILE__, __LINE__)
 
 #define HAVE_ARCH_BUG
 #endif
diff --git a/include/asm-alpha/byteorder.h b/include/asm-alpha/byteorder.h
index 7af2b8d25486..58e958fc7f1b 100644
--- a/include/asm-alpha/byteorder.h
+++ b/include/asm-alpha/byteorder.h
@@ -7,7 +7,7 @@
 
 #ifdef __GNUC__
 
-static __inline __attribute_const__ __u32 __arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch__swab32(__u32 x)
 {
 	/*
 	 * Unfortunately, we can't use the 6 instruction sequence
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 99037b032357..05ce5fba43e3 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -268,6 +268,7 @@ extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
 extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+extern inline int pte_special(pte_t pte) { return 0; }
 
 extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
 extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
@@ -275,6 +276,7 @@ extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); ret
 extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
 extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
 extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
+extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
 
diff --git a/include/asm-arm/arch-sa1100/ide.h b/include/asm-arm/arch-sa1100/ide.h
index 98b10bcf9f1b..b14cbda01dc3 100644
--- a/include/asm-arm/arch-sa1100/ide.h
+++ b/include/asm-arm/arch-sa1100/ide.h
@@ -37,12 +37,12 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
 
 	memset(hw, 0, sizeof(*hw));
 
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-		hw->io_ports[i] = reg;
+	for (i = 0; i <= 7; i++) {
+		hw->io_ports_array[i] = reg;
 		reg += regincr;
 	}
 
-	hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+	hw->io_ports.ctl_addr = ctrl_port;
 
 	if (irq)
 		*irq = 0;
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index 5e0182485d8c..5571c13c3f3b 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -260,6 +260,7 @@ extern struct page *empty_zero_page;
 #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
 #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+#define pte_special(pte) (0)
 
 /*
  * The following only works if pte_present() is not true.
@@ -280,6 +281,8 @@ PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
 PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
 PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
 
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
diff --git a/include/asm-avr32/pgtable.h b/include/asm-avr32/pgtable.h
index 3ae7b548fce7..c0e5e29417df 100644
--- a/include/asm-avr32/pgtable.h
+++ b/include/asm-avr32/pgtable.h
@@ -212,6 +212,10 @@ static inline int pte_young(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_ACCESSED;
 }
+static inline int pte_special(pte_t pte)
+{
+	return 0;
+}
 
 /*
  * The following only work if pte_present() is not true.
@@ -252,6 +256,10 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED));
 	return pte;
 }
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	return pte;
+}
 
 #define pmd_none(x) (!pmd_val(x))
 #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
diff --git a/include/asm-cris/arch-v10/ide.h b/include/asm-cris/arch-v10/ide.h
index ea34e0d0a388..5366e6239328 100644
--- a/include/asm-cris/arch-v10/ide.h
+++ b/include/asm-cris/arch-v10/ide.h
@@ -59,22 +59,19 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, u
 	int i;
 
 	/* fill in ports for ATA addresses 0 to 7 */
-
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-		hw->io_ports[i] = data_port |
+	for (i = 0; i <= 7; i++) {
+		hw->io_ports_array[i] = data_port |
 			IO_FIELD(R_ATA_CTRL_DATA, addr, i) |
 			IO_STATE(R_ATA_CTRL_DATA, cs0, active);
 	}
 
 	/* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
-
-	hw->io_ports[IDE_CONTROL_OFFSET] = data_port |
+	hw->io_ports.ctl_addr = data_port |
 		IO_FIELD(R_ATA_CTRL_DATA, addr, 6) |
 		IO_STATE(R_ATA_CTRL_DATA, cs1, active);
 
 	/* whats this for ? */
-
-	hw->io_ports[IDE_IRQ_OFFSET] = 0;
+	hw->io_ports.irq_addr = 0;
 }
 
 static inline void ide_init_default_hwifs(void)
diff --git a/include/asm-cris/pgtable.h b/include/asm-cris/pgtable.h
index a2607575681b..829e7a7d9fb9 100644
--- a/include/asm-cris/pgtable.h
+++ b/include/asm-cris/pgtable.h
@@ -115,6 +115,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WR
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
@@ -162,6 +163,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	}
 	return pte;
 }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -229,7 +231,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 
 /* to find an entry in a page-table-directory */
-static inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address)
 {
 	return mm->pgd + pgd_index(address);
 }
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 4e219046fe42..83c51aba534b 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -380,6 +380,7 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
 static inline int pte_dirty(pte_t pte) { return (pte).pte & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return (pte).pte & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte) { return !((pte).pte & _PAGE_WP); }
+static inline int pte_special(pte_t pte) { return 0; }
 
 static inline pte_t pte_mkclean(pte_t pte) { (pte).pte &= ~_PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkold(pte_t pte) { (pte).pte &= ~_PAGE_ACCESSED; return pte; }
@@ -387,6 +388,7 @@ static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte |= _PAGE_WP; return pte
 static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index f29a502f4a6c..ecf675a59d21 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -16,7 +16,14 @@
 #define ARCH_NR_GPIOS 256
 #endif
 
+static inline int gpio_is_valid(int number)
+{
+	/* only some non-negative numbers are valid */
+	return ((unsigned)number) < ARCH_NR_GPIOS;
+}
+
 struct seq_file;
+struct module;
 
 /**
  * struct gpio_chip - abstract a GPIO controller
@@ -48,6 +55,7 @@ struct seq_file;
  */
 struct gpio_chip {
 	char *label;
+	struct module *owner;
 
 	int (*direction_input)(struct gpio_chip *chip,
 			unsigned offset);
@@ -66,6 +74,7 @@ struct gpio_chip {
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
 			unsigned offset);
+extern int __init __must_check gpiochip_reserve(int start, int ngpio);
 
 /* add/remove chips */
 extern int gpiochip_add(struct gpio_chip *chip);
@@ -97,6 +106,12 @@ extern int __gpio_cansleep(unsigned gpio);
 
 #else
 
+static inline int gpio_is_valid(int number)
+{
+	/* only non-negative numbers are valid */
+	return number >= 0;
+}
+
 /* platforms that don't directly support access to GPIOs through I2C, SPI,
  * or other blocking infrastructure can use these wrappers.
  */
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index de2ed2cbdd84..2fe292c275fe 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -21,6 +21,10 @@
 
 #define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
 
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
 extern void ia64_bad_param_for_setreg (void);
 extern void ia64_bad_param_for_getreg (void);
 
@@ -517,6 +521,14 @@ do { \
 #define ia64_ptrd(addr, size) \
 	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
 
+#define ia64_ttag(addr) \
+({ \
+	__u64 ia64_intri_res; \
+	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+	ia64_intri_res; \
+})
+
+
 /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
 
 #define ia64_lfhint_none 0
diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h
new file mode 100644
index 000000000000..f28a9701f1cf
--- /dev/null
+++ b/include/asm-ia64/hugetlb.h
@@ -0,0 +1,79 @@
1#ifndef _ASM_IA64_HUGETLB_H
2#define _ASM_IA64_HUGETLB_H
3
4#include <asm/page.h>
5
6
7void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
8 unsigned long end, unsigned long floor,
9 unsigned long ceiling);
10
11int prepare_hugepage_range(unsigned long addr, unsigned long len);
12
13static inline int is_hugepage_only_range(struct mm_struct *mm,
14 unsigned long addr,
15 unsigned long len)
16{
17 return (REGION_NUMBER(addr) == RGN_HPAGE ||
18 REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
19}
20
21static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
22{
23}
24
25static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
26 pte_t *ptep, pte_t pte)
27{
28 set_pte_at(mm, addr, ptep, pte);
29}
30
31static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
32 unsigned long addr, pte_t *ptep)
33{
34 return ptep_get_and_clear(mm, addr, ptep);
35}
36
37static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
38 unsigned long addr, pte_t *ptep)
39{
40}
41
42static inline int huge_pte_none(pte_t pte)
43{
44 return pte_none(pte);
45}
46
47static inline pte_t huge_pte_wrprotect(pte_t pte)
48{
49 return pte_wrprotect(pte);
50}
51
52static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
53 unsigned long addr, pte_t *ptep)
54{
55 ptep_set_wrprotect(mm, addr, ptep);
56}
57
58static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
59 unsigned long addr, pte_t *ptep,
60 pte_t pte, int dirty)
61{
62 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
63}
64
65static inline pte_t huge_ptep_get(pte_t *ptep)
66{
67 return *ptep;
68}
69
70static inline int arch_prepare_hugepage(struct page *page)
71{
72 return 0;
73}
74
75static inline void arch_release_hugepage(struct page *page)
76{
77}
78
79#endif /* _ASM_IA64_HUGETLB_H */
diff --git a/include/asm-ia64/kvm.h b/include/asm-ia64/kvm.h
index 030d29b4b26b..eb2d3559d089 100644
--- a/include/asm-ia64/kvm.h
+++ b/include/asm-ia64/kvm.h
@@ -1,6 +1,205 @@
1#ifndef __LINUX_KVM_IA64_H 1#ifndef __ASM_IA64_KVM_H
2#define __LINUX_KVM_IA64_H 2#define __ASM_IA64_KVM_H
3 3
4/* ia64 does not support KVM */ 4/*
5 * asm-ia64/kvm.h: kvm structure definitions for ia64
6 *
7 * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
20 * Place - Suite 330, Boston, MA 02111-1307 USA.
21 *
22 */
23
24#include <asm/types.h>
25#include <asm/fpu.h>
26
27#include <linux/ioctl.h>
28
29/* Architectural interrupt line count. */
30#define KVM_NR_INTERRUPTS 256
31
32#define KVM_IOAPIC_NUM_PINS 24
33
34struct kvm_ioapic_state {
35 __u64 base_address;
36 __u32 ioregsel;
37 __u32 id;
38 __u32 irr;
39 __u32 pad;
40 union {
41 __u64 bits;
42 struct {
43 __u8 vector;
44 __u8 delivery_mode:3;
45 __u8 dest_mode:1;
46 __u8 delivery_status:1;
47 __u8 polarity:1;
48 __u8 remote_irr:1;
49 __u8 trig_mode:1;
50 __u8 mask:1;
51 __u8 reserve:7;
52 __u8 reserved[4];
53 __u8 dest_id;
54 } fields;
55 } redirtbl[KVM_IOAPIC_NUM_PINS];
56};
57
58#define KVM_IRQCHIP_PIC_MASTER 0
59#define KVM_IRQCHIP_PIC_SLAVE 1
60#define KVM_IRQCHIP_IOAPIC 2
61
62#define KVM_CONTEXT_SIZE 8*1024
63
64union context {
65 /* 8K size */
66 char dummy[KVM_CONTEXT_SIZE];
67 struct {
68 unsigned long psr;
69 unsigned long pr;
70 unsigned long caller_unat;
71 unsigned long pad;
72 unsigned long gr[32];
73 unsigned long ar[128];
74 unsigned long br[8];
75 unsigned long cr[128];
76 unsigned long rr[8];
77 unsigned long ibr[8];
78 unsigned long dbr[8];
79 unsigned long pkr[8];
80 struct ia64_fpreg fr[128];
81 };
82};
83
84struct thash_data {
85 union {
86 struct {
87 unsigned long p : 1; /* 0 */
88 unsigned long rv1 : 1; /* 1 */
89 unsigned long ma : 3; /* 2-4 */
90 unsigned long a : 1; /* 5 */
91 unsigned long d : 1; /* 6 */
92 unsigned long pl : 2; /* 7-8 */
93 unsigned long ar : 3; /* 9-11 */
94 unsigned long ppn : 38; /* 12-49 */
95 unsigned long rv2 : 2; /* 50-51 */
96 unsigned long ed : 1; /* 52 */
97 unsigned long ig1 : 11; /* 53-63 */
98 };
99 struct {
100 unsigned long __rv1 : 53; /* 0-52 */
101 unsigned long contiguous : 1; /*53 */
102 unsigned long tc : 1; /* 54 TR or TC */
103 unsigned long cl : 1;
104 /* 55 I side or D side cache line */
105 unsigned long len : 4; /* 56-59 */
106 unsigned long io : 1; /* 60 entry is for io or not */
107 unsigned long nomap : 1;
108 /* 61 entry cann't be inserted into machine TLB.*/
109 unsigned long checked : 1;
110 /* 62 for VTLB/VHPT sanity check */
111 unsigned long invalid : 1;
112 /* 63 invalid entry */
113 };
114 unsigned long page_flags;
115 }; /* same for VHPT and TLB */
116
117 union {
118 struct {
119 unsigned long rv3 : 2;
120 unsigned long ps : 6;
121 unsigned long key : 24;
122 unsigned long rv4 : 32;
123 };
124 unsigned long itir;
125 };
126 union {
127 struct {
128 unsigned long ig2 : 12;
129 unsigned long vpn : 49;
130 unsigned long vrn : 3;
131 };
132 unsigned long ifa;
133 unsigned long vadr;
134 struct {
135 unsigned long tag : 63;
136 unsigned long ti : 1;
137 };
138 unsigned long etag;
139 };
140 union {
141 struct thash_data *next;
142 unsigned long rid;
143 unsigned long gpaddr;
144 };
145};
146
147#define NITRS 8
148#define NDTRS 8
149
150struct saved_vpd {
151 unsigned long vhpi;
152 unsigned long vgr[16];
153 unsigned long vbgr[16];
154 unsigned long vnat;
155 unsigned long vbnat;
156 unsigned long vcpuid[5];
157 unsigned long vpsr;
158 unsigned long vpr;
159 unsigned long vcr[128];
160};
161
162struct kvm_regs {
163 char *saved_guest;
164 char *saved_stack;
165 struct saved_vpd vpd;
166 /*Arch-regs*/
167 int mp_state;
168 unsigned long vmm_rr;
169 /* TR and TC. */
170 struct thash_data itrs[NITRS];
171 struct thash_data dtrs[NDTRS];
172 /* Bit is set if there is a tr/tc for the region. */
173 unsigned char itr_regions;
174 unsigned char dtr_regions;
175 unsigned char tc_regions;
176
177 char irq_check;
178 unsigned long saved_itc;
179 unsigned long itc_check;
180 unsigned long timer_check;
181 unsigned long timer_pending;
182 unsigned long last_itc;
183
184 unsigned long vrr[8];
185 unsigned long ibr[8];
186 unsigned long dbr[8];
187 unsigned long insvc[4]; /* Interrupt in service. */
188 unsigned long xtp;
189
190 unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
191 unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
192 unsigned long metaphysical_saved_rr0; /* from kvm_arch */
193 unsigned long metaphysical_saved_rr4; /* from kvm_arch */
194 unsigned long fp_psr; /*used for lazy float register */
195 unsigned long saved_gp;
196 /*for phycial emulation */
197};
198
199struct kvm_sregs {
200};
201
202struct kvm_fpu {
203};
5 204
6#endif 205#endif
diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h
new file mode 100644
index 000000000000..c082c208c1f3
--- /dev/null
+++ b/include/asm-ia64/kvm_host.h
@@ -0,0 +1,524 @@
1/*
2 * kvm_host.h: used for kvm module, and hold ia64-specific sections.
3 *
4 * Copyright (C) 2007, Intel Corporation.
5 *
6 * Xiantao Zhang <xiantao.zhang@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 *
21 */
22
23#ifndef __ASM_KVM_HOST_H
24#define __ASM_KVM_HOST_H
25
26
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/kvm.h>
30#include <linux/kvm_para.h>
31#include <linux/kvm_types.h>
32
33#include <asm/pal.h>
34#include <asm/sal.h>
35
36#define KVM_MAX_VCPUS 4
37#define KVM_MEMORY_SLOTS 32
38/* memory slots that does not exposed to userspace */
39#define KVM_PRIVATE_MEM_SLOTS 4
40
41
42/* define exit reasons from vmm to kvm*/
43#define EXIT_REASON_VM_PANIC 0
44#define EXIT_REASON_MMIO_INSTRUCTION 1
45#define EXIT_REASON_PAL_CALL 2
46#define EXIT_REASON_SAL_CALL 3
47#define EXIT_REASON_SWITCH_RR6 4
48#define EXIT_REASON_VM_DESTROY 5
49#define EXIT_REASON_EXTERNAL_INTERRUPT 6
50#define EXIT_REASON_IPI 7
51#define EXIT_REASON_PTC_G 8
52
53/*Define vmm address space and vm data space.*/
54#define KVM_VMM_SIZE (16UL<<20)
55#define KVM_VMM_SHIFT 24
56#define KVM_VMM_BASE 0xD000000000000000UL
57#define VMM_SIZE (8UL<<20)
58
59/*
60 * Define vm_buffer, used by PAL Services, base address.
61 * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M
62 */
63#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
64#define KVM_VM_BUFFER_SIZE (8UL<<20)
65
66/*Define Virtual machine data layout.*/
67#define KVM_VM_DATA_SHIFT 24
68#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
69#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
70
71
72#define KVM_P2M_BASE KVM_VM_DATA_BASE
73#define KVM_P2M_OFS 0
74#define KVM_P2M_SIZE (8UL << 20)
75
76#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE)
77#define KVM_VHPT_OFS KVM_P2M_SIZE
78#define KVM_VHPT_BLOCK_SIZE (2UL << 20)
79#define VHPT_SHIFT 18
80#define VHPT_SIZE (1UL << VHPT_SHIFT)
81#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
82
83#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
84#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
85#define KVM_VTLB_BLOCK_SIZE (1UL<<20)
86#define VTLB_SHIFT 17
87#define VTLB_SIZE (1UL<<VTLB_SHIFT)
88#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
89
90#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
91#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
92#define KVM_VPD_BLOCK_SIZE (2UL<<20)
93#define VPD_SHIFT 16
94#define VPD_SIZE (1UL<<VPD_SHIFT)
95
96#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
97#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
98#define KVM_VCPU_BLOCK_SIZE (2UL<<20)
99#define VCPU_SHIFT 18
100#define VCPU_SIZE (1UL<<VCPU_SHIFT)
101#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
102
103#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
104#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
105#define KVM_VM_BLOCK_SIZE (1UL<<19)
106
107#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
108#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
109#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
110
111/* Get vpd, vhpt, tlb, vcpu, base*/
112#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
113#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE)
114#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE)
115#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE)
116
117/*IO section definitions*/
118#define IOREQ_READ 1
119#define IOREQ_WRITE 0
120
121#define STATE_IOREQ_NONE 0
122#define STATE_IOREQ_READY 1
123#define STATE_IOREQ_INPROCESS 2
124#define STATE_IORESP_READY 3
125
126/*Guest Physical address layout.*/
127#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */
128#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */
129#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */
130#define GPFN_PIB (3UL << 60) /* PIB base */
131#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */
132#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */
133#define GPFN_GFW (6UL << 60) /* Guest Firmware */
134#define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */
135
136#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */
137#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
138#define INVALID_MFN (~0UL)
139#define MEM_G (1UL << 30)
140#define MEM_M (1UL << 20)
141#define MMIO_START (3 * MEM_G)
142#define MMIO_SIZE (512 * MEM_M)
143#define VGA_IO_START 0xA0000UL
144#define VGA_IO_SIZE 0x20000
145#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
146#define LEGACY_IO_SIZE (64 * MEM_M)
147#define IO_SAPIC_START 0xfec00000UL
148#define IO_SAPIC_SIZE 0x100000
149#define PIB_START 0xfee00000UL
150#define PIB_SIZE 0x200000
151#define GFW_START (4 * MEM_G - 16 * MEM_M)
152#define GFW_SIZE (16 * MEM_M)
153
154/*Deliver mode, defined for ioapic.c*/
155#define dest_Fixed IOSAPIC_FIXED
156#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
157
158#define NMI_VECTOR 2
159#define ExtINT_VECTOR 0
160#define NULL_VECTOR (-1)
161#define IA64_SPURIOUS_INT_VECTOR 0x0f
162
163#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
164
165/*
166 *Delivery mode
167 */
168#define SAPIC_DELIV_SHIFT 8
169#define SAPIC_FIXED 0x0
170#define SAPIC_LOWEST_PRIORITY 0x1
171#define SAPIC_PMI 0x2
172#define SAPIC_NMI 0x4
173#define SAPIC_INIT 0x5
174#define SAPIC_EXTINT 0x7
175
176/*
177 * vcpu->requests bit members for arch
178 */
179#define KVM_REQ_PTC_G 32
180#define KVM_REQ_RESUME 33
181
182#define KVM_PAGES_PER_HPAGE 1
183
184struct kvm;
185struct kvm_vcpu;
186struct kvm_guest_debug{
187};
188
189struct kvm_mmio_req {
190 uint64_t addr; /* physical address */
191 uint64_t size; /* size in bytes */
192 uint64_t data; /* data (or paddr of data) */
193 uint8_t state:4;
194 uint8_t dir:1; /* 1=read, 0=write */
195};
196
197/*Pal data struct */
198struct kvm_pal_call{
199 /*In area*/
200 uint64_t gr28;
201 uint64_t gr29;
202 uint64_t gr30;
203 uint64_t gr31;
204 /*Out area*/
205 struct ia64_pal_retval ret;
206};
207
208/* Sal data structure */
209struct kvm_sal_call{
210 /*In area*/
211 uint64_t in0;
212 uint64_t in1;
213 uint64_t in2;
214 uint64_t in3;
215 uint64_t in4;
216 uint64_t in5;
217 uint64_t in6;
218 uint64_t in7;
219 struct sal_ret_values ret;
220};
221
222/*Guest change rr6*/
223struct kvm_switch_rr6 {
224 uint64_t old_rr;
225 uint64_t new_rr;
226};
227
228union ia64_ipi_a{
229 unsigned long val;
230 struct {
231 unsigned long rv : 3;
232 unsigned long ir : 1;
233 unsigned long eid : 8;
234 unsigned long id : 8;
235 unsigned long ib_base : 44;
236 };
237};
238
239union ia64_ipi_d {
240 unsigned long val;
241 struct {
242 unsigned long vector : 8;
243 unsigned long dm : 3;
244 unsigned long ig : 53;
245 };
246};
247
248/*ipi check exit data*/
249struct kvm_ipi_data{
250 union ia64_ipi_a addr;
251 union ia64_ipi_d data;
252};
253
254/*global purge data*/
255struct kvm_ptc_g {
256 unsigned long vaddr;
257 unsigned long rr;
258 unsigned long ps;
259 struct kvm_vcpu *vcpu;
260};
261
262/*Exit control data */
263struct exit_ctl_data{
264 uint32_t exit_reason;
265 uint32_t vm_status;
266 union {
267 struct kvm_mmio_req ioreq;
268 struct kvm_pal_call pal_data;
269 struct kvm_sal_call sal_data;
270 struct kvm_switch_rr6 rr_data;
271 struct kvm_ipi_data ipi_data;
272 struct kvm_ptc_g ptc_g_data;
273 } u;
274};
275
276union pte_flags {
277 unsigned long val;
278 struct {
279 unsigned long p : 1; /*0 */
280 unsigned long : 1; /* 1 */
281 unsigned long ma : 3; /* 2-4 */
282 unsigned long a : 1; /* 5 */
283 unsigned long d : 1; /* 6 */
284 unsigned long pl : 2; /* 7-8 */
285 unsigned long ar : 3; /* 9-11 */
286 unsigned long ppn : 38; /* 12-49 */
287 unsigned long : 2; /* 50-51 */
288 unsigned long ed : 1; /* 52 */
289 };
290};
291
292union ia64_pta {
293 unsigned long val;
294 struct {
295 unsigned long ve : 1;
296 unsigned long reserved0 : 1;
297 unsigned long size : 6;
298 unsigned long vf : 1;
299 unsigned long reserved1 : 6;
300 unsigned long base : 49;
301 };
302};
303
304struct thash_cb {
305 /* THASH base information */
306 struct thash_data *hash; /* hash table pointer */
307 union ia64_pta pta;
308 int num;
309};
310
311struct kvm_vcpu_stat {
312};
313
314struct kvm_vcpu_arch {
315 int launched;
316 int last_exit;
317 int last_run_cpu;
318 int vmm_tr_slot;
319 int vm_tr_slot;
320
321#define KVM_MP_STATE_RUNNABLE 0
322#define KVM_MP_STATE_UNINITIALIZED 1
323#define KVM_MP_STATE_INIT_RECEIVED 2
324#define KVM_MP_STATE_HALTED 3
325 int mp_state;
326
327#define MAX_PTC_G_NUM 3
328 int ptc_g_count;
329 struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
330
331 /*halt timer to wake up sleepy vcpus*/
332 struct hrtimer hlt_timer;
333 long ht_active;
334
335 struct kvm_lapic *apic; /* kernel irqchip context */
336 struct vpd *vpd;
337
338 /* Exit data for vmm_transition*/
339 struct exit_ctl_data exit_data;
340
341 cpumask_t cache_coherent_map;
342
343 unsigned long vmm_rr;
344 unsigned long host_rr6;
345 unsigned long psbits[8];
346 unsigned long cr_iipa;
347 unsigned long cr_isr;
348 unsigned long vsa_base;
349 unsigned long dirty_log_lock_pa;
350 unsigned long __gp;
351 /* TR and TC. */
352 struct thash_data itrs[NITRS];
353 struct thash_data dtrs[NDTRS];
354 /* Bit is set if there is a tr/tc for the region. */
355 unsigned char itr_regions;
356 unsigned char dtr_regions;
357 unsigned char tc_regions;
358 /* purge all */
359 unsigned long ptce_base;
360 unsigned long ptce_count[2];
361 unsigned long ptce_stride[2];
362 /* itc/itm */
363 unsigned long last_itc;
364 long itc_offset;
365 unsigned long itc_check;
366 unsigned long timer_check;
367 unsigned long timer_pending;
368
369 unsigned long vrr[8];
370 unsigned long ibr[8];
371 unsigned long dbr[8];
372 unsigned long insvc[4]; /* Interrupt in service. */
373 unsigned long xtp;
374
375 unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
376 unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
377 unsigned long metaphysical_saved_rr0; /* from kvm_arch */
378 unsigned long metaphysical_saved_rr4; /* from kvm_arch */
379 unsigned long fp_psr; /*used for lazy float register */
380 unsigned long saved_gp;
381 /*for phycial emulation */
382 int mode_flags;
383 struct thash_cb vtlb;
384 struct thash_cb vhpt;
385 char irq_check;
386 char irq_new_pending;
387
388 unsigned long opcode;
389 unsigned long cause;
390 union context host;
391 union context guest;
392};
393
394struct kvm_vm_stat {
395 u64 remote_tlb_flush;
396};
397
398struct kvm_sal_data {
399 unsigned long boot_ip;
400 unsigned long boot_gp;
401};
402
403struct kvm_arch {
404 unsigned long vm_base;
405 unsigned long metaphysical_rr0;
406 unsigned long metaphysical_rr4;
407 unsigned long vmm_init_rr;
408 unsigned long vhpt_base;
409 unsigned long vtlb_base;
410 unsigned long vpd_base;
411 spinlock_t dirty_log_lock;
412 struct kvm_ioapic *vioapic;
413 struct kvm_vm_stat stat;
414 struct kvm_sal_data rdv_sal_data;
415};
416
417union cpuid3_t {
418 u64 value;
419 struct {
420 u64 number : 8;
421 u64 revision : 8;
422 u64 model : 8;
423 u64 family : 8;
424 u64 archrev : 8;
425 u64 rv : 24;
426 };
427};
428
429struct kvm_pt_regs {
430 /* The following registers are saved by SAVE_MIN: */
431 unsigned long b6; /* scratch */
432 unsigned long b7; /* scratch */
433
434 unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
435 unsigned long ar_ssd; /* reserved for future use (scratch) */
436
437 unsigned long r8; /* scratch (return value register 0) */
438 unsigned long r9; /* scratch (return value register 1) */
439 unsigned long r10; /* scratch (return value register 2) */
440 unsigned long r11; /* scratch (return value register 3) */
441
442 unsigned long cr_ipsr; /* interrupted task's psr */
443 unsigned long cr_iip; /* interrupted task's instruction pointer */
444 unsigned long cr_ifs; /* interrupted task's function state */
445
446 unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
447 unsigned long ar_pfs; /* prev function state */
448 unsigned long ar_rsc; /* RSE configuration */
449 /* The following two are valid only if cr_ipsr.cpl > 0: */
450 unsigned long ar_rnat; /* RSE NaT */
451 unsigned long ar_bspstore; /* RSE bspstore */
452
453 unsigned long pr; /* 64 predicate registers (1 bit each) */
454 unsigned long b0; /* return pointer (bp) */
455 unsigned long loadrs; /* size of dirty partition << 16 */
456
457 unsigned long r1; /* the gp pointer */
458 unsigned long r12; /* interrupted task's memory stack pointer */
459 unsigned long r13; /* thread pointer */
460
461 unsigned long ar_fpsr; /* floating point status (preserved) */
462 unsigned long r15; /* scratch */
463
464 /* The remaining registers are NOT saved for system calls. */
465 unsigned long r14; /* scratch */
466 unsigned long r2; /* scratch */
467 unsigned long r3; /* scratch */
468 unsigned long r16; /* scratch */
469 unsigned long r17; /* scratch */
470 unsigned long r18; /* scratch */
471 unsigned long r19; /* scratch */
472 unsigned long r20; /* scratch */
473 unsigned long r21; /* scratch */
474 unsigned long r22; /* scratch */
475 unsigned long r23; /* scratch */
476 unsigned long r24; /* scratch */
477 unsigned long r25; /* scratch */
478 unsigned long r26; /* scratch */
479 unsigned long r27; /* scratch */
480 unsigned long r28; /* scratch */
481 unsigned long r29; /* scratch */
482 unsigned long r30; /* scratch */
483 unsigned long r31; /* scratch */
484 unsigned long ar_ccv; /* compare/exchange value (scratch) */
485
486 /*
487 * Floating point registers that the kernel considers scratch:
488 */
489 struct ia64_fpreg f6; /* scratch */
490 struct ia64_fpreg f7; /* scratch */
491 struct ia64_fpreg f8; /* scratch */
492 struct ia64_fpreg f9; /* scratch */
493 struct ia64_fpreg f10; /* scratch */
494 struct ia64_fpreg f11; /* scratch */
495
496 unsigned long r4; /* preserved */
497 unsigned long r5; /* preserved */
498 unsigned long r6; /* preserved */
499 unsigned long r7; /* preserved */
500 unsigned long eml_unat; /* used for emulating instruction */
501 unsigned long pad0; /* alignment pad */
502};
503
504static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
505{
506 return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
507}
508
509typedef int kvm_vmm_entry(void);
510typedef void kvm_tramp_entry(union context *host, union context *guest);
511
512struct kvm_vmm_info{
513 struct module *module;
514 kvm_vmm_entry *vmm_entry;
515 kvm_tramp_entry *tramp_entry;
516 unsigned long vmm_ivt;
517};
518
519int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
520int kvm_emulate_halt(struct kvm_vcpu *vcpu);
521int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
522void kvm_sal_emul(struct kvm_vcpu *vcpu);
523
524#endif
diff --git a/include/asm-ia64/kvm_para.h b/include/asm-ia64/kvm_para.h
new file mode 100644
index 000000000000..9f9796bb3441
--- /dev/null
+++ b/include/asm-ia64/kvm_para.h
@@ -0,0 +1,29 @@
1#ifndef __IA64_KVM_PARA_H
2#define __IA64_KVM_PARA_H
3
4/*
5 * asm-ia64/kvm_para.h
6 *
7 * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
20 * Place - Suite 330, Boston, MA 02111-1307 USA.
21 *
22 */
23
24static inline unsigned int kvm_arch_para_features(void)
25{
26 return 0;
27}
28
29#endif
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 4999a6c63775..36f39321b768 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -54,9 +54,6 @@
 # define HPAGE_MASK (~(HPAGE_SIZE - 1))
 
 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifdef __ASSEMBLY__
@@ -153,9 +150,6 @@ typedef union ia64_va {
 # define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
 				| (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-# define is_hugepage_only_range(mm, addr, len) \
-	(REGION_NUMBER(addr) == RGN_HPAGE || \
-	 REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
 extern unsigned int hpage_shift;
 #endif
 
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index ed70862ea247..7a9bff47564f 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -302,6 +302,8 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
 #define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
 #define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
+#define pte_special(pte) 0
+
 /*
  * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
  * access rights:
@@ -313,6 +315,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
 #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
 #define pte_mkhuge(pte) (__pte(pte_val(pte)))
+#define pte_mkspecial(pte) (pte)
 
 /*
  * Because ia64's Icache and Dcache is not coherent (on a cpu), we need to
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 741f7ecb986a..6aff126fc07e 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -119,6 +119,69 @@ struct ia64_psr {
 	__u64 reserved4 : 19;
 };
 
+union ia64_isr {
+	__u64 val;
+	struct {
+		__u64 code : 16;
+		__u64 vector : 8;
+		__u64 reserved1 : 8;
+		__u64 x : 1;
+		__u64 w : 1;
+		__u64 r : 1;
+		__u64 na : 1;
+		__u64 sp : 1;
+		__u64 rs : 1;
+		__u64 ir : 1;
+		__u64 ni : 1;
+		__u64 so : 1;
+		__u64 ei : 2;
+		__u64 ed : 1;
+		__u64 reserved2 : 20;
+	};
+};
+
+union ia64_lid {
+	__u64 val;
+	struct {
+		__u64 rv : 16;
+		__u64 eid : 8;
+		__u64 id : 8;
+		__u64 ig : 32;
+	};
+};
+
+union ia64_tpr {
+	__u64 val;
+	struct {
+		__u64 ig0 : 4;
+		__u64 mic : 4;
+		__u64 rsv : 8;
+		__u64 mmi : 1;
+		__u64 ig1 : 47;
+	};
+};
+
+union ia64_itir {
+	__u64 val;
+	struct {
+		__u64 rv3 : 2; /* 0-1 */
+		__u64 ps : 6; /* 2-7 */
+		__u64 key : 24; /* 8-31 */
+		__u64 rv4 : 32; /* 32-63 */
+	};
+};
+
+union ia64_rr {
+	__u64 val;
+	struct {
+		__u64 ve : 1; /* enable hw walker */
+		__u64 reserved0: 1; /* reserved */
+		__u64 ps : 6; /* log page size */
+		__u64 rid : 24; /* region id */
+		__u64 reserved1: 32; /* reserved */
+	};
+};
+
 /*
  * CPU type, hardware bug flags, and per-CPU state. Frequently used
  * state comes earlier:
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 86505387be08..e6359c566b50 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -214,6 +214,11 @@ static inline int pte_file(pte_t pte)
 	return pte_val(pte) & _PAGE_FILE;
 }
 
+static inline int pte_special(pte_t pte)
+{
+	return 0;
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
 	pte_val(pte) &= ~_PAGE_DIRTY;
@@ -250,6 +255,11 @@ static inline pte_t pte_mkwrite(pte_t pte)
 	return pte;
 }
 
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	return pte;
+}
+
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h
index 13135d4821d8..8e9a8a754dde 100644
--- a/include/asm-m68k/motorola_pgtable.h
+++ b/include/asm-m68k/motorola_pgtable.h
@@ -168,6 +168,7 @@ static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY);
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
 static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
@@ -185,6 +186,7 @@ static inline pte_t pte_mkcache(pte_t pte)
 	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
 	return pte;
 }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
 
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h
index b766fc261bde..f847ec732d62 100644
--- a/include/asm-m68k/sun3_pgtable.h
+++ b/include/asm-m68k/sun3_pgtable.h
@@ -169,6 +169,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & SUN3_PAGE_WRITEA
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & SUN3_PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte) { return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
 static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; }
@@ -181,6 +182,7 @@ static inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) |= SUN3_PAGE_NOCACHE
 //static inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; }
 // until then, use:
 static inline pte_t pte_mkcache(pte_t pte) { return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 17a7703a2969..782221e57c0a 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -285,6 +285,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return pte;
 }
 #endif
+static inline int pte_special(pte_t pte) { return 0; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 /*
  * Macro to make mark a page protection value as "uncacheable". Note
diff --git a/include/asm-mips/vr41xx/siu.h b/include/asm-mips/vr41xx/siu.h
index 98cdb4096485..da9f6e373409 100644
--- a/include/asm-mips/vr41xx/siu.h
+++ b/include/asm-mips/vr41xx/siu.h
@@ -1,7 +1,7 @@
 /*
  * Include file for NEC VR4100 series Serial Interface Unit.
  *
- * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -49,4 +49,10 @@ typedef enum {
 
 extern void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed);
 
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+extern void vr41xx_siu_early_setup(struct uart_port *port);
+#else
+static inline void vr41xx_siu_early_setup(struct uart_port *port) {}
+#endif
+
 #endif /* __NEC_VR41XX_SIU_H */
diff --git a/include/asm-mips/vr41xx/vr41xx.h b/include/asm-mips/vr41xx/vr41xx.h
index 88b492f6ea9c..22be64971cc6 100644
--- a/include/asm-mips/vr41xx/vr41xx.h
+++ b/include/asm-mips/vr41xx/vr41xx.h
@@ -7,7 +7,7 @@
  * Copyright (C) 2001, 2002 Paul Mundt
  * Copyright (C) 2002 MontaVista Software, Inc.
  * Copyright (C) 2002 TimeSys Corp.
- * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -143,4 +143,10 @@ extern void vr41xx_disable_csiint(uint16_t mask);
 extern void vr41xx_enable_bcuint(void);
 extern void vr41xx_disable_bcuint(void);
 
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+extern void vr41xx_siu_setup(void);
+#else
+static inline void vr41xx_siu_setup(void) {}
+#endif
+
 #endif /* __NEC_VR41XX_H */
diff --git a/include/asm-mn10300/pgtable.h b/include/asm-mn10300/pgtable.h
index 375c4941deda..6dc30fc827c4 100644
--- a/include/asm-mn10300/pgtable.h
+++ b/include/asm-mn10300/pgtable.h
@@ -224,6 +224,7 @@ static inline int pte_read(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER;
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; }
+static inline int pte_special(pte_t pte){ return 0; }
 
 /*
  * The following only works if pte_present() is not true.
@@ -265,6 +266,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
 	return pte;
 }
 
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
 #define pte_ERROR(e) \
 	printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
 	       __FILE__, __LINE__, pte_val(e))
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index dc86adbec916..470a4b88124d 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -323,6 +323,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
323static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 323static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
324static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } 324static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
325static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 325static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
326static inline int pte_special(pte_t pte) { return 0; }
326 327
327static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } 328static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
328static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } 329static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
@@ -330,6 +331,7 @@ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; ret
330static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } 331static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
331static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } 332static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
332static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } 333static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
334static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
333 335
334/* 336/*
335 * Conversion functions: convert a page and protection to a page entry, 337 * Conversion functions: convert a page and protection to a page entry,
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
new file mode 100644
index 000000000000..649c6c3b87b3
--- /dev/null
+++ b/include/asm-powerpc/hugetlb.h
@@ -0,0 +1,79 @@
1#ifndef _ASM_POWERPC_HUGETLB_H
2#define _ASM_POWERPC_HUGETLB_H
3
4#include <asm/page.h>
5
6
7int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
8 unsigned long len);
9
10void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
11 unsigned long end, unsigned long floor,
12 unsigned long ceiling);
13
14void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
15 pte_t *ptep, pte_t pte);
16
17pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
18 pte_t *ptep);
19
20/*
21 * If the arch doesn't supply something else, assume that hugepage
22 * size aligned regions are ok without further preparation.
23 */
24static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
25{
26 if (len & ~HPAGE_MASK)
27 return -EINVAL;
28 if (addr & ~HPAGE_MASK)
29 return -EINVAL;
30 return 0;
31}
32
33static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
34{
35}
36
37static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
38 unsigned long addr, pte_t *ptep)
39{
40}
41
42static inline int huge_pte_none(pte_t pte)
43{
44 return pte_none(pte);
45}
46
47static inline pte_t huge_pte_wrprotect(pte_t pte)
48{
49 return pte_wrprotect(pte);
50}
51
52static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
53 unsigned long addr, pte_t *ptep)
54{
55 ptep_set_wrprotect(mm, addr, ptep);
56}
57
58static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
59 unsigned long addr, pte_t *ptep,
60 pte_t pte, int dirty)
61{
62 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
63}
64
65static inline pte_t huge_ptep_get(pte_t *ptep)
66{
67 return *ptep;
68}
69
70static inline int arch_prepare_hugepage(struct page *page)
71{
72 return 0;
73}
74
75static inline void arch_release_hugepage(struct page *page)
76{
77}
78
79#endif /* _ASM_POWERPC_HUGETLB_H */
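A worked illustration of the alignment check in prepare_hugepage_range() above, assuming a 16 MiB huge page size purely for the sake of concrete numbers:

/* With HPAGE_SIZE = 16 MiB, HPAGE_MASK = ~(HPAGE_SIZE - 1):               */
/*   prepare_hugepage_range(0x11000000, 0x01000000) -> 0        (aligned)  */
/*   prepare_hugepage_range(0x11000000, 0x00800000) -> -EINVAL  (bad len)  */
/*   prepare_hugepage_range(0x11080000, 0x01000000) -> -EINVAL  (bad addr) */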
diff --git a/include/asm-powerpc/kvm.h b/include/asm-powerpc/kvm.h
index d1b530fbf8dd..f993e4198d5c 100644
--- a/include/asm-powerpc/kvm.h
+++ b/include/asm-powerpc/kvm.h
@@ -1,6 +1,55 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
1#ifndef __LINUX_KVM_POWERPC_H 20#ifndef __LINUX_KVM_POWERPC_H
2#define __LINUX_KVM_POWERPC_H 21#define __LINUX_KVM_POWERPC_H
3 22
4/* powerpc does not support KVM */ 23#include <asm/types.h>
24
25struct kvm_regs {
26 __u64 pc;
27 __u64 cr;
28 __u64 ctr;
29 __u64 lr;
30 __u64 xer;
31 __u64 msr;
32 __u64 srr0;
33 __u64 srr1;
34 __u64 pid;
35
36 __u64 sprg0;
37 __u64 sprg1;
38 __u64 sprg2;
39 __u64 sprg3;
40 __u64 sprg4;
41 __u64 sprg5;
42 __u64 sprg6;
43 __u64 sprg7;
44
45 __u64 gpr[32];
46};
47
48struct kvm_sregs {
49};
50
51struct kvm_fpu {
52 __u64 fpr[32];
53};
5 54
6#endif 55#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/include/asm-powerpc/kvm_asm.h b/include/asm-powerpc/kvm_asm.h
new file mode 100644
index 000000000000..2197764796d9
--- /dev/null
+++ b/include/asm-powerpc/kvm_asm.h
@@ -0,0 +1,55 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_ASM_H__
21#define __POWERPC_KVM_ASM_H__
22
23/* IVPR must be 64KiB-aligned. */
24#define VCPU_SIZE_ORDER 4
25#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
26#define VCPU_TLB_PGSZ PPC44x_TLB_64K
27#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
28
29#define BOOKE_INTERRUPT_CRITICAL 0
30#define BOOKE_INTERRUPT_MACHINE_CHECK 1
31#define BOOKE_INTERRUPT_DATA_STORAGE 2
32#define BOOKE_INTERRUPT_INST_STORAGE 3
33#define BOOKE_INTERRUPT_EXTERNAL 4
34#define BOOKE_INTERRUPT_ALIGNMENT 5
35#define BOOKE_INTERRUPT_PROGRAM 6
36#define BOOKE_INTERRUPT_FP_UNAVAIL 7
37#define BOOKE_INTERRUPT_SYSCALL 8
38#define BOOKE_INTERRUPT_AP_UNAVAIL 9
39#define BOOKE_INTERRUPT_DECREMENTER 10
40#define BOOKE_INTERRUPT_FIT 11
41#define BOOKE_INTERRUPT_WATCHDOG 12
42#define BOOKE_INTERRUPT_DTLB_MISS 13
43#define BOOKE_INTERRUPT_ITLB_MISS 14
44#define BOOKE_INTERRUPT_DEBUG 15
45#define BOOKE_MAX_INTERRUPT 15
46
47#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
48#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
49
50#define RESUME_GUEST 0
51#define RESUME_GUEST_NV RESUME_FLAG_NV
52#define RESUME_HOST RESUME_FLAG_HOST
53#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
54
55#endif /* __POWERPC_KVM_ASM_H__ */
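For reference, the size macros above work out as follows; this is plain arithmetic from the definitions, shown only as a sanity check:

/* VCPU_SIZE_LOG   = VCPU_SIZE_ORDER + 12 = 4 + 12 = 16                 */
/* VCPU_SIZE_BYTES = 1 << VCPU_SIZE_LOG   = 65536 bytes = 64 KiB        */
/* so one vcpu occupies an order-4 (16-page) allocation, which matches  */
/* the "IVPR must be 64KiB-aligned" requirement stated above.           */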
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
new file mode 100644
index 000000000000..04ffbb8e0a35
--- /dev/null
+++ b/include/asm-powerpc/kvm_host.h
@@ -0,0 +1,152 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_HOST_H__
21#define __POWERPC_KVM_HOST_H__
22
23#include <linux/mutex.h>
24#include <linux/timer.h>
25#include <linux/types.h>
26#include <linux/kvm_types.h>
27#include <asm/kvm_asm.h>
28
29#define KVM_MAX_VCPUS 1
30#define KVM_MEMORY_SLOTS 32
 31/* memory slots that are not exposed to userspace */
32#define KVM_PRIVATE_MEM_SLOTS 4
33
34/* We don't currently support large pages. */
35#define KVM_PAGES_PER_HPAGE (1<<31)
36
37struct kvm;
38struct kvm_run;
39struct kvm_vcpu;
40
41struct kvm_vm_stat {
42 u32 remote_tlb_flush;
43};
44
45struct kvm_vcpu_stat {
46 u32 sum_exits;
47 u32 mmio_exits;
48 u32 dcr_exits;
49 u32 signal_exits;
50 u32 light_exits;
51 /* Account for special types of light exits: */
52 u32 itlb_real_miss_exits;
53 u32 itlb_virt_miss_exits;
54 u32 dtlb_real_miss_exits;
55 u32 dtlb_virt_miss_exits;
56 u32 syscall_exits;
57 u32 isi_exits;
58 u32 dsi_exits;
59 u32 emulated_inst_exits;
60 u32 dec_exits;
61 u32 ext_intr_exits;
62};
63
64struct tlbe {
65 u32 tid; /* Only the low 8 bits are used. */
66 u32 word0;
67 u32 word1;
68 u32 word2;
69};
70
71struct kvm_arch {
72};
73
74struct kvm_vcpu_arch {
75 /* Unmodified copy of the guest's TLB. */
76 struct tlbe guest_tlb[PPC44x_TLB_SIZE];
77 /* TLB that's actually used when the guest is running. */
78 struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
79 /* Pages which are referenced in the shadow TLB. */
80 struct page *shadow_pages[PPC44x_TLB_SIZE];
81 /* Copy of the host's TLB. */
82 struct tlbe host_tlb[PPC44x_TLB_SIZE];
83
84 u32 host_stack;
85 u32 host_pid;
86
87 u64 fpr[32];
88 u32 gpr[32];
89
90 u32 pc;
91 u32 cr;
92 u32 ctr;
93 u32 lr;
94 u32 xer;
95
96 u32 msr;
97 u32 mmucr;
98 u32 sprg0;
99 u32 sprg1;
100 u32 sprg2;
101 u32 sprg3;
102 u32 sprg4;
103 u32 sprg5;
104 u32 sprg6;
105 u32 sprg7;
106 u32 srr0;
107 u32 srr1;
108 u32 csrr0;
109 u32 csrr1;
110 u32 dsrr0;
111 u32 dsrr1;
112 u32 dear;
113 u32 esr;
114 u32 dec;
115 u32 decar;
116 u32 tbl;
117 u32 tbu;
118 u32 tcr;
119 u32 tsr;
120 u32 ivor[16];
121 u32 ivpr;
122 u32 pir;
123 u32 pid;
124 u32 pvr;
125 u32 ccr0;
126 u32 ccr1;
127 u32 dbcr0;
128 u32 dbcr1;
129
130 u32 last_inst;
131 u32 fault_dear;
132 u32 fault_esr;
133 gpa_t paddr_accessed;
134
135 u8 io_gpr; /* GPR used as IO source/target */
136 u8 mmio_is_bigendian;
137 u8 dcr_needed;
138 u8 dcr_is_write;
139
140 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
141
142 struct timer_list dec_timer;
143 unsigned long pending_exceptions;
144};
145
146struct kvm_guest_debug {
147 int enabled;
148 unsigned long bp[4];
149 int singlestep;
150};
151
152#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/include/asm-powerpc/kvm_para.h b/include/asm-powerpc/kvm_para.h
new file mode 100644
index 000000000000..2d48f6a63d0b
--- /dev/null
+++ b/include/asm-powerpc/kvm_para.h
@@ -0,0 +1,37 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_PARA_H__
21#define __POWERPC_KVM_PARA_H__
22
23#ifdef __KERNEL__
24
25static inline int kvm_para_available(void)
26{
27 return 0;
28}
29
30static inline unsigned int kvm_arch_para_features(void)
31{
32 return 0;
33}
34
35#endif /* __KERNEL__ */
36
37#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
new file mode 100644
index 000000000000..7ac820308a7e
--- /dev/null
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -0,0 +1,88 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_PPC_H__
21#define __POWERPC_KVM_PPC_H__
22
23/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
24 * dependencies. */
25
26#include <linux/mutex.h>
27#include <linux/timer.h>
28#include <linux/types.h>
29#include <linux/kvm_types.h>
30#include <linux/kvm_host.h>
31
32struct kvm_tlb {
33 struct tlbe guest_tlb[PPC44x_TLB_SIZE];
34 struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
35};
36
37enum emulation_result {
38 EMULATE_DONE, /* no further processing */
39 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
40 EMULATE_DO_DCR, /* kvm_run filled with DCR request */
41 EMULATE_FAIL, /* can't emulate this instruction */
42};
43
44extern const unsigned char exception_priority[];
45extern const unsigned char priority_exception[];
46
47extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
48extern char kvmppc_handlers_start[];
49extern unsigned long kvmppc_handler_len;
50
51extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
52extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
53 unsigned int rt, unsigned int bytes,
54 int is_bigendian);
55extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
56 u32 val, unsigned int bytes, int is_bigendian);
57
58extern int kvmppc_emulate_instruction(struct kvm_run *run,
59 struct kvm_vcpu *vcpu);
60
61extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
62 u64 asid, u32 flags);
63extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid);
64extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
65
66extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
67
68static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
69{
70 unsigned int priority = exception_priority[exception];
71 set_bit(priority, &vcpu->arch.pending_exceptions);
72}
73
74static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
75{
76 unsigned int priority = exception_priority[exception];
77 clear_bit(priority, &vcpu->arch.pending_exceptions);
78}
79
80static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
81{
82 if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
83 kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
84
85 vcpu->arch.msr = new_msr;
86}
87
88#endif /* __POWERPC_KVM_PPC_H__ */
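A hedged sketch of how the exception helpers above are meant to be combined; the wrapper below is hypothetical, and only kvmppc_queue_exception(), kvmppc_check_and_deliver_interrupts() and BOOKE_INTERRUPT_DECREMENTER come from this patch:

/* Hypothetical caller: mark a decrementer interrupt as pending for the vcpu. */
static void example_raise_decrementer(struct kvm_vcpu *vcpu)
{
	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
}

/* Before re-entering the guest, pending bits would then be delivered with:   */
/*	kvmppc_check_and_deliver_interrupts(vcpu);                             */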
diff --git a/include/asm-powerpc/mmu-44x.h b/include/asm-powerpc/mmu-44x.h
index c8b02d97f753..a825524c981a 100644
--- a/include/asm-powerpc/mmu-44x.h
+++ b/include/asm-powerpc/mmu-44x.h
@@ -53,6 +53,8 @@
53 53
54#ifndef __ASSEMBLY__ 54#ifndef __ASSEMBLY__
55 55
56extern unsigned int tlb_44x_hwater;
57
56typedef struct { 58typedef struct {
57 unsigned long id; 59 unsigned long id;
58 unsigned long vdso_base; 60 unsigned long vdso_base;
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 67834eae5702..25af4fc8daf4 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -128,11 +128,6 @@ extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
128extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); 128extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
129#define slice_mm_new_context(mm) ((mm)->context.id == 0) 129#define slice_mm_new_context(mm) ((mm)->context.id == 0)
130 130
131#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
132extern int is_hugepage_only_range(struct mm_struct *m,
133 unsigned long addr,
134 unsigned long len);
135
136#endif /* __ASSEMBLY__ */ 131#endif /* __ASSEMBLY__ */
137#else 132#else
138#define slice_init() 133#define slice_init()
@@ -146,8 +141,6 @@ do { \
146 141
147#ifdef CONFIG_HUGETLB_PAGE 142#ifdef CONFIG_HUGETLB_PAGE
148 143
149#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
150#define ARCH_HAS_SETCLEAR_HUGE_PTE
151#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 144#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
152 145
153#endif /* !CONFIG_HUGETLB_PAGE */ 146#endif /* !CONFIG_HUGETLB_PAGE */
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index daea7692d070..7c97b5a08d08 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -504,6 +504,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
504static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 504static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
505static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 505static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
506static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 506static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
507static inline int pte_special(pte_t pte) { return 0; }
507 508
508static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 509static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
509static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } 510static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -521,6 +522,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
521 pte_val(pte) |= _PAGE_DIRTY; return pte; } 522 pte_val(pte) |= _PAGE_DIRTY; return pte; }
522static inline pte_t pte_mkyoung(pte_t pte) { 523static inline pte_t pte_mkyoung(pte_t pte) {
523 pte_val(pte) |= _PAGE_ACCESSED; return pte; } 524 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
525static inline pte_t pte_mkspecial(pte_t pte) {
526 return pte; }
524 527
525static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 528static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
526{ 529{
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index dd4c26dc57d2..27f18695f7d6 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -239,6 +239,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
239static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} 239static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
240static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} 240static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
241static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} 241static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
242static inline int pte_special(pte_t pte) { return 0; }
242 243
243static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 244static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
244static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } 245static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -257,6 +258,8 @@ static inline pte_t pte_mkyoung(pte_t pte) {
257 pte_val(pte) |= _PAGE_ACCESSED; return pte; } 258 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
258static inline pte_t pte_mkhuge(pte_t pte) { 259static inline pte_t pte_mkhuge(pte_t pte) {
259 return pte; } 260 return pte; }
261static inline pte_t pte_mkspecial(pte_t pte) {
262 return pte; }
260 263
261/* Atomic PTE updates */ 264/* Atomic PTE updates */
262static inline unsigned long pte_update(struct mm_struct *mm, 265static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 70435d32129a..55f9d38e3bf8 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -483,6 +483,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
483static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 483static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
484static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 484static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
485static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 485static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
486static inline int pte_special(pte_t pte) { return 0; }
486 487
487static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 488static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
488static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } 489static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -500,6 +501,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
500 pte_val(pte) |= _PAGE_DIRTY; return pte; } 501 pte_val(pte) |= _PAGE_DIRTY; return pte; }
501static inline pte_t pte_mkyoung(pte_t pte) { 502static inline pte_t pte_mkyoung(pte_t pte) {
502 pte_val(pte) |= _PAGE_ACCESSED; return pte; } 503 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
504static inline pte_t pte_mkspecial(pte_t pte) {
505 return pte; }
503 506
504static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 507static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
505{ 508{
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
index e92b429d2be1..13c9805349f1 100644
--- a/include/asm-s390/Kbuild
+++ b/include/asm-s390/Kbuild
@@ -7,6 +7,7 @@ header-y += tape390.h
7header-y += ucontext.h 7header-y += ucontext.h
8header-y += vtoc.h 8header-y += vtoc.h
9header-y += zcrypt.h 9header-y += zcrypt.h
10header-y += kvm.h
10 11
11unifdef-y += cmb.h 12unifdef-y += cmb.h
12unifdef-y += debug.h 13unifdef-y += debug.h
diff --git a/include/asm-s390/kvm.h b/include/asm-s390/kvm.h
index 573f2a351386..d74002f95794 100644
--- a/include/asm-s390/kvm.h
+++ b/include/asm-s390/kvm.h
@@ -1,6 +1,45 @@
1#ifndef __LINUX_KVM_S390_H 1#ifndef __LINUX_KVM_S390_H
2#define __LINUX_KVM_S390_H 2#define __LINUX_KVM_S390_H
3 3
4/* s390 does not support KVM */ 4/*
5 * asm-s390/kvm.h - KVM s390 specific structures and definitions
6 *
7 * Copyright IBM Corp. 2008
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License (version 2 only)
11 * as published by the Free Software Foundation.
12 *
13 * Author(s): Carsten Otte <cotte@de.ibm.com>
14 * Christian Borntraeger <borntraeger@de.ibm.com>
15 */
16#include <asm/types.h>
17
18/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
19struct kvm_pic_state {
20 /* no PIC for s390 */
21};
22
23struct kvm_ioapic_state {
24 /* no IOAPIC for s390 */
25};
26
27/* for KVM_GET_REGS and KVM_SET_REGS */
28struct kvm_regs {
29 /* general purpose regs for s390 */
30 __u64 gprs[16];
31};
32
33/* for KVM_GET_SREGS and KVM_SET_SREGS */
34struct kvm_sregs {
35 __u32 acrs[16];
36 __u64 crs[16];
37};
38
39/* for KVM_GET_FPU and KVM_SET_FPU */
40struct kvm_fpu {
41 __u32 fpc;
42 __u64 fprs[16];
43};
5 44
6#endif 45#endif
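These layouts are what the generic KVM register ioctls exchange with userspace; a hedged userspace sketch, assuming <sys/ioctl.h> and <linux/kvm.h> are included and vcpu_fd came from an earlier KVM_CREATE_VCPU call:

/* Illustrative userspace access to the guest general purpose registers. */
struct kvm_regs regs;

if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0) {
	regs.gprs[2] = 0;			/* adjust guest r2 ...      */
	ioctl(vcpu_fd, KVM_SET_REGS, &regs);	/* ... and write it back    */
}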
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
new file mode 100644
index 000000000000..f8204a4f2e02
--- /dev/null
+++ b/include/asm-s390/kvm_host.h
@@ -0,0 +1,234 @@
1/*
2 * asm-s390/kvm_host.h - definition for kernel virtual machines on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
13
14#ifndef ASM_KVM_HOST_H
15#define ASM_KVM_HOST_H
16#include <linux/kvm_host.h>
17#include <asm/debug.h>
18
19#define KVM_MAX_VCPUS 64
20#define KVM_MEMORY_SLOTS 32
 21/* memory slots that are not exposed to userspace */
22#define KVM_PRIVATE_MEM_SLOTS 4
23
24struct kvm_guest_debug {
25};
26
27struct sca_entry {
28 atomic_t scn;
29 __u64 reserved;
30 __u64 sda;
31 __u64 reserved2[2];
32} __attribute__((packed));
33
34
35struct sca_block {
36 __u64 ipte_control;
37 __u64 reserved[5];
38 __u64 mcn;
39 __u64 reserved2;
40 struct sca_entry cpu[64];
41} __attribute__((packed));
42
43#define KVM_PAGES_PER_HPAGE 256
44
45#define CPUSTAT_HOST 0x80000000
46#define CPUSTAT_WAIT 0x10000000
47#define CPUSTAT_ECALL_PEND 0x08000000
48#define CPUSTAT_STOP_INT 0x04000000
49#define CPUSTAT_IO_INT 0x02000000
50#define CPUSTAT_EXT_INT 0x01000000
51#define CPUSTAT_RUNNING 0x00800000
52#define CPUSTAT_RETAINED 0x00400000
53#define CPUSTAT_TIMING_SUB 0x00020000
54#define CPUSTAT_SIE_SUB 0x00010000
55#define CPUSTAT_RRF 0x00008000
56#define CPUSTAT_SLSV 0x00004000
57#define CPUSTAT_SLSR 0x00002000
58#define CPUSTAT_ZARCH 0x00000800
59#define CPUSTAT_MCDS 0x00000100
60#define CPUSTAT_SM 0x00000080
61#define CPUSTAT_G 0x00000008
62#define CPUSTAT_J 0x00000002
63#define CPUSTAT_P 0x00000001
64
65struct sie_block {
66 atomic_t cpuflags; /* 0x0000 */
67 __u32 prefix; /* 0x0004 */
68 __u8 reserved8[32]; /* 0x0008 */
69 __u64 cputm; /* 0x0028 */
70 __u64 ckc; /* 0x0030 */
71 __u64 epoch; /* 0x0038 */
72 __u8 reserved40[4]; /* 0x0040 */
73#define LCTL_CR0 0x8000
74 __u16 lctl; /* 0x0044 */
75 __s16 icpua; /* 0x0046 */
76 __u32 ictl; /* 0x0048 */
77 __u32 eca; /* 0x004c */
78 __u8 icptcode; /* 0x0050 */
79 __u8 reserved51; /* 0x0051 */
80 __u16 ihcpu; /* 0x0052 */
81 __u8 reserved54[2]; /* 0x0054 */
82 __u16 ipa; /* 0x0056 */
83 __u32 ipb; /* 0x0058 */
84 __u32 scaoh; /* 0x005c */
85 __u8 reserved60; /* 0x0060 */
86 __u8 ecb; /* 0x0061 */
87 __u8 reserved62[2]; /* 0x0062 */
88 __u32 scaol; /* 0x0064 */
89 __u8 reserved68[4]; /* 0x0068 */
90 __u32 todpr; /* 0x006c */
91 __u8 reserved70[16]; /* 0x0070 */
92 __u64 gmsor; /* 0x0080 */
93 __u64 gmslm; /* 0x0088 */
94 psw_t gpsw; /* 0x0090 */
95 __u64 gg14; /* 0x00a0 */
96 __u64 gg15; /* 0x00a8 */
97 __u8 reservedb0[30]; /* 0x00b0 */
98 __u16 iprcc; /* 0x00ce */
99 __u8 reservedd0[48]; /* 0x00d0 */
100 __u64 gcr[16]; /* 0x0100 */
101 __u64 gbea; /* 0x0180 */
102 __u8 reserved188[120]; /* 0x0188 */
103} __attribute__((packed));
104
105struct kvm_vcpu_stat {
106 u32 exit_userspace;
107 u32 exit_external_request;
108 u32 exit_external_interrupt;
109 u32 exit_stop_request;
110 u32 exit_validity;
111 u32 exit_instruction;
112 u32 instruction_lctl;
113 u32 instruction_lctg;
114 u32 exit_program_interruption;
115 u32 exit_instr_and_program;
116 u32 deliver_emergency_signal;
117 u32 deliver_service_signal;
118 u32 deliver_virtio_interrupt;
119 u32 deliver_stop_signal;
120 u32 deliver_prefix_signal;
121 u32 deliver_restart_signal;
122 u32 deliver_program_int;
123 u32 exit_wait_state;
124 u32 instruction_stidp;
125 u32 instruction_spx;
126 u32 instruction_stpx;
127 u32 instruction_stap;
128 u32 instruction_storage_key;
129 u32 instruction_stsch;
130 u32 instruction_chsc;
131 u32 instruction_stsi;
132 u32 instruction_stfl;
133 u32 instruction_sigp_sense;
134 u32 instruction_sigp_emergency;
135 u32 instruction_sigp_stop;
136 u32 instruction_sigp_arch;
137 u32 instruction_sigp_prefix;
138 u32 instruction_sigp_restart;
139 u32 diagnose_44;
140};
141
142struct io_info {
143 __u16 subchannel_id; /* 0x0b8 */
144 __u16 subchannel_nr; /* 0x0ba */
145 __u32 io_int_parm; /* 0x0bc */
146 __u32 io_int_word; /* 0x0c0 */
147};
148
149struct ext_info {
150 __u32 ext_params;
151 __u64 ext_params2;
152};
153
154#define PGM_OPERATION 0x01
155#define PGM_PRIVILEGED_OPERATION 0x02
156#define PGM_EXECUTE 0x03
157#define PGM_PROTECTION 0x04
158#define PGM_ADDRESSING 0x05
159#define PGM_SPECIFICATION 0x06
160#define PGM_DATA 0x07
161
162struct pgm_info {
163 __u16 code;
164};
165
166struct prefix_info {
167 __u32 address;
168};
169
170struct interrupt_info {
171 struct list_head list;
172 u64 type;
173 union {
174 struct io_info io;
175 struct ext_info ext;
176 struct pgm_info pgm;
177 struct prefix_info prefix;
178 };
179};
180
181/* for local_interrupt.action_flags */
182#define ACTION_STORE_ON_STOP 1
183#define ACTION_STOP_ON_STOP 2
184
185struct local_interrupt {
186 spinlock_t lock;
187 struct list_head list;
188 atomic_t active;
189 struct float_interrupt *float_int;
190 int timer_due; /* event indicator for waitqueue below */
191 wait_queue_head_t wq;
192 atomic_t *cpuflags;
193 unsigned int action_bits;
194};
195
196struct float_interrupt {
197 spinlock_t lock;
198 struct list_head list;
199 atomic_t active;
200 int next_rr_cpu;
201 unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
202 struct local_interrupt *local_int[64];
203};
204
205
206struct kvm_vcpu_arch {
207 struct sie_block *sie_block;
208 unsigned long guest_gprs[16];
209 s390_fp_regs host_fpregs;
210 unsigned int host_acrs[NUM_ACRS];
211 s390_fp_regs guest_fpregs;
212 unsigned int guest_acrs[NUM_ACRS];
213 struct local_interrupt local_int;
214 struct timer_list ckc_timer;
215 union {
216 cpuid_t cpu_id;
217 u64 stidp_data;
218 };
219};
220
221struct kvm_vm_stat {
222 u32 remote_tlb_flush;
223};
224
225struct kvm_arch{
226 unsigned long guest_origin;
227 unsigned long guest_memsize;
228 struct sca_block *sca;
229 debug_info_t *dbf;
230 struct float_interrupt float_int;
231};
232
233extern int sie64a(struct sie_block *, __u64 *);
234#endif
diff --git a/include/asm-s390/kvm_para.h b/include/asm-s390/kvm_para.h
new file mode 100644
index 000000000000..2c503796b619
--- /dev/null
+++ b/include/asm-s390/kvm_para.h
@@ -0,0 +1,150 @@
1/*
2 * asm-s390/kvm_para.h - definition for paravirtual devices on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */
12
13#ifndef __S390_KVM_PARA_H
14#define __S390_KVM_PARA_H
15
16/*
17 * Hypercalls for KVM on s390. The calling convention is similar to the
18 * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
19 * as hypercall number and R7 as parameter 6. The return value is
 20 * written to R2. We use the diagnose instruction as the hypercall. To avoid
 21 * conflicts with existing diagnoses for LPAR and z/VM, we do not use
 22 * the instruction-encoded number, but specify the number in R1 and
 23 * use 0x500 as the KVM hypercall.
24 *
25 * Copyright IBM Corp. 2007,2008
26 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
27 *
28 * This work is licensed under the terms of the GNU GPL, version 2.
29 */
30
31static inline long kvm_hypercall0(unsigned long nr)
32{
33 register unsigned long __nr asm("1") = nr;
34 register long __rc asm("2");
35
36 asm volatile ("diag 2,4,0x500\n"
37 : "=d" (__rc) : "d" (__nr): "memory", "cc");
38 return __rc;
39}
40
41static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
42{
43 register unsigned long __nr asm("1") = nr;
44 register unsigned long __p1 asm("2") = p1;
45 register long __rc asm("2");
46
47 asm volatile ("diag 2,4,0x500\n"
48 : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
49 return __rc;
50}
51
52static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
53 unsigned long p2)
54{
55 register unsigned long __nr asm("1") = nr;
56 register unsigned long __p1 asm("2") = p1;
57 register unsigned long __p2 asm("3") = p2;
58 register long __rc asm("2");
59
60 asm volatile ("diag 2,4,0x500\n"
61 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
62 : "memory", "cc");
63 return __rc;
64}
65
66static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
67 unsigned long p2, unsigned long p3)
68{
69 register unsigned long __nr asm("1") = nr;
70 register unsigned long __p1 asm("2") = p1;
71 register unsigned long __p2 asm("3") = p2;
72 register unsigned long __p3 asm("4") = p3;
73 register long __rc asm("2");
74
75 asm volatile ("diag 2,4,0x500\n"
76 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
77 "d" (__p3) : "memory", "cc");
78 return __rc;
79}
80
81
82static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
83 unsigned long p2, unsigned long p3,
84 unsigned long p4)
85{
86 register unsigned long __nr asm("1") = nr;
87 register unsigned long __p1 asm("2") = p1;
88 register unsigned long __p2 asm("3") = p2;
89 register unsigned long __p3 asm("4") = p3;
90 register unsigned long __p4 asm("5") = p4;
91 register long __rc asm("2");
92
93 asm volatile ("diag 2,4,0x500\n"
94 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
95 "d" (__p3), "d" (__p4) : "memory", "cc");
96 return __rc;
97}
98
99static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
100 unsigned long p2, unsigned long p3,
101 unsigned long p4, unsigned long p5)
102{
103 register unsigned long __nr asm("1") = nr;
104 register unsigned long __p1 asm("2") = p1;
105 register unsigned long __p2 asm("3") = p2;
106 register unsigned long __p3 asm("4") = p3;
107 register unsigned long __p4 asm("5") = p4;
108 register unsigned long __p5 asm("6") = p5;
109 register long __rc asm("2");
110
111 asm volatile ("diag 2,4,0x500\n"
112 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
113 "d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc");
114 return __rc;
115}
116
117static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
118 unsigned long p2, unsigned long p3,
119 unsigned long p4, unsigned long p5,
120 unsigned long p6)
121{
122 register unsigned long __nr asm("1") = nr;
123 register unsigned long __p1 asm("2") = p1;
124 register unsigned long __p2 asm("3") = p2;
125 register unsigned long __p3 asm("4") = p3;
126 register unsigned long __p4 asm("5") = p4;
127 register unsigned long __p5 asm("6") = p5;
128 register unsigned long __p6 asm("7") = p6;
129 register long __rc asm("2");
130
131 asm volatile ("diag 2,4,0x500\n"
132 : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
133 "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
134 : "memory", "cc");
135 return __rc;
136}
137
138/* kvm on s390 is always paravirtualization enabled */
139static inline int kvm_para_available(void)
140{
141 return 1;
142}
143
144/* No feature bits are currently assigned for kvm on s390 */
145static inline unsigned int kvm_arch_para_features(void)
146{
147 return 0;
148}
149
150#endif /* __S390_KVM_PARA_H */
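A hedged sketch of a guest-side caller; the function is purely illustrative, and KVM_S390_VIRTIO_NOTIFY is taken from kvm_virtio.h added later in this patch:

/* Illustrative guest code: kvm_hypercall1() puts the hypercall number in  */
/* R1 and the single parameter in R2, issues diag 0x500, and the host's    */
/* return code comes back in R2.                                           */
static long example_notify_host(unsigned long vq_address)
{
	return kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, vq_address);
}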
diff --git a/include/asm-s390/kvm_virtio.h b/include/asm-s390/kvm_virtio.h
new file mode 100644
index 000000000000..5c871a990c29
--- /dev/null
+++ b/include/asm-s390/kvm_virtio.h
@@ -0,0 +1,53 @@
1/*
2 * kvm_virtio.h - definition for virtio for kvm on s390
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */
12
13#ifndef __KVM_S390_VIRTIO_H
14#define __KVM_S390_VIRTIO_H
15
16#include <linux/types.h>
17
18struct kvm_device_desc {
19 /* The device type: console, network, disk etc. Type 0 terminates. */
20 __u8 type;
21 /* The number of virtqueues (first in config array) */
22 __u8 num_vq;
23 /*
24 * The number of bytes of feature bits. Multiply by 2: one for host
25 * features and one for guest acknowledgements.
26 */
27 __u8 feature_len;
28 /* The number of bytes of the config array after virtqueues. */
29 __u8 config_len;
30 /* A status byte, written by the Guest. */
31 __u8 status;
32 __u8 config[0];
33};
34
35/*
36 * This is how we expect the device configuration field for a virtqueue
37 * to be laid out in config space.
38 */
39struct kvm_vqconfig {
40 /* The token returned with an interrupt. Set by the guest */
41 __u64 token;
42 /* The address of the virtio ring */
43 __u64 address;
44 /* The number of entries in the virtio_ring */
45 __u16 num;
46
47};
48
49#define KVM_S390_VIRTIO_NOTIFY 0
50#define KVM_S390_VIRTIO_RESET 1
51#define KVM_S390_VIRTIO_SET_STATUS 2
52
53#endif
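A hedged helper showing how the variable-length layout described above would be measured; the function is illustrative and uses only fields defined in this header:

/* One descriptor occupies the fixed header, one kvm_vqconfig per       */
/* virtqueue, the feature bytes twice (host features plus guest         */
/* acknowledgements), and finally the raw config bytes.                 */
static unsigned int example_desc_size(const struct kvm_device_desc *d)
{
	return sizeof(*d)
	       + d->num_vq * sizeof(struct kvm_vqconfig)
	       + d->feature_len * 2
	       + d->config_len;
}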
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 5de3efb31445..0bc51d52a899 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -381,27 +381,32 @@ struct _lowcore
381 /* whether the kernel died with panic() or not */ 381 /* whether the kernel died with panic() or not */
382 __u32 panic_magic; /* 0xe00 */ 382 __u32 panic_magic; /* 0xe00 */
383 383
384 __u8 pad13[0x1200-0xe04]; /* 0xe04 */ 384 __u8 pad13[0x11b8-0xe04]; /* 0xe04 */
385
386 /* 64 bit extparam used for pfault, diag 250 etc */
387 __u64 ext_params2; /* 0x11B8 */
388
389 __u8 pad14[0x1200-0x11C0]; /* 0x11C0 */
385 390
386 /* System info area */ 391 /* System info area */
387 392
388 __u64 floating_pt_save_area[16]; /* 0x1200 */ 393 __u64 floating_pt_save_area[16]; /* 0x1200 */
389 __u64 gpregs_save_area[16]; /* 0x1280 */ 394 __u64 gpregs_save_area[16]; /* 0x1280 */
390 __u32 st_status_fixed_logout[4]; /* 0x1300 */ 395 __u32 st_status_fixed_logout[4]; /* 0x1300 */
391 __u8 pad14[0x1318-0x1310]; /* 0x1310 */ 396 __u8 pad15[0x1318-0x1310]; /* 0x1310 */
392 __u32 prefixreg_save_area; /* 0x1318 */ 397 __u32 prefixreg_save_area; /* 0x1318 */
393 __u32 fpt_creg_save_area; /* 0x131c */ 398 __u32 fpt_creg_save_area; /* 0x131c */
394 __u8 pad15[0x1324-0x1320]; /* 0x1320 */ 399 __u8 pad16[0x1324-0x1320]; /* 0x1320 */
395 __u32 tod_progreg_save_area; /* 0x1324 */ 400 __u32 tod_progreg_save_area; /* 0x1324 */
396 __u32 cpu_timer_save_area[2]; /* 0x1328 */ 401 __u32 cpu_timer_save_area[2]; /* 0x1328 */
397 __u32 clock_comp_save_area[2]; /* 0x1330 */ 402 __u32 clock_comp_save_area[2]; /* 0x1330 */
398 __u8 pad16[0x1340-0x1338]; /* 0x1338 */ 403 __u8 pad17[0x1340-0x1338]; /* 0x1338 */
399 __u32 access_regs_save_area[16]; /* 0x1340 */ 404 __u32 access_regs_save_area[16]; /* 0x1340 */
400 __u64 cregs_save_area[16]; /* 0x1380 */ 405 __u64 cregs_save_area[16]; /* 0x1380 */
401 406
402 /* align to the top of the prefix area */ 407 /* align to the top of the prefix area */
403 408
404 __u8 pad17[0x2000-0x1400]; /* 0x1400 */ 409 __u8 pad18[0x2000-0x1400]; /* 0x1400 */
405#endif /* !__s390x__ */ 410#endif /* !__s390x__ */
406} __attribute__((packed)); /* End structure*/ 411} __attribute__((packed)); /* End structure*/
407 412
diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h
index 1698e29c5b20..5dd5e7b3476f 100644
--- a/include/asm-s390/mmu.h
+++ b/include/asm-s390/mmu.h
@@ -7,6 +7,7 @@ typedef struct {
7 unsigned long asce_bits; 7 unsigned long asce_bits;
8 unsigned long asce_limit; 8 unsigned long asce_limit;
9 int noexec; 9 int noexec;
10 int pgstes;
10} mm_context_t; 11} mm_context_t;
11 12
12#endif 13#endif
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index b5a34c6f91a9..4c2fbf48c9c4 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -20,7 +20,13 @@ static inline int init_new_context(struct task_struct *tsk,
20#ifdef CONFIG_64BIT 20#ifdef CONFIG_64BIT
21 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 21 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
22#endif 22#endif
23 mm->context.noexec = s390_noexec; 23 if (current->mm->context.pgstes) {
24 mm->context.noexec = 0;
25 mm->context.pgstes = 1;
26 } else {
27 mm->context.noexec = s390_noexec;
28 mm->context.pgstes = 0;
29 }
24 mm->context.asce_limit = STACK_TOP_MAX; 30 mm->context.asce_limit = STACK_TOP_MAX;
25 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 31 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
26 return 0; 32 return 0;
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 65154dc9a9e5..f8347ce9c5a1 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -30,6 +30,7 @@
30 */ 30 */
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32#include <linux/mm_types.h> 32#include <linux/mm_types.h>
33#include <asm/bitops.h>
33#include <asm/bug.h> 34#include <asm/bug.h>
34#include <asm/processor.h> 35#include <asm/processor.h>
35 36
@@ -219,6 +220,8 @@ extern char empty_zero_page[PAGE_SIZE];
219/* Software bits in the page table entry */ 220/* Software bits in the page table entry */
220#define _PAGE_SWT 0x001 /* SW pte type bit t */ 221#define _PAGE_SWT 0x001 /* SW pte type bit t */
221#define _PAGE_SWX 0x002 /* SW pte type bit x */ 222#define _PAGE_SWX 0x002 /* SW pte type bit x */
223#define _PAGE_SPECIAL 0x004 /* SW associated with special page */
224#define __HAVE_ARCH_PTE_SPECIAL
222 225
223/* Six different types of pages. */ 226/* Six different types of pages. */
224#define _PAGE_TYPE_EMPTY 0x400 227#define _PAGE_TYPE_EMPTY 0x400
@@ -258,6 +261,13 @@ extern char empty_zero_page[PAGE_SIZE];
258 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. 261 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
259 */ 262 */
260 263
264/* Page status table bits for virtualization */
265#define RCP_PCL_BIT 55
266#define RCP_HR_BIT 54
267#define RCP_HC_BIT 53
268#define RCP_GR_BIT 50
269#define RCP_GC_BIT 49
270
261#ifndef __s390x__ 271#ifndef __s390x__
262 272
263/* Bits in the segment table address-space-control-element */ 273/* Bits in the segment table address-space-control-element */
@@ -510,9 +520,56 @@ static inline int pte_file(pte_t pte)
510 return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; 520 return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
511} 521}
512 522
523static inline int pte_special(pte_t pte)
524{
525 return (pte_val(pte) & _PAGE_SPECIAL);
526}
527
513#define __HAVE_ARCH_PTE_SAME 528#define __HAVE_ARCH_PTE_SAME
514#define pte_same(a,b) (pte_val(a) == pte_val(b)) 529#define pte_same(a,b) (pte_val(a) == pte_val(b))
515 530
531static inline void rcp_lock(pte_t *ptep)
532{
533#ifdef CONFIG_PGSTE
534 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
535 preempt_disable();
536 while (test_and_set_bit(RCP_PCL_BIT, pgste))
537 ;
538#endif
539}
540
541static inline void rcp_unlock(pte_t *ptep)
542{
543#ifdef CONFIG_PGSTE
544 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
545 clear_bit(RCP_PCL_BIT, pgste);
546 preempt_enable();
547#endif
548}
549
 550/* forward declaration for SetPageUptodate in page-flags.h */
551static inline void page_clear_dirty(struct page *page);
552#include <linux/page-flags.h>
553
554static inline void ptep_rcp_copy(pte_t *ptep)
555{
556#ifdef CONFIG_PGSTE
557 struct page *page = virt_to_page(pte_val(*ptep));
558 unsigned int skey;
559 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
560
561 skey = page_get_storage_key(page_to_phys(page));
562 if (skey & _PAGE_CHANGED)
563 set_bit_simple(RCP_GC_BIT, pgste);
564 if (skey & _PAGE_REFERENCED)
565 set_bit_simple(RCP_GR_BIT, pgste);
566 if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
567 SetPageDirty(page);
568 if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
569 SetPageReferenced(page);
570#endif
571}
572
516/* 573/*
517 * query functions pte_write/pte_dirty/pte_young only work if 574 * query functions pte_write/pte_dirty/pte_young only work if
518 * pte_present() is true. Undefined behaviour if not.. 575 * pte_present() is true. Undefined behaviour if not..
@@ -599,6 +656,8 @@ static inline void pmd_clear(pmd_t *pmd)
599 656
600static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 657static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
601{ 658{
659 if (mm->context.pgstes)
660 ptep_rcp_copy(ptep);
602 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 661 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
603 if (mm->context.noexec) 662 if (mm->context.noexec)
604 pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY; 663 pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
@@ -663,10 +722,34 @@ static inline pte_t pte_mkyoung(pte_t pte)
663 return pte; 722 return pte;
664} 723}
665 724
725static inline pte_t pte_mkspecial(pte_t pte)
726{
727 pte_val(pte) |= _PAGE_SPECIAL;
728 return pte;
729}
730
666#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 731#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
667static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 732static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
668 unsigned long addr, pte_t *ptep) 733 unsigned long addr, pte_t *ptep)
669{ 734{
735#ifdef CONFIG_PGSTE
736 unsigned long physpage;
737 int young;
738 unsigned long *pgste;
739
740 if (!vma->vm_mm->context.pgstes)
741 return 0;
742 physpage = pte_val(*ptep) & PAGE_MASK;
743 pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
744
745 young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
746 rcp_lock(ptep);
747 if (young)
748 set_bit_simple(RCP_GR_BIT, pgste);
749 young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
750 rcp_unlock(ptep);
751 return young;
752#endif
670 return 0; 753 return 0;
671} 754}
672 755
@@ -674,7 +757,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
674static inline int ptep_clear_flush_young(struct vm_area_struct *vma, 757static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
675 unsigned long address, pte_t *ptep) 758 unsigned long address, pte_t *ptep)
676{ 759{
677 /* No need to flush TLB; bits are in storage key */ 760 /* No need to flush TLB
761 * On s390 reference bits are in storage key and never in TLB
 762 * With virtualization we handle the reference bit; without it
 763 * we can simply return. */
764#ifdef CONFIG_PGSTE
765 return ptep_test_and_clear_young(vma, address, ptep);
766#endif
678 return 0; 767 return 0;
679} 768}
680 769
@@ -693,15 +782,25 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
693 : "=m" (*ptep) : "m" (*ptep), 782 : "=m" (*ptep) : "m" (*ptep),
694 "a" (pto), "a" (address)); 783 "a" (pto), "a" (address));
695 } 784 }
696 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
697} 785}
698 786
699static inline void ptep_invalidate(struct mm_struct *mm, 787static inline void ptep_invalidate(struct mm_struct *mm,
700 unsigned long address, pte_t *ptep) 788 unsigned long address, pte_t *ptep)
701{ 789{
790 if (mm->context.pgstes) {
791 rcp_lock(ptep);
792 __ptep_ipte(address, ptep);
793 ptep_rcp_copy(ptep);
794 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
795 rcp_unlock(ptep);
796 return;
797 }
702 __ptep_ipte(address, ptep); 798 __ptep_ipte(address, ptep);
703 if (mm->context.noexec) 799 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
800 if (mm->context.noexec) {
704 __ptep_ipte(address, ptep + PTRS_PER_PTE); 801 __ptep_ipte(address, ptep + PTRS_PER_PTE);
802 pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
803 }
705} 804}
706 805
707/* 806/*
@@ -966,6 +1065,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
966 1065
967extern int add_shared_memory(unsigned long start, unsigned long size); 1066extern int add_shared_memory(unsigned long start, unsigned long size);
968extern int remove_shared_memory(unsigned long start, unsigned long size); 1067extern int remove_shared_memory(unsigned long start, unsigned long size);
1068extern int s390_enable_sie(void);
969 1069
970/* 1070/*
971 * No page table caches to initialise 1071 * No page table caches to initialise
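s390_enable_sie() is declared above so that a hypervisor can request pgste-carrying page tables for its own mm before guest memory is mapped; init_new_context() then propagates context.pgstes to child mms. A minimal hedged sketch of the expected call site, with a hypothetical function name:

/* Hypothetical host-side setup: switch the current mm to page tables    */
/* that carry pgstes; expected to return 0 on success.                   */
static int example_prepare_host_mm(void)
{
	return s390_enable_sie();
}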
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index a76a6b8fd887..aaf4b518b940 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -62,6 +62,7 @@ extern unsigned long machine_flags;
62#define MACHINE_IS_VM (machine_flags & 1) 62#define MACHINE_IS_VM (machine_flags & 1)
63#define MACHINE_IS_P390 (machine_flags & 4) 63#define MACHINE_IS_P390 (machine_flags & 4)
64#define MACHINE_HAS_MVPG (machine_flags & 16) 64#define MACHINE_HAS_MVPG (machine_flags & 16)
65#define MACHINE_IS_KVM (machine_flags & 64)
65#define MACHINE_HAS_IDTE (machine_flags & 128) 66#define MACHINE_HAS_IDTE (machine_flags & 128)
66#define MACHINE_HAS_DIAG9C (machine_flags & 256) 67#define MACHINE_HAS_DIAG9C (machine_flags & 256)
67 68
diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h
new file mode 100644
index 000000000000..02402303d89b
--- /dev/null
+++ b/include/asm-sh/hugetlb.h
@@ -0,0 +1,91 @@
1#ifndef _ASM_SH_HUGETLB_H
2#define _ASM_SH_HUGETLB_H
3
4#include <asm/page.h>
5
6
7static inline int is_hugepage_only_range(struct mm_struct *mm,
8 unsigned long addr,
9 unsigned long len) {
10 return 0;
11}
12
13/*
14 * If the arch doesn't supply something else, assume that hugepage
15 * size aligned regions are ok without further preparation.
16 */
17static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
18{
19 if (len & ~HPAGE_MASK)
20 return -EINVAL;
21 if (addr & ~HPAGE_MASK)
22 return -EINVAL;
23 return 0;
24}
25
26static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
27}
28
29static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
30 unsigned long addr, unsigned long end,
31 unsigned long floor,
32 unsigned long ceiling)
33{
34 free_pgd_range(tlb, addr, end, floor, ceiling);
35}
36
37static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
38 pte_t *ptep, pte_t pte)
39{
40 set_pte_at(mm, addr, ptep, pte);
41}
42
43static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
44 unsigned long addr, pte_t *ptep)
45{
46 return ptep_get_and_clear(mm, addr, ptep);
47}
48
49static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
50 unsigned long addr, pte_t *ptep)
51{
52}
53
54static inline int huge_pte_none(pte_t pte)
55{
56 return pte_none(pte);
57}
58
59static inline pte_t huge_pte_wrprotect(pte_t pte)
60{
61 return pte_wrprotect(pte);
62}
63
64static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
65 unsigned long addr, pte_t *ptep)
66{
67 ptep_set_wrprotect(mm, addr, ptep);
68}
69
70static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
71 unsigned long addr, pte_t *ptep,
72 pte_t pte, int dirty)
73{
74 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
75}
76
77static inline pte_t huge_ptep_get(pte_t *ptep)
78{
79 return *ptep;
80}
81
82static inline int arch_prepare_hugepage(struct page *page)
83{
84 return 0;
85}
86
87static inline void arch_release_hugepage(struct page *page)
88{
89}
90
91#endif /* _ASM_SH_HUGETLB_H */
diff --git a/include/asm-sh/pgtable_32.h b/include/asm-sh/pgtable_32.h
index 3e3557c53c55..cbc731d35c25 100644
--- a/include/asm-sh/pgtable_32.h
+++ b/include/asm-sh/pgtable_32.h
@@ -326,6 +326,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
326#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY) 326#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
327#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED) 327#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
328#define pte_file(pte) ((pte).pte_low & _PAGE_FILE) 328#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
329#define pte_special(pte) (0)
329 330
330#ifdef CONFIG_X2TLB 331#ifdef CONFIG_X2TLB
331#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) 332#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
@@ -356,6 +357,8 @@ PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
356PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); 357PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
357PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); 358PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
358 359
360static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
361
359/* 362/*
360 * Macro and implementation to make a page protection as uncachable. 363 * Macro and implementation to make a page protection as uncachable.
361 */ 364 */
diff --git a/include/asm-sh/pgtable_64.h b/include/asm-sh/pgtable_64.h
index f9dd9d311441..c78990cda557 100644
--- a/include/asm-sh/pgtable_64.h
+++ b/include/asm-sh/pgtable_64.h
@@ -254,10 +254,11 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
254/* 254/*
255 * The following have defined behavior only work if pte_present() is true. 255 * The following have defined behavior only work if pte_present() is true.
256 */ 256 */
257static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; } 257static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
258static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; } 258static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
259static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 259static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
260static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; } 260static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
261static inline int pte_special(pte_t pte){ return 0; }
261 262
262static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; } 263static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
263static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } 264static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
@@ -266,6 +267,7 @@ static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) |
266static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } 267static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
267static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } 268static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
268static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } 269static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
270static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
269 271
270 272
271/* 273/*
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 2cc235b74d94..d84af6d95f5c 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -219,6 +219,11 @@ static inline int pte_file(pte_t pte)
219 return pte_val(pte) & BTFIXUP_HALF(pte_filei); 219 return pte_val(pte) & BTFIXUP_HALF(pte_filei);
220} 220}
221 221
222static inline int pte_special(pte_t pte)
223{
224 return 0;
225}
226
222/* 227/*
223 */ 228 */
224BTFIXUPDEF_HALF(pte_wrprotecti) 229BTFIXUPDEF_HALF(pte_wrprotecti)
@@ -251,6 +256,8 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
251#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte) 256#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
252#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte) 257#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
253 258
259#define pte_mkspecial(pte) (pte)
260
254#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot) 261#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
255 262
256BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t) 263BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
diff --git a/include/asm-sparc64/hugetlb.h b/include/asm-sparc64/hugetlb.h
new file mode 100644
index 000000000000..412af58926a0
--- /dev/null
+++ b/include/asm-sparc64/hugetlb.h
@@ -0,0 +1,84 @@
1#ifndef _ASM_SPARC64_HUGETLB_H
2#define _ASM_SPARC64_HUGETLB_H
3
4#include <asm/page.h>
5
6
7void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
8 pte_t *ptep, pte_t pte);
9
10pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
11 pte_t *ptep);
12
13void hugetlb_prefault_arch_hook(struct mm_struct *mm);
14
15static inline int is_hugepage_only_range(struct mm_struct *mm,
16 unsigned long addr,
17 unsigned long len) {
18 return 0;
19}
20
21/*
22 * If the arch doesn't supply something else, assume that hugepage
23 * size aligned regions are ok without further preparation.
24 */
25static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
26{
27 if (len & ~HPAGE_MASK)
28 return -EINVAL;
29 if (addr & ~HPAGE_MASK)
30 return -EINVAL;
31 return 0;
32}
33
34static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
35 unsigned long addr, unsigned long end,
36 unsigned long floor,
37 unsigned long ceiling)
38{
39 free_pgd_range(tlb, addr, end, floor, ceiling);
40}
41
42static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
43 unsigned long addr, pte_t *ptep)
44{
45}
46
47static inline int huge_pte_none(pte_t pte)
48{
49 return pte_none(pte);
50}
51
52static inline pte_t huge_pte_wrprotect(pte_t pte)
53{
54 return pte_wrprotect(pte);
55}
56
57static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
58 unsigned long addr, pte_t *ptep)
59{
60 ptep_set_wrprotect(mm, addr, ptep);
61}
62
63static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
64 unsigned long addr, pte_t *ptep,
65 pte_t pte, int dirty)
66{
67 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
68}
69
70static inline pte_t huge_ptep_get(pte_t *ptep)
71{
72 return *ptep;
73}
74
75static inline int arch_prepare_hugepage(struct page *page)
76{
77 return 0;
78}
79
80static inline void arch_release_hugepage(struct page *page)
81{
82}
83
84#endif /* _ASM_SPARC64_HUGETLB_H */
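For reference, sparc64 keeps its existing out-of-line set_huge_pte_at(), huge_ptep_get_and_clear() and hugetlb_prefault_arch_hook() (only the prototypes live here), while the remaining hooks become trivial inlines. A quick worked example of the prepare_hugepage_range() check above, assuming a 4 MB HPAGE_SIZE (the real value depends on the architecture and configuration):

    prepare_hugepage_range(0x400000, 0xc00000)  /* 0: addr and len aligned   */
    prepare_hugepage_range(0x401000, 0xc00000)  /* -EINVAL: addr not aligned */
    prepare_hugepage_range(0x400000, 0xc01000)  /* -EINVAL: len not aligned  */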
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index e93a482aa24a..618117def0dc 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -39,8 +39,6 @@
39#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) 39#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
40#define HPAGE_MASK (~(HPAGE_SIZE - 1UL)) 40#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
41#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 41#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
42#define ARCH_HAS_SETCLEAR_HUGE_PTE
43#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
44#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 42#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
45#endif 43#endif
46 44
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 549e45266b68..0e200e7acec7 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -506,6 +506,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
506 return __pte(pte_val(pte) | mask); 506 return __pte(pte_val(pte) | mask);
507} 507}
508 508
509static inline pte_t pte_mkspecial(pte_t pte)
510{
511 return pte;
512}
513
509static inline unsigned long pte_young(pte_t pte) 514static inline unsigned long pte_young(pte_t pte)
510{ 515{
511 unsigned long mask; 516 unsigned long mask;
@@ -608,6 +613,11 @@ static inline unsigned long pte_present(pte_t pte)
608 return val; 613 return val;
609} 614}
610 615
616static inline int pte_special(pte_t pte)
617{
618 return 0;
619}
620
611#define pmd_set(pmdp, ptep) \ 621#define pmd_set(pmdp, ptep) \
612 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) 622 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
613#define pud_set(pudp, pmdp) \ 623#define pud_set(pudp, pmdp) \
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index 4102b443e925..02db81b7b86e 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -173,6 +173,11 @@ static inline int pte_newprot(pte_t pte)
173 return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT))); 173 return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
174} 174}
175 175
176static inline int pte_special(pte_t pte)
177{
178 return 0;
179}
180
176/* 181/*
177 * ================================= 182 * =================================
178 * Flags setting section. 183 * Flags setting section.
@@ -241,6 +246,11 @@ static inline pte_t pte_mknewpage(pte_t pte)
241 return(pte); 246 return(pte);
242} 247}
243 248
249static inline pte_t pte_mkspecial(pte_t pte)
250{
251 return(pte);
252}
253
244static inline void set_pte(pte_t *pteptr, pte_t pteval) 254static inline void set_pte(pte_t *pteptr, pte_t pteval)
245{ 255{
246 pte_copy(*pteptr, pteval); 256 pte_copy(*pteptr, pteval);
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 9870cc1f2f8f..7154dc4de951 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -30,7 +30,13 @@ extern int geode_get_dev_base(unsigned int dev);
30 30
31/* MSRS */ 31/* MSRS */
32 32
33#define GX_GLCP_SYS_RSTPLL 0x4C000014 33#define MSR_GLIU_P2D_RO0 0x10000029
34
35#define MSR_LX_GLD_MSR_CONFIG 0x48002001
36#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
37 * sheet has the wrong value */
38#define MSR_GLCP_SYS_RSTPLL 0x4C000014
39#define MSR_GLCP_DOTPLL 0x4C000015
34 40
35#define MSR_LBAR_SMB 0x5140000B 41#define MSR_LBAR_SMB 0x5140000B
36#define MSR_LBAR_GPIO 0x5140000C 42#define MSR_LBAR_GPIO 0x5140000C
@@ -45,8 +51,14 @@ extern int geode_get_dev_base(unsigned int dev);
45#define MSR_PIC_ZSEL_LOW 0x51400022 51#define MSR_PIC_ZSEL_LOW 0x51400022
46#define MSR_PIC_ZSEL_HIGH 0x51400023 52#define MSR_PIC_ZSEL_HIGH 0x51400023
47 53
48#define MFGPT_IRQ_MSR 0x51400028 54#define MSR_MFGPT_IRQ 0x51400028
49#define MFGPT_NR_MSR 0x51400029 55#define MSR_MFGPT_NR 0x51400029
56#define MSR_MFGPT_SETUP 0x5140002B
57
58#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
59
60#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
61#define MSR_GX_MSR_PADSEL 0xC0002011
50 62
51/* Resource Sizes */ 63/* Resource Sizes */
52 64
@@ -93,6 +105,15 @@ extern int geode_get_dev_base(unsigned int dev);
93#define PM_AWKD 0x50 105#define PM_AWKD 0x50
94#define PM_SSC 0x54 106#define PM_SSC 0x54
95 107
108/* VSA2 magic values */
109
110#define VSA_VRC_INDEX 0xAC1C
111#define VSA_VRC_DATA 0xAC1E
112#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
113#define VSA_VR_SIGNATURE 0x0003
114#define VSA_VR_MEM_SIZE 0x0200
115#define VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
116
96/* GPIO */ 117/* GPIO */
97 118
98#define GPIO_OUTPUT_VAL 0x00 119#define GPIO_OUTPUT_VAL 0x00
@@ -164,6 +185,17 @@ static inline int is_geode(void)
164 return (is_geode_gx() || is_geode_lx()); 185 return (is_geode_gx() || is_geode_lx());
165} 186}
166 187
188/*
189 * The VSA has virtual registers that we can query for a signature.
190 */
191static inline int geode_has_vsa2(void)
192{
193 outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
194 outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
195
196 return (inw(VSA_VRC_DATA) == VSA_SIG);
197}
198
167/* MFGPTs */ 199/* MFGPTs */
168 200
169#define MFGPT_MAX_TIMERS 8 201#define MFGPT_MAX_TIMERS 8
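geode_has_vsa2() probes the VSA virtual-register index/data ports and checks for the VSA2 signature, so a driver that relies on the VSA2 BIOS code can gate its probe on it. A minimal sketch, with the driver body left out (the function name is illustrative, not from this patch):

    static int __init my_geode_driver_init(void)
    {
            if (!is_geode_gx() && !is_geode_lx())
                    return -ENODEV;
            if (!geode_has_vsa2())          /* no VSA2 firmware to talk to */
                    return -ENODEV;
            /* ... register the driver ... */
            return 0;
    }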
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
new file mode 100644
index 000000000000..14171a4924f6
--- /dev/null
+++ b/include/asm-x86/hugetlb.h
@@ -0,0 +1,91 @@
1#ifndef _ASM_X86_HUGETLB_H
2#define _ASM_X86_HUGETLB_H
3
4#include <asm/page.h>
5
6
7static inline int is_hugepage_only_range(struct mm_struct *mm,
8 unsigned long addr,
9 unsigned long len) {
10 return 0;
11}
12
13/*
14 * If the arch doesn't supply something else, assume that hugepage
15 * size aligned regions are ok without further preparation.
16 */
17static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
18{
19 if (len & ~HPAGE_MASK)
20 return -EINVAL;
21 if (addr & ~HPAGE_MASK)
22 return -EINVAL;
23 return 0;
24}
25
26static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
27}
28
29static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
30 unsigned long addr, unsigned long end,
31 unsigned long floor,
32 unsigned long ceiling)
33{
34 free_pgd_range(tlb, addr, end, floor, ceiling);
35}
36
37static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
38 pte_t *ptep, pte_t pte)
39{
40 set_pte_at(mm, addr, ptep, pte);
41}
42
43static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
44 unsigned long addr, pte_t *ptep)
45{
46 return ptep_get_and_clear(mm, addr, ptep);
47}
48
49static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
50 unsigned long addr, pte_t *ptep)
51{
52}
53
54static inline int huge_pte_none(pte_t pte)
55{
56 return pte_none(pte);
57}
58
59static inline pte_t huge_pte_wrprotect(pte_t pte)
60{
61 return pte_wrprotect(pte);
62}
63
64static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
65 unsigned long addr, pte_t *ptep)
66{
67 ptep_set_wrprotect(mm, addr, ptep);
68}
69
70static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
71 unsigned long addr, pte_t *ptep,
72 pte_t pte, int dirty)
73{
74 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
75}
76
77static inline pte_t huge_ptep_get(pte_t *ptep)
78{
79 return *ptep;
80}
81
82static inline int arch_prepare_hugepage(struct page *page)
83{
84 return 0;
85}
86
87static inline void arch_release_hugepage(struct page *page)
88{
89}
90
91#endif /* _ASM_X86_HUGETLB_H */
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 7a71120426a3..80eefef2cc76 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -188,4 +188,45 @@ struct kvm_cpuid2 {
188 struct kvm_cpuid_entry2 entries[0]; 188 struct kvm_cpuid_entry2 entries[0];
189}; 189};
190 190
191/* for KVM_GET_PIT and KVM_SET_PIT */
192struct kvm_pit_channel_state {
193 __u32 count; /* can be 65536 */
194 __u16 latched_count;
195 __u8 count_latched;
196 __u8 status_latched;
197 __u8 status;
198 __u8 read_state;
199 __u8 write_state;
200 __u8 write_latch;
201 __u8 rw_mode;
202 __u8 mode;
203 __u8 bcd;
204 __u8 gate;
205 __s64 count_load_time;
206};
207
208struct kvm_pit_state {
209 struct kvm_pit_channel_state channels[3];
210};
211
212#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
213#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
214#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
215#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
216#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
217#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
218#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
219#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
220#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
221#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
222#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
223#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
224#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
225#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
226#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
227#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
228#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
229#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
230#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
231
191#endif 232#endif
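kvm_pit_state mirrors the i8254 channel state so userspace can save and restore the in-kernel PIT. A hedged userspace sketch, assuming a VM file descriptor and an in-kernel PIT created via KVM_CREATE_PIT (the ioctl numbers themselves come from the linux/kvm.h update in the same series, not from this header):

    struct kvm_pit_state ps;

    if (ioctl(vm_fd, KVM_GET_PIT, &ps) == 0) {
            /* e.g. inspect or migrate channel 0's reload value */
            printf("PIT ch0 count: %u\n", ps.channels[0].count);
            ioctl(vm_fd, KVM_SET_PIT, &ps);
    }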
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 68ee390b2844..9d963cd6533c 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -20,6 +20,13 @@
20 20
21#include <asm/desc.h> 21#include <asm/desc.h>
22 22
23#define KVM_MAX_VCPUS 16
24#define KVM_MEMORY_SLOTS 32
25/* memory slots that are not exposed to userspace */
26#define KVM_PRIVATE_MEM_SLOTS 4
27
28#define KVM_PIO_PAGE_OFFSET 1
29
23#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) 30#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
24#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) 31#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
25#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ 32#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
@@ -39,6 +46,13 @@
39#define INVALID_PAGE (~(hpa_t)0) 46#define INVALID_PAGE (~(hpa_t)0)
40#define UNMAPPED_GVA (~(gpa_t)0) 47#define UNMAPPED_GVA (~(gpa_t)0)
41 48
49/* shadow tables are PAE even on non-PAE hosts */
50#define KVM_HPAGE_SHIFT 21
51#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
52#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
53
54#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
55
42#define DE_VECTOR 0 56#define DE_VECTOR 0
43#define UD_VECTOR 6 57#define UD_VECTOR 6
44#define NM_VECTOR 7 58#define NM_VECTOR 7
@@ -48,6 +62,7 @@
48#define SS_VECTOR 12 62#define SS_VECTOR 12
49#define GP_VECTOR 13 63#define GP_VECTOR 13
50#define PF_VECTOR 14 64#define PF_VECTOR 14
65#define MC_VECTOR 18
51 66
52#define SELECTOR_TI_MASK (1 << 2) 67#define SELECTOR_TI_MASK (1 << 2)
53#define SELECTOR_RPL_MASK 0x03 68#define SELECTOR_RPL_MASK 0x03
@@ -58,7 +73,8 @@
58 73
59#define KVM_PERMILLE_MMU_PAGES 20 74#define KVM_PERMILLE_MMU_PAGES 20
60#define KVM_MIN_ALLOC_MMU_PAGES 64 75#define KVM_MIN_ALLOC_MMU_PAGES 64
61#define KVM_NUM_MMU_PAGES 1024 76#define KVM_MMU_HASH_SHIFT 10
77#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
62#define KVM_MIN_FREE_MMU_PAGES 5 78#define KVM_MIN_FREE_MMU_PAGES 5
63#define KVM_REFILL_PAGES 25 79#define KVM_REFILL_PAGES 25
64#define KVM_MAX_CPUID_ENTRIES 40 80#define KVM_MAX_CPUID_ENTRIES 40
@@ -106,6 +122,12 @@ enum {
106 122
107#define KVM_NR_MEM_OBJS 40 123#define KVM_NR_MEM_OBJS 40
108 124
125struct kvm_guest_debug {
126 int enabled;
127 unsigned long bp[4];
128 int singlestep;
129};
130
109/* 131/*
110 * We don't want allocation failures within the mmu code, so we preallocate 132 * We don't want allocation failures within the mmu code, so we preallocate
111 * enough memory for a single page fault in a cache. 133 * enough memory for a single page fault in a cache.
@@ -140,6 +162,7 @@ union kvm_mmu_page_role {
140 unsigned pad_for_nice_hex_output:6; 162 unsigned pad_for_nice_hex_output:6;
141 unsigned metaphysical:1; 163 unsigned metaphysical:1;
142 unsigned access:3; 164 unsigned access:3;
165 unsigned invalid:1;
143 }; 166 };
144}; 167};
145 168
@@ -204,11 +227,6 @@ struct kvm_vcpu_arch {
204 u64 shadow_efer; 227 u64 shadow_efer;
205 u64 apic_base; 228 u64 apic_base;
206 struct kvm_lapic *apic; /* kernel irqchip context */ 229 struct kvm_lapic *apic; /* kernel irqchip context */
207#define VCPU_MP_STATE_RUNNABLE 0
208#define VCPU_MP_STATE_UNINITIALIZED 1
209#define VCPU_MP_STATE_INIT_RECEIVED 2
210#define VCPU_MP_STATE_SIPI_RECEIVED 3
211#define VCPU_MP_STATE_HALTED 4
212 int mp_state; 230 int mp_state;
213 int sipi_vector; 231 int sipi_vector;
214 u64 ia32_misc_enable_msr; 232 u64 ia32_misc_enable_msr;
@@ -226,8 +244,9 @@ struct kvm_vcpu_arch {
226 u64 *last_pte_updated; 244 u64 *last_pte_updated;
227 245
228 struct { 246 struct {
229 gfn_t gfn; /* presumed gfn during guest pte update */ 247 gfn_t gfn; /* presumed gfn during guest pte update */
230 struct page *page; /* page corresponding to that gfn */ 248 pfn_t pfn; /* pfn corresponding to that gfn */
249 int largepage;
231 } update_pte; 250 } update_pte;
232 251
233 struct i387_fxsave_struct host_fx_image; 252 struct i387_fxsave_struct host_fx_image;
@@ -261,6 +280,11 @@ struct kvm_vcpu_arch {
261 /* emulate context */ 280 /* emulate context */
262 281
263 struct x86_emulate_ctxt emulate_ctxt; 282 struct x86_emulate_ctxt emulate_ctxt;
283
284 gpa_t time;
285 struct kvm_vcpu_time_info hv_clock;
286 unsigned int time_offset;
287 struct page *time_page;
264}; 288};
265 289
266struct kvm_mem_alias { 290struct kvm_mem_alias {
@@ -283,10 +307,13 @@ struct kvm_arch{
283 struct list_head active_mmu_pages; 307 struct list_head active_mmu_pages;
284 struct kvm_pic *vpic; 308 struct kvm_pic *vpic;
285 struct kvm_ioapic *vioapic; 309 struct kvm_ioapic *vioapic;
310 struct kvm_pit *vpit;
286 311
287 int round_robin_prev_vcpu; 312 int round_robin_prev_vcpu;
288 unsigned int tss_addr; 313 unsigned int tss_addr;
289 struct page *apic_access_page; 314 struct page *apic_access_page;
315
316 gpa_t wall_clock;
290}; 317};
291 318
292struct kvm_vm_stat { 319struct kvm_vm_stat {
@@ -298,6 +325,7 @@ struct kvm_vm_stat {
298 u32 mmu_recycled; 325 u32 mmu_recycled;
299 u32 mmu_cache_miss; 326 u32 mmu_cache_miss;
300 u32 remote_tlb_flush; 327 u32 remote_tlb_flush;
328 u32 lpages;
301}; 329};
302 330
303struct kvm_vcpu_stat { 331struct kvm_vcpu_stat {
@@ -320,6 +348,7 @@ struct kvm_vcpu_stat {
320 u32 fpu_reload; 348 u32 fpu_reload;
321 u32 insn_emulation; 349 u32 insn_emulation;
322 u32 insn_emulation_fail; 350 u32 insn_emulation_fail;
351 u32 hypercalls;
323}; 352};
324 353
325struct descriptor_table { 354struct descriptor_table {
@@ -355,6 +384,7 @@ struct kvm_x86_ops {
355 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); 384 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
356 void (*get_segment)(struct kvm_vcpu *vcpu, 385 void (*get_segment)(struct kvm_vcpu *vcpu,
357 struct kvm_segment *var, int seg); 386 struct kvm_segment *var, int seg);
387 int (*get_cpl)(struct kvm_vcpu *vcpu);
358 void (*set_segment)(struct kvm_vcpu *vcpu, 388 void (*set_segment)(struct kvm_vcpu *vcpu,
359 struct kvm_segment *var, int seg); 389 struct kvm_segment *var, int seg);
360 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); 390 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -410,6 +440,15 @@ void kvm_mmu_zap_all(struct kvm *kvm);
410unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 440unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
411void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 441void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
412 442
443int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
444
445int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
446 const void *val, int bytes);
447int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
448 gpa_t addr, unsigned long *ret);
449
450extern bool tdp_enabled;
451
413enum emulation_result { 452enum emulation_result {
414 EMULATE_DONE, /* no further processing */ 453 EMULATE_DONE, /* no further processing */
415 EMULATE_DO_MMIO, /* kvm_run filled with mmio request */ 454 EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
@@ -429,6 +468,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
429unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr); 468unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
430void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value, 469void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
431 unsigned long *rflags); 470 unsigned long *rflags);
471void kvm_enable_efer_bits(u64);
432int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); 472int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
433int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); 473int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
434 474
@@ -448,12 +488,14 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
448int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 488int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
449 unsigned long value); 489 unsigned long value);
450 490
451void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); 491int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
452void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0); 492
453void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0); 493void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
454void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0); 494void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
455unsigned long get_cr8(struct kvm_vcpu *vcpu); 495void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
456void lmsw(struct kvm_vcpu *vcpu, unsigned long msw); 496void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
497unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
498void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
457void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); 499void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
458 500
459int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); 501int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
@@ -491,6 +533,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
491 533
492int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); 534int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
493 535
536void kvm_enable_tdp(void);
537
494int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 538int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
495int complete_pio(struct kvm_vcpu *vcpu); 539int complete_pio(struct kvm_vcpu *vcpu);
496 540
@@ -600,6 +644,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
600#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" 644#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
601#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" 645#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
602#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" 646#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
647#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
603 648
604#define MSR_IA32_TIME_STAMP_COUNTER 0x010 649#define MSR_IA32_TIME_STAMP_COUNTER 0x010
605 650
@@ -610,4 +655,30 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
610#define RMODE_TSS_SIZE \ 655#define RMODE_TSS_SIZE \
611 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) 656 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
612 657
658enum {
659 TASK_SWITCH_CALL = 0,
660 TASK_SWITCH_IRET = 1,
661 TASK_SWITCH_JMP = 2,
662 TASK_SWITCH_GATE = 3,
663};
664
665#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
666 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
667 vcpu, 5, d1, d2, d3, d4, d5)
668#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
669 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
670 vcpu, 4, d1, d2, d3, d4, 0)
671#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
672 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
673 vcpu, 3, d1, d2, d3, 0, 0)
674#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
675 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
676 vcpu, 2, d1, d2, 0, 0, 0)
677#define KVMTRACE_1D(evt, vcpu, d1, name) \
678 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
679 vcpu, 1, d1, 0, 0, 0, 0)
680#define KVMTRACE_0D(evt, vcpu, name) \
681 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
682 vcpu, 0, 0, 0, 0, 0, 0)
683
613#endif 684#endif
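The KVMTRACE_nD() macros all funnel into trace_mark() with a fixed 8-slot format; the event argument selects the KVM_TRC_* id and the trailing name picks the marker. An illustrative call from an exit handler (the surrounding handler and its locals are assumed, not part of this header):

    /* record a guest MSR read: three data words on the "handler" marker */
    KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32), handler);

which expands to roughly:

    trace_mark(kvm_trace_handler, "%u %p %u %u %u %u %u %u",
               KVM_TRC_MSR_READ, vcpu, 3, ecx, (u32)data, (u32)(data >> 32), 0, 0);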
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index c6f3fd8d8c53..509845942070 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -10,10 +10,65 @@
10 * paravirtualization, the appropriate feature bit should be checked. 10 * paravirtualization, the appropriate feature bit should be checked.
11 */ 11 */
12#define KVM_CPUID_FEATURES 0x40000001 12#define KVM_CPUID_FEATURES 0x40000001
13#define KVM_FEATURE_CLOCKSOURCE 0
14#define KVM_FEATURE_NOP_IO_DELAY 1
15#define KVM_FEATURE_MMU_OP 2
16
17#define MSR_KVM_WALL_CLOCK 0x11
18#define MSR_KVM_SYSTEM_TIME 0x12
19
20#define KVM_MAX_MMU_OP_BATCH 32
21
22/* Operations for KVM_HC_MMU_OP */
23#define KVM_MMU_OP_WRITE_PTE 1
24#define KVM_MMU_OP_FLUSH_TLB 2
25#define KVM_MMU_OP_RELEASE_PT 3
26
27/* Payload for KVM_HC_MMU_OP */
28struct kvm_mmu_op_header {
29 __u32 op;
30 __u32 pad;
31};
32
33struct kvm_mmu_op_write_pte {
34 struct kvm_mmu_op_header header;
35 __u64 pte_phys;
36 __u64 pte_val;
37};
38
39struct kvm_mmu_op_flush_tlb {
40 struct kvm_mmu_op_header header;
41};
42
43struct kvm_mmu_op_release_pt {
44 struct kvm_mmu_op_header header;
45 __u64 pt_phys;
46};
13 47
14#ifdef __KERNEL__ 48#ifdef __KERNEL__
15#include <asm/processor.h> 49#include <asm/processor.h>
16 50
51/* Xen binary-compatible interface. See Xen headers for details */
52struct kvm_vcpu_time_info {
53 uint32_t version;
54 uint32_t pad0;
55 uint64_t tsc_timestamp;
56 uint64_t system_time;
57 uint32_t tsc_to_system_mul;
58 int8_t tsc_shift;
59 int8_t pad[3];
60} __attribute__((__packed__)); /* 32 bytes */
61
62struct kvm_wall_clock {
63 uint32_t wc_version;
64 uint32_t wc_sec;
65 uint32_t wc_nsec;
66} __attribute__((__packed__));
67
68
69extern void kvmclock_init(void);
70
71
17/* This instruction is vmcall. On non-VT architectures, it will generate a 72/* This instruction is vmcall. On non-VT architectures, it will generate a
18 * trap that we will then rewrite to the appropriate instruction. 73 * trap that we will then rewrite to the appropriate instruction.
19 */ 74 */
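kvm_vcpu_time_info follows the Xen pvclock layout: the guest reads the TSC, scales the delta since tsc_timestamp by tsc_to_system_mul and tsc_shift, and adds system_time. A simplified guest-side sketch; the plain 64-bit multiply can overflow for very large deltas (the real code uses a wider multiply), and callers must also retry while the version field is odd or changes across the read:

    static u64 pvclock_sample_ns(struct kvm_vcpu_time_info *ti, u64 tsc)
    {
            u64 delta = tsc - ti->tsc_timestamp;

            if (ti->tsc_shift >= 0)
                    delta <<= ti->tsc_shift;
            else
                    delta >>= -ti->tsc_shift;

            /* cycles -> nanoseconds: 32.32 fixed-point scale */
            return ti->system_time + ((delta * ti->tsc_to_system_mul) >> 32);
    }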
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index a496d6335d3b..801b31f71452 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -195,6 +195,11 @@ static inline int pte_exec(pte_t pte)
195 return !(pte_val(pte) & _PAGE_NX); 195 return !(pte_val(pte) & _PAGE_NX);
196} 196}
197 197
198static inline int pte_special(pte_t pte)
199{
200 return 0;
201}
202
198static inline int pmd_large(pmd_t pte) 203static inline int pmd_large(pmd_t pte)
199{ 204{
200 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == 205 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -256,6 +261,11 @@ static inline pte_t pte_clrglobal(pte_t pte)
256 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); 261 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
257} 262}
258 263
264static inline pte_t pte_mkspecial(pte_t pte)
265{
266 return pte;
267}
268
259extern pteval_t __supported_pte_mask; 269extern pteval_t __supported_pte_mask;
260 270
261static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) 271static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 117343b0c271..2e7974ec77ec 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -722,6 +722,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
722 722
723static inline void __sti_mwait(unsigned long eax, unsigned long ecx) 723static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
724{ 724{
725 trace_hardirqs_on();
725 /* "mwait %eax, %ecx;" */ 726 /* "mwait %eax, %ecx;" */
726 asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" 727 asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
727 :: "a" (eax), "c" (ecx)); 728 :: "a" (eax), "c" (ecx));
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index 6b5233b4f84b..e63741f19392 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -15,5 +15,7 @@ struct machine_ops {
15extern struct machine_ops machine_ops; 15extern struct machine_ops machine_ops;
16 16
17void machine_real_restart(unsigned char *code, int length); 17void machine_real_restart(unsigned char *code, int length);
18void native_machine_crash_shutdown(struct pt_regs *regs);
19void native_machine_shutdown(void);
18 20
19#endif /* _ASM_REBOOT_H */ 21#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index c8b024a48b4d..8014d96b21f1 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -210,6 +210,8 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
210static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 210static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
211static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 211static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
212static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 212static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
213static inline int pte_special(pte_t pte) { return 0; }
214
213static inline pte_t pte_wrprotect(pte_t pte) 215static inline pte_t pte_wrprotect(pte_t pte)
214 { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; } 216 { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
215static inline pte_t pte_mkclean(pte_t pte) 217static inline pte_t pte_mkclean(pte_t pte)
@@ -222,6 +224,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
222 { pte_val(pte) |= _PAGE_ACCESSED; return pte; } 224 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
223static inline pte_t pte_mkwrite(pte_t pte) 225static inline pte_t pte_mkwrite(pte_t pte)
224 { pte_val(pte) |= _PAGE_WRITABLE; return pte; } 226 { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
227static inline pte_t pte_mkspecial(pte_t pte)
228 { return pte; }
225 229
226/* 230/*
227 * Conversion functions: convert a page and protection to a page entry, 231 * Conversion functions: convert a page and protection to a page entry,
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 1dbe074f1c64..43b406def35f 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -46,6 +46,8 @@
46 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n 46 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
47 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) 47 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
48 * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) 48 * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
49 * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
50 * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
49 * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf 51 * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
50 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf 52 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
51 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf 53 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
@@ -121,6 +123,10 @@ extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
121 const unsigned long *old, const unsigned long *new, int bits); 123 const unsigned long *old, const unsigned long *new, int bits);
122extern int bitmap_bitremap(int oldbit, 124extern int bitmap_bitremap(int oldbit,
123 const unsigned long *old, const unsigned long *new, int bits); 125 const unsigned long *old, const unsigned long *new, int bits);
126extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
127 const unsigned long *relmap, int bits);
128extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
129 int sz, int bits);
124extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); 130extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
125extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); 131extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
126extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); 132extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
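The one-line summaries above are terse; lib/bitmap.c carries the full kernel-doc. Roughly, bitmap_onto() maps the n-th bit of orig onto the n-th set bit of relmap, and bitmap_fold() wraps bit positions modulo sz. A small illustrative case (the concrete numbers follow my reading of the lib/bitmap.c documentation, so treat them as an assumption):

    /* relmap has bits 30-39 set, orig has bits 1, 3, 5 and 7 set */
    bitmap_onto(dst, orig, relmap, 64);   /* dst gets bits 31, 33, 35, 37 */

    /* every set bit n of orig sets bit n % 10 of dst */
    bitmap_fold(dst, orig, 10, 64);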
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 4e4e340592fb..6a5dbdc8a7dc 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -101,6 +101,8 @@ extern void reserve_bootmem_node(pg_data_t *pgdat,
101extern void free_bootmem_node(pg_data_t *pgdat, 101extern void free_bootmem_node(pg_data_t *pgdat,
102 unsigned long addr, 102 unsigned long addr,
103 unsigned long size); 103 unsigned long size);
104extern void *alloc_bootmem_section(unsigned long size,
105 unsigned long section_nr);
104 106
105#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE 107#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
106#define alloc_bootmem_node(pgdat, x) \ 108#define alloc_bootmem_node(pgdat, x) \
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index e8406c55c6d3..cf0303a60611 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -56,19 +56,25 @@ struct sg_io_v4 {
56#if defined(CONFIG_BLK_DEV_BSG) 56#if defined(CONFIG_BLK_DEV_BSG)
57struct bsg_class_device { 57struct bsg_class_device {
58 struct device *class_dev; 58 struct device *class_dev;
59 struct device *dev; 59 struct device *parent;
60 int minor; 60 int minor;
61 struct request_queue *queue; 61 struct request_queue *queue;
62 struct kref ref;
63 void (*release)(struct device *);
62}; 64};
63 65
64extern int bsg_register_queue(struct request_queue *, struct device *, const char *); 66extern int bsg_register_queue(struct request_queue *q,
67 struct device *parent, const char *name,
68 void (*release)(struct device *));
65extern void bsg_unregister_queue(struct request_queue *); 69extern void bsg_unregister_queue(struct request_queue *);
66#else 70#else
67static inline int bsg_register_queue(struct request_queue * rq, struct device *dev, const char *name) 71static inline int bsg_register_queue(struct request_queue *q,
72 struct device *parent, const char *name,
73 void (*release)(struct device *))
68{ 74{
69 return 0; 75 return 0;
70} 76}
71static inline void bsg_unregister_queue(struct request_queue *rq) 77static inline void bsg_unregister_queue(struct request_queue *q)
72{ 78{
73} 79}
74#endif 80#endif
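bsg_class_device is now reference counted, so registration takes a release callback along with the parent device. A hedged sketch of a call site (the device, name and callback are illustrative; the real SCSI/block call sites are not part of this hunk):

    static void my_bsg_release(struct device *dev)
    {
            put_device(dev);        /* drop the reference taken at register time */
    }

    ret = bsg_register_queue(q, &mydev->dev, "mydev_bsg", my_bsg_release);
    if (ret)
            dev_err(&mydev->dev, "bsg registration failed\n");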
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 4552504c0228..97e24881c4c6 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -60,4 +60,8 @@
60#endif 60#endif
61#endif 61#endif
62 62
63#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
64#define cache_line_size() L1_CACHE_BYTES
65#endif
66
63#endif /* __LINUX_CACHE_H */ 67#endif /* __LINUX_CACHE_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 7d50ff6d269f..eaab759b1460 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -155,6 +155,7 @@ typedef struct kernel_cap_struct {
155 * Add any capability from current's capability bounding set 155 * Add any capability from current's capability bounding set
156 * to the current process' inheritable set 156 * to the current process' inheritable set
157 * Allow taking bits out of capability bounding set 157 * Allow taking bits out of capability bounding set
158 * Allow modification of the securebits for a process
158 */ 159 */
159 160
160#define CAP_SETPCAP 8 161#define CAP_SETPCAP 8
@@ -490,8 +491,6 @@ extern const kernel_cap_t __cap_init_eff_set;
490int capable(int cap); 491int capable(int cap);
491int __capable(struct task_struct *t, int cap); 492int __capable(struct task_struct *t, int cap);
492 493
493extern long cap_prctl_drop(unsigned long cap);
494
495#endif /* __KERNEL__ */ 494#endif /* __KERNEL__ */
496 495
497#endif /* !_LINUX_CAPABILITY_H */ 496#endif /* !_LINUX_CAPABILITY_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 259c8051155d..9650806fe2ea 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -14,6 +14,8 @@
14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. 14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
15 * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c 15 * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
16 * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c. 16 * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
17 * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
18 * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
17 * 19 *
18 * The available cpumask operations are: 20 * The available cpumask operations are:
19 * 21 *
@@ -53,7 +55,9 @@
53 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing 55 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
54 * int cpulist_parse(buf, map) Parse ascii string as cpulist 56 * int cpulist_parse(buf, map) Parse ascii string as cpulist
55 * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit) 57 * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
56 * int cpus_remap(dst, src, old, new) *dst = map(old, new)(src) 58 * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
59 * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
60 * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
57 * 61 *
58 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask 62 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask
59 * 63 *
@@ -330,6 +334,22 @@ static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
330 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); 334 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
331} 335}
332 336
337#define cpus_onto(dst, orig, relmap) \
338 __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
339static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
340 const cpumask_t *relmapp, int nbits)
341{
342 bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
343}
344
345#define cpus_fold(dst, orig, sz) \
346 __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
347static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
348 int sz, int nbits)
349{
350 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
351}
352
333#if NR_CPUS > 1 353#if NR_CPUS > 1
334#define for_each_cpu_mask(cpu, mask) \ 354#define for_each_cpu_mask(cpu, mask) \
335 for ((cpu) = first_cpu(mask); \ 355 for ((cpu) = first_cpu(mask); \
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 726761e24003..038578362b47 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -26,7 +26,7 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
26#define cpuset_current_mems_allowed (current->mems_allowed) 26#define cpuset_current_mems_allowed (current->mems_allowed)
27void cpuset_init_current_mems_allowed(void); 27void cpuset_init_current_mems_allowed(void);
28void cpuset_update_task_memory_state(void); 28void cpuset_update_task_memory_state(void);
29int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); 29int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
30 30
31extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask); 31extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
32extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask); 32extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
@@ -103,7 +103,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
103static inline void cpuset_init_current_mems_allowed(void) {} 103static inline void cpuset_init_current_mems_allowed(void) {}
104static inline void cpuset_update_task_memory_state(void) {} 104static inline void cpuset_update_task_memory_state(void) {}
105 105
106static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) 106static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
107{ 107{
108 return 1; 108 return 1;
109} 109}
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 325acdf5c462..2a063b64133f 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -90,6 +90,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
90static inline const char * dmi_get_system_info(int field) { return NULL; } 90static inline const char * dmi_get_system_info(int field) { return NULL; }
91static inline const struct dmi_device * dmi_find_device(int type, const char *name, 91static inline const struct dmi_device * dmi_find_device(int type, const char *name,
92 const struct dmi_device *from) { return NULL; } 92 const struct dmi_device *from) { return NULL; }
93static inline void dmi_scan_machine(void) { return; }
93static inline int dmi_get_year(int year) { return 0; } 94static inline int dmi_get_year(int year) { return 0; }
94static inline int dmi_name_in_vendors(const char *s) { return 0; } 95static inline int dmi_name_in_vendors(const char *s) { return 0; }
95#define dmi_available 0 96#define dmi_available 0
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 58c57a33e5dd..72295b099228 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -791,6 +791,17 @@ struct fb_tile_ops {
791 */ 791 */
792#define FBINFO_MISC_ALWAYS_SETPAR 0x40000 792#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
793 793
794/*
795 * Host and GPU endianness differ.
796 */
797#define FBINFO_FOREIGN_ENDIAN 0x100000
798/*
799 * Big endian math. This is the same flag as above, but with a different
800 * meaning: it is set by the fb subsystem depending on the FOREIGN_ENDIAN flag
801 * and host endianness. Drivers should not use this flag.
802 */
803#define FBINFO_BE_MATH 0x100000
804
794struct fb_info { 805struct fb_info {
795 int node; 806 int node;
796 int flags; 807 int flags;
@@ -899,15 +910,11 @@ struct fb_info {
899 910
900#endif 911#endif
901 912
902#if defined (__BIG_ENDIAN) 913#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0)
903#define FB_LEFT_POS(bpp) (32 - bpp) 914#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
904#define FB_SHIFT_HIGH(val, bits) ((val) >> (bits)) 915 (val) << (bits))
905#define FB_SHIFT_LOW(val, bits) ((val) << (bits)) 916#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? (val) << (bits) : \
906#else 917 (val) >> (bits))
907#define FB_LEFT_POS(bpp) (0)
908#define FB_SHIFT_HIGH(val, bits) ((val) << (bits))
909#define FB_SHIFT_LOW(val, bits) ((val) >> (bits))
910#endif
911 918
912 /* 919 /*
913 * `Generic' versions of the frame buffer device operations 920 * `Generic' versions of the frame buffer device operations
@@ -970,6 +977,25 @@ extern void fb_deferred_io_cleanup(struct fb_info *info);
970extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, 977extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry,
971 int datasync); 978 int datasync);
972 979
980static inline bool fb_be_math(struct fb_info *info)
981{
982#ifdef CONFIG_FB_FOREIGN_ENDIAN
983#if defined(CONFIG_FB_BOTH_ENDIAN)
984 return info->flags & FBINFO_BE_MATH;
985#elif defined(CONFIG_FB_BIG_ENDIAN)
986 return true;
987#elif defined(CONFIG_FB_LITTLE_ENDIAN)
988 return false;
989#endif /* CONFIG_FB_BOTH_ENDIAN */
990#else
991#ifdef __BIG_ENDIAN
992 return true;
993#else
994 return false;
995#endif /* __BIG_ENDIAN */
996#endif /* CONFIG_FB_FOREIGN_ENDIAN */
997}
998
973/* drivers/video/fbsysfs.c */ 999/* drivers/video/fbsysfs.c */
974extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); 1000extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
975extern void framebuffer_release(struct fb_info *info); 1001extern void framebuffer_release(struct fb_info *info);
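The FB_SHIFT_* helpers now take the fb_info pointer so the big-endian decision is made per framebuffer through fb_be_math() instead of at compile time; call sites change mechanically. An illustrative before/after fragment (not taken from this patch):

    /* before: endianness fixed at build time */
    val = FB_SHIFT_HIGH(color, bpp);

    /* after: decided per framebuffer via fb_be_math(info) */
    val = FB_SHIFT_HIGH(info, color, bpp);

Drivers whose hardware really is the opposite endianness set FBINFO_FOREIGN_ENDIAN, and the fb core turns that into FBINFO_BE_MATH (or not) according to the host.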
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d6d7c52055c6..2c925747bc49 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -474,8 +474,8 @@ struct address_space_operations {
474 int (*releasepage) (struct page *, gfp_t); 474 int (*releasepage) (struct page *, gfp_t);
475 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 475 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
476 loff_t offset, unsigned long nr_segs); 476 loff_t offset, unsigned long nr_segs);
477 struct page* (*get_xip_page)(struct address_space *, sector_t, 477 int (*get_xip_mem)(struct address_space *, pgoff_t, int,
478 int); 478 void **, unsigned long *);
479 /* migrate the contents of a page to the specified target */ 479 /* migrate the contents of a page to the specified target */
480 int (*migratepage) (struct address_space *, 480 int (*migratepage) (struct address_space *,
481 struct page *, struct page *); 481 struct page *, struct page *);
@@ -1178,7 +1178,8 @@ struct block_device_operations {
1178 int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long); 1178 int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
1179 long (*unlocked_ioctl) (struct file *, unsigned, unsigned long); 1179 long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
1180 long (*compat_ioctl) (struct file *, unsigned, unsigned long); 1180 long (*compat_ioctl) (struct file *, unsigned, unsigned long);
1181 int (*direct_access) (struct block_device *, sector_t, unsigned long *); 1181 int (*direct_access) (struct block_device *, sector_t,
1182 void **, unsigned long *);
1182 int (*media_changed) (struct gendisk *); 1183 int (*media_changed) (struct gendisk *);
1183 int (*revalidate_disk) (struct gendisk *); 1184 int (*revalidate_disk) (struct gendisk *);
1184 int (*getgeo)(struct block_device *, struct hd_geometry *); 1185 int (*getgeo)(struct block_device *, struct hd_geometry *);
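get_xip_mem() and direct_access() now return a kernel virtual address plus a pfn instead of a struct page, so execute-in-place can work on memory that has no struct page. A hedged sketch of the new direct_access() shape for a simple RAM-backed block driver (the device structure and its base field are assumptions):

    static int myram_direct_access(struct block_device *bdev, sector_t sector,
                                   void **kaddr, unsigned long *pfn)
    {
            struct myram_device *dev = bdev->bd_disk->private_data;
            void *addr = dev->base + ((unsigned long)sector << 9);

            *kaddr = addr;
            *pfn = page_to_pfn(virt_to_page(addr));
            return 0;
    }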
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9da3c1b..c37653b6843f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -119,35 +119,22 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
119 119
120static inline enum zone_type gfp_zone(gfp_t flags) 120static inline enum zone_type gfp_zone(gfp_t flags)
121{ 121{
122 int base = 0;
123
124#ifdef CONFIG_NUMA
125 if (flags & __GFP_THISNODE)
126 base = MAX_NR_ZONES;
127#endif
128
129#ifdef CONFIG_ZONE_DMA 122#ifdef CONFIG_ZONE_DMA
130 if (flags & __GFP_DMA) 123 if (flags & __GFP_DMA)
131 return base + ZONE_DMA; 124 return ZONE_DMA;
132#endif 125#endif
133#ifdef CONFIG_ZONE_DMA32 126#ifdef CONFIG_ZONE_DMA32
134 if (flags & __GFP_DMA32) 127 if (flags & __GFP_DMA32)
135 return base + ZONE_DMA32; 128 return ZONE_DMA32;
136#endif 129#endif
137 if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) == 130 if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
138 (__GFP_HIGHMEM | __GFP_MOVABLE)) 131 (__GFP_HIGHMEM | __GFP_MOVABLE))
139 return base + ZONE_MOVABLE; 132 return ZONE_MOVABLE;
140#ifdef CONFIG_HIGHMEM 133#ifdef CONFIG_HIGHMEM
141 if (flags & __GFP_HIGHMEM) 134 if (flags & __GFP_HIGHMEM)
142 return base + ZONE_HIGHMEM; 135 return ZONE_HIGHMEM;
143#endif 136#endif
144 return base + ZONE_NORMAL; 137 return ZONE_NORMAL;
145}
146
147static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
148{
149 BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
150 return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
151} 138}
152 139
153/* 140/*
@@ -157,13 +144,27 @@ static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
157 * virtual kernel addresses to the allocated page(s). 144 * virtual kernel addresses to the allocated page(s).
158 */ 145 */
159 146
147static inline int gfp_zonelist(gfp_t flags)
148{
149 if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
150 return 1;
151
152 return 0;
153}
154
160/* 155/*
161 * We get the zone list from the current node and the gfp_mask. 156 * We get the zone list from the current node and the gfp_mask.
162 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. 157 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
158 * There are two zonelists per node, one for all zones with memory and
159 * one containing just zones from the node the zonelist belongs to.
163 * 160 *
164 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets 161 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
165 * optimized to &contig_page_data at compile-time. 162 * optimized to &contig_page_data at compile-time.
166 */ 163 */
164static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
165{
166 return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
167}
167 168
168#ifndef HAVE_ARCH_FREE_PAGE 169#ifndef HAVE_ARCH_FREE_PAGE
169static inline void arch_free_page(struct page *page, int order) { } 170static inline void arch_free_page(struct page *page, int order) { }
@@ -174,6 +175,10 @@ static inline void arch_alloc_page(struct page *page, int order) { }
174 175
175extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *); 176extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *);
176 177
178extern struct page *
179__alloc_pages_nodemask(gfp_t, unsigned int,
180 struct zonelist *, nodemask_t *nodemask);
181
177static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, 182static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
178 unsigned int order) 183 unsigned int order)
179{ 184{
@@ -184,8 +189,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
184 if (nid < 0) 189 if (nid < 0)
185 nid = numa_node_id(); 190 nid = numa_node_id();
186 191
187 return __alloc_pages(gfp_mask, order, 192 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
188 NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
189} 193}
190 194
191#ifdef CONFIG_NUMA 195#ifdef CONFIG_NUMA
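With two zonelists per node, callers pick one via node_zonelist(nid, flags) instead of indexing node_zonelists by gfp_zone(), and policy-aware paths can hand an explicit nodemask to __alloc_pages_nodemask(). An illustrative call sequence (get_policy_nodemask() is an assumed helper used only for this example):

    struct zonelist *zl = node_zonelist(numa_node_id(), gfp_mask);
    nodemask_t *nodemask = get_policy_nodemask(pol);    /* assumed helper, may be NULL */

    page = nodemask ? __alloc_pages_nodemask(gfp_mask, order, zl, nodemask)
                    : __alloc_pages(gfp_mask, order, zl);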
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index addca4cd4f11..a79e80b689d8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -8,6 +8,7 @@
8#include <linux/mempolicy.h> 8#include <linux/mempolicy.h>
9#include <linux/shm.h> 9#include <linux/shm.h>
10#include <asm/tlbflush.h> 10#include <asm/tlbflush.h>
11#include <asm/hugetlb.h>
11 12
12struct ctl_table; 13struct ctl_table;
13 14
@@ -51,51 +52,6 @@ int pmd_huge(pmd_t pmd);
51void hugetlb_change_protection(struct vm_area_struct *vma, 52void hugetlb_change_protection(struct vm_area_struct *vma,
52 unsigned long address, unsigned long end, pgprot_t newprot); 53 unsigned long address, unsigned long end, pgprot_t newprot);
53 54
54#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
55#define is_hugepage_only_range(mm, addr, len) 0
56#endif
57
58#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
59#define hugetlb_free_pgd_range free_pgd_range
60#else
61void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
62 unsigned long end, unsigned long floor,
63 unsigned long ceiling);
64#endif
65
66#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
67/*
68 * If the arch doesn't supply something else, assume that hugepage
69 * size aligned regions are ok without further preparation.
70 */
71static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
72{
73 if (len & ~HPAGE_MASK)
74 return -EINVAL;
75 if (addr & ~HPAGE_MASK)
76 return -EINVAL;
77 return 0;
78}
79#else
80int prepare_hugepage_range(unsigned long addr, unsigned long len);
81#endif
82
83#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
84#define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte)
85#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
86#else
87void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
88 pte_t *ptep, pte_t pte);
89pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
90 pte_t *ptep);
91#endif
92
93#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
94#define hugetlb_prefault_arch_hook(mm) do { } while (0)
95#else
96void hugetlb_prefault_arch_hook(struct mm_struct *mm);
97#endif
98
99#else /* !CONFIG_HUGETLB_PAGE */ 55#else /* !CONFIG_HUGETLB_PAGE */
100 56
101static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) 57static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
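With <asm/hugetlb.h> always available, generic code no longer needs the ARCH_HAS_* fallbacks deleted above and can call the hooks unconditionally. A schematic fragment, not the literal mm/hugetlb.c code:

    /* fault path, roughly: install a new huge pte if none is present */
    if (huge_pte_none(huge_ptep_get(ptep)))
            set_huge_pte_at(mm, address, ptep, new_pte);

    /* unmap path, roughly: clear it and keep the old value for the TLB logic */
    pte = huge_ptep_get_and_clear(mm, address, ptep);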
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index e92170dda245..f65e58a1d925 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -613,14 +613,9 @@ struct i2o_sys_tbl {
613extern struct list_head i2o_controllers; 613extern struct list_head i2o_controllers;
614 614
615/* Message functions */ 615/* Message functions */
616static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
617extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int); 616extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
618static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
619static inline int i2o_msg_post_wait(struct i2o_controller *,
620 struct i2o_message *, unsigned long);
621extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *, 617extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
622 unsigned long, struct i2o_dma *); 618 unsigned long, struct i2o_dma *);
623static inline void i2o_flush_reply(struct i2o_controller *, u32);
624 619
625/* IOP functions */ 620/* IOP functions */
626extern int i2o_status_get(struct i2o_controller *); 621extern int i2o_status_get(struct i2o_controller *);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index f0af504dfa42..32fd77bb4436 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -48,13 +48,6 @@ typedef unsigned char byte; /* used everywhere */
48#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ 48#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
49 49
50/* 50/*
51 * Tune flags
52 */
53#define IDE_TUNE_NOAUTO 2
54#define IDE_TUNE_AUTO 1
55#define IDE_TUNE_DEFAULT 0
56
57/*
58 * state flags 51 * state flags
59 */ 52 */
60 53
@@ -68,23 +61,30 @@ typedef unsigned char byte; /* used everywhere */
68 */ 61 */
69#define IDE_NR_PORTS (10) 62#define IDE_NR_PORTS (10)
70 63
71#define IDE_DATA_OFFSET (0) 64struct ide_io_ports {
72#define IDE_ERROR_OFFSET (1) 65 unsigned long data_addr;
73#define IDE_NSECTOR_OFFSET (2) 66
74#define IDE_SECTOR_OFFSET (3) 67 union {
75#define IDE_LCYL_OFFSET (4) 68 unsigned long error_addr; /* read: error */
76#define IDE_HCYL_OFFSET (5) 69 unsigned long feature_addr; /* write: feature */
77#define IDE_SELECT_OFFSET (6) 70 };
78#define IDE_STATUS_OFFSET (7) 71
79#define IDE_CONTROL_OFFSET (8) 72 unsigned long nsect_addr;
80#define IDE_IRQ_OFFSET (9) 73 unsigned long lbal_addr;
81 74 unsigned long lbam_addr;
82#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET 75 unsigned long lbah_addr;
83#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET 76
84#define IDE_ALTSTATUS_OFFSET IDE_CONTROL_OFFSET 77 unsigned long device_addr;
85#define IDE_IREASON_OFFSET IDE_NSECTOR_OFFSET 78
86#define IDE_BCOUNTL_OFFSET IDE_LCYL_OFFSET 79 union {
87#define IDE_BCOUNTH_OFFSET IDE_HCYL_OFFSET 80 unsigned long status_addr; /*  read: status  */
81 unsigned long command_addr; /* write: command */
82 };
83
84 unsigned long ctl_addr;
85
86 unsigned long irq_addr;
87};
88 88
89#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) 89#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
90#define BAD_R_STAT (BUSY_STAT | ERR_STAT) 90#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
@@ -163,7 +163,11 @@ typedef u8 hwif_chipset_t;
163 * Structure to hold all information about the location of this port 163 * Structure to hold all information about the location of this port
164 */ 164 */
165typedef struct hw_regs_s { 165typedef struct hw_regs_s {
166 unsigned long io_ports[IDE_NR_PORTS]; /* task file registers */ 166 union {
167 struct ide_io_ports io_ports;
168 unsigned long io_ports_array[IDE_NR_PORTS];
169 };
170
167 int irq; /* our irq number */ 171 int irq; /* our irq number */
168 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ 172 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
169 hwif_chipset_t chipset; 173 hwif_chipset_t chipset;
@@ -179,10 +183,10 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
179{ 183{
180 unsigned int i; 184 unsigned int i;
181 185
182 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) 186 for (i = 0; i <= 7; i++)
183 hw->io_ports[i] = io_addr++; 187 hw->io_ports_array[i] = io_addr++;
184 188
185 hw->io_ports[IDE_CONTROL_OFFSET] = ctl_addr; 189 hw->io_ports.ctl_addr = ctl_addr;
186} 190}
187 191
188#include <asm/ide.h> 192#include <asm/ide.h>
@@ -328,7 +332,6 @@ typedef struct ide_drive_s {
328 unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */ 332 unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */
329 unsigned doorlocking : 1; /* for removable only: door lock/unlock works */ 333 unsigned doorlocking : 1; /* for removable only: door lock/unlock works */
330 unsigned nodma : 1; /* disallow DMA */ 334 unsigned nodma : 1; /* disallow DMA */
331 unsigned autotune : 2; /* 0=default, 1=autotune, 2=noautotune */
332 unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ 335 unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */
333 unsigned blocked : 1; /* 1=power management told us not to do anything, so sleep nicely */ 336 unsigned blocked : 1; /* 1=power management told us not to do anything, so sleep nicely */
334 unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ 337 unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */
@@ -432,8 +435,8 @@ typedef struct hwif_s {
432 435
433 char name[6]; /* name of interface, eg. "ide0" */ 436 char name[6]; /* name of interface, eg. "ide0" */
434 437
435 /* task file registers for pata and sata */ 438 struct ide_io_ports io_ports;
436 unsigned long io_ports[IDE_NR_PORTS]; 439
437 unsigned long sata_scr[SATA_NR_PORTS]; 440 unsigned long sata_scr[SATA_NR_PORTS];
438 441
439 ide_drive_t drives[MAX_DRIVES]; /* drive info */ 442 ide_drive_t drives[MAX_DRIVES]; /* drive info */
@@ -520,7 +523,6 @@ typedef struct hwif_s {
520 unsigned present : 1; /* this interface exists */ 523 unsigned present : 1; /* this interface exists */
521 unsigned serialized : 1; /* serialized all channel operation */ 524 unsigned serialized : 1; /* serialized all channel operation */
522 unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ 525 unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
523 unsigned reset : 1; /* reset after probe */
524 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ 526 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
525 unsigned mmio : 1; /* host uses MMIO */ 527 unsigned mmio : 1; /* host uses MMIO */
526 528
@@ -703,10 +705,6 @@ void ide_add_generic_settings(ide_drive_t *);
703read_proc_t proc_ide_read_capacity; 705read_proc_t proc_ide_read_capacity;
704read_proc_t proc_ide_read_geometry; 706read_proc_t proc_ide_read_geometry;
705 707
706#ifdef CONFIG_BLK_DEV_IDEPCI
707void ide_pci_create_host_proc(const char *, get_info_t *);
708#endif
709
710/* 708/*
711 * Standard exit stuff: 709 * Standard exit stuff:
712 */ 710 */
@@ -807,8 +805,14 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig
807#ifndef _IDE_C 805#ifndef _IDE_C
808extern ide_hwif_t ide_hwifs[]; /* master data repository */ 806extern ide_hwif_t ide_hwifs[]; /* master data repository */
809#endif 807#endif
808extern int ide_noacpi;
809extern int ide_acpigtf;
810extern int ide_acpionboot;
810extern int noautodma; 811extern int noautodma;
811 812
813extern int ide_vlb_clk;
814extern int ide_pci_clk;
815
812ide_hwif_t *ide_find_port_slot(const struct ide_port_info *); 816ide_hwif_t *ide_find_port_slot(const struct ide_port_info *);
813 817
814static inline ide_hwif_t *ide_find_port(void) 818static inline ide_hwif_t *ide_find_port(void)
@@ -1068,8 +1072,6 @@ enum {
1068 IDE_HFLAG_NO_DMA = (1 << 14), 1072 IDE_HFLAG_NO_DMA = (1 << 14),
1069 /* check if host is PCI IDE device before allowing DMA */ 1073 /* check if host is PCI IDE device before allowing DMA */
1070 IDE_HFLAG_NO_AUTODMA = (1 << 15), 1074 IDE_HFLAG_NO_AUTODMA = (1 << 15),
1071 /* don't autotune PIO */
1072 IDE_HFLAG_NO_AUTOTUNE = (1 << 16),
1073 /* host is CS5510/CS5520 */ 1075 /* host is CS5510/CS5520 */
1074 IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA, 1076 IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA,
1075 /* no LBA48 */ 1077 /* no LBA48 */
@@ -1215,13 +1217,15 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
1215#endif 1217#endif
1216 1218
1217void ide_remove_port_from_hwgroup(ide_hwif_t *); 1219void ide_remove_port_from_hwgroup(ide_hwif_t *);
1218void ide_unregister(unsigned int); 1220void ide_unregister(ide_hwif_t *);
1219 1221
1220void ide_register_region(struct gendisk *); 1222void ide_register_region(struct gendisk *);
1221void ide_unregister_region(struct gendisk *); 1223void ide_unregister_region(struct gendisk *);
1222 1224
1223void ide_undecoded_slave(ide_drive_t *); 1225void ide_undecoded_slave(ide_drive_t *);
1224 1226
1227void ide_port_apply_params(ide_hwif_t *);
1228
1225int ide_device_add_all(u8 *idx, const struct ide_port_info *); 1229int ide_device_add_all(u8 *idx, const struct ide_port_info *);
1226int ide_device_add(u8 idx[4], const struct ide_port_info *); 1230int ide_device_add(u8 idx[4], const struct ide_port_info *);
1227int ide_legacy_device_add(const struct ide_port_info *, unsigned long); 1231int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
@@ -1333,29 +1337,28 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
1333{ 1337{
1334 ide_hwif_t *hwif = drive->hwif; 1338 ide_hwif_t *hwif = drive->hwif;
1335 1339
1336 hwif->OUTB(drive->ctl | (on ? 0 : 2), 1340 hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
1337 hwif->io_ports[IDE_CONTROL_OFFSET]);
1338} 1341}
1339 1342
1340static inline u8 ide_read_status(ide_drive_t *drive) 1343static inline u8 ide_read_status(ide_drive_t *drive)
1341{ 1344{
1342 ide_hwif_t *hwif = drive->hwif; 1345 ide_hwif_t *hwif = drive->hwif;
1343 1346
1344 return hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1347 return hwif->INB(hwif->io_ports.status_addr);
1345} 1348}
1346 1349
1347static inline u8 ide_read_altstatus(ide_drive_t *drive) 1350static inline u8 ide_read_altstatus(ide_drive_t *drive)
1348{ 1351{
1349 ide_hwif_t *hwif = drive->hwif; 1352 ide_hwif_t *hwif = drive->hwif;
1350 1353
1351 return hwif->INB(hwif->io_ports[IDE_CONTROL_OFFSET]); 1354 return hwif->INB(hwif->io_ports.ctl_addr);
1352} 1355}
1353 1356
1354static inline u8 ide_read_error(ide_drive_t *drive) 1357static inline u8 ide_read_error(ide_drive_t *drive)
1355{ 1358{
1356 ide_hwif_t *hwif = drive->hwif; 1359 ide_hwif_t *hwif = drive->hwif;
1357 1360
1358 return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]); 1361 return hwif->INB(hwif->io_ports.error_addr);
1359} 1362}
1360 1363
1361/* 1364/*
@@ -1368,7 +1371,7 @@ static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount)
1368 1371
1369 /* FIXME: use ->atapi_input_bytes */ 1372 /* FIXME: use ->atapi_input_bytes */
1370 while (bcount--) 1373 while (bcount--)
1371 (void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]); 1374 (void)hwif->INB(hwif->io_ports.data_addr);
1372} 1375}
1373 1376
1374static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount) 1377static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
@@ -1377,7 +1380,7 @@ static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
1377 1380
1378 /* FIXME: use ->atapi_output_bytes */ 1381 /* FIXME: use ->atapi_output_bytes */
1379 while (bcount--) 1382 while (bcount--)
1380 hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]); 1383 hwif->OUTB(0, hwif->io_ports.data_addr);
1381} 1384}
1382 1385
1383#endif /* _IDE_H */ 1386#endif /* _IDE_H */
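
The ide.h hunks above replace the IDE_*_OFFSET array indexing with named fields in struct ide_io_ports, kept union-compatible with io_ports_array[]. A minimal sketch, not taken from this patch, of how a host driver might fill in a hw_regs_t after the conversion (base, ctl and irq are hypothetical values):

    /* sketch only: assumes a driver that already includes <linux/ide.h> */
    static void example_init_hw(hw_regs_t *hw, unsigned long base,
                                unsigned long ctl, int irq)
    {
        memset(hw, 0, sizeof(*hw));

        /* contiguous task file at base..base+7 plus a control register:
         * the generic helper fills io_ports_array[0..7] and ctl_addr */
        ide_std_init_ports(hw, base, ctl);

        /* split layouts can now address each register by name instead of
         * indexing io_ports[IDE_*_OFFSET] */
        hw->io_ports.irq_addr = 0;      /* no dedicated IRQ register here */

        hw->irq = irq;
    }

Because io_ports and io_ports_array[] share a union, index-based loops such as the one in ide_std_init_ports() keep working while individual call sites migrate to the named fields.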
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 37a6f5bc4a92..bf6b8a61f8db 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -9,6 +9,7 @@
9#include <linux/ipc.h> 9#include <linux/ipc.h>
10#include <linux/pid_namespace.h> 10#include <linux/pid_namespace.h>
11#include <linux/user_namespace.h> 11#include <linux/user_namespace.h>
12#include <linux/securebits.h>
12#include <net/net_namespace.h> 13#include <net/net_namespace.h>
13 14
14#define INIT_FDTABLE \ 15#define INIT_FDTABLE \
@@ -172,7 +173,7 @@ extern struct group_info init_groups;
172 .cap_inheritable = CAP_INIT_INH_SET, \ 173 .cap_inheritable = CAP_INIT_INH_SET, \
173 .cap_permitted = CAP_FULL_SET, \ 174 .cap_permitted = CAP_FULL_SET, \
174 .cap_bset = CAP_INIT_BSET, \ 175 .cap_bset = CAP_INIT_BSET, \
175 .keep_capabilities = 0, \ 176 .securebits = SECUREBITS_DEFAULT, \
176 .user = INIT_USER, \ 177 .user = INIT_USER, \
177 .comm = "swapper", \ 178 .comm = "swapper", \
178 .thread = INIT_THREAD, \ 179 .thread = INIT_THREAD, \
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0f28486f6360..1036631ff4fa 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -173,6 +173,13 @@ struct kretprobe_blackpoint {
173 const char *name; 173 const char *name;
174 void *addr; 174 void *addr;
175}; 175};
176
177struct kprobe_blackpoint {
178 const char *name;
179 unsigned long start_addr;
180 unsigned long range;
181};
182
176extern struct kretprobe_blackpoint kretprobe_blacklist[]; 183extern struct kretprobe_blackpoint kretprobe_blacklist[];
177 184
178static inline void kretprobe_assert(struct kretprobe_instance *ri, 185static inline void kretprobe_assert(struct kretprobe_instance *ri,
@@ -227,15 +234,21 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
227 234
228int register_kprobe(struct kprobe *p); 235int register_kprobe(struct kprobe *p);
229void unregister_kprobe(struct kprobe *p); 236void unregister_kprobe(struct kprobe *p);
237int register_kprobes(struct kprobe **kps, int num);
238void unregister_kprobes(struct kprobe **kps, int num);
230int setjmp_pre_handler(struct kprobe *, struct pt_regs *); 239int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
231int longjmp_break_handler(struct kprobe *, struct pt_regs *); 240int longjmp_break_handler(struct kprobe *, struct pt_regs *);
232int register_jprobe(struct jprobe *p); 241int register_jprobe(struct jprobe *p);
233void unregister_jprobe(struct jprobe *p); 242void unregister_jprobe(struct jprobe *p);
243int register_jprobes(struct jprobe **jps, int num);
244void unregister_jprobes(struct jprobe **jps, int num);
234void jprobe_return(void); 245void jprobe_return(void);
235unsigned long arch_deref_entry_point(void *); 246unsigned long arch_deref_entry_point(void *);
236 247
237int register_kretprobe(struct kretprobe *rp); 248int register_kretprobe(struct kretprobe *rp);
238void unregister_kretprobe(struct kretprobe *rp); 249void unregister_kretprobe(struct kretprobe *rp);
250int register_kretprobes(struct kretprobe **rps, int num);
251void unregister_kretprobes(struct kretprobe **rps, int num);
239 252
240void kprobe_flush_task(struct task_struct *tk); 253void kprobe_flush_task(struct task_struct *tk);
241void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); 254void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
@@ -254,16 +267,30 @@ static inline int register_kprobe(struct kprobe *p)
254{ 267{
255 return -ENOSYS; 268 return -ENOSYS;
256} 269}
270static inline int register_kprobes(struct kprobe **kps, int num)
271{
272 return -ENOSYS;
273}
257static inline void unregister_kprobe(struct kprobe *p) 274static inline void unregister_kprobe(struct kprobe *p)
258{ 275{
259} 276}
277static inline void unregister_kprobes(struct kprobe **kps, int num)
278{
279}
260static inline int register_jprobe(struct jprobe *p) 280static inline int register_jprobe(struct jprobe *p)
261{ 281{
262 return -ENOSYS; 282 return -ENOSYS;
263} 283}
284static inline int register_jprobes(struct jprobe **jps, int num)
285{
286 return -ENOSYS;
287}
264static inline void unregister_jprobe(struct jprobe *p) 288static inline void unregister_jprobe(struct jprobe *p)
265{ 289{
266} 290}
291static inline void unregister_jprobes(struct jprobe **jps, int num)
292{
293}
267static inline void jprobe_return(void) 294static inline void jprobe_return(void)
268{ 295{
269} 296}
@@ -271,9 +298,16 @@ static inline int register_kretprobe(struct kretprobe *rp)
271{ 298{
272 return -ENOSYS; 299 return -ENOSYS;
273} 300}
301static inline int register_kretprobes(struct kretprobe **rps, int num)
302{
303 return -ENOSYS;
304}
274static inline void unregister_kretprobe(struct kretprobe *rp) 305static inline void unregister_kretprobe(struct kretprobe *rp)
275{ 306{
276} 307}
308static inline void unregister_kretprobes(struct kretprobe **rps, int num)
309{
310}
277static inline void kprobe_flush_task(struct task_struct *tk) 311static inline void kprobe_flush_task(struct task_struct *tk)
278{ 312{
279} 313}
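
The kprobes.h hunks add batch registration/unregistration entry points next to the existing single-probe calls. A hedged sketch of a module using them; the probed symbols and the handler are purely illustrative:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int example_pre(struct kprobe *p, struct pt_regs *regs)
    {
        printk(KERN_INFO "kprobe hit: %s\n", p->symbol_name);
        return 0;
    }

    /* hypothetical probe points, chosen only for illustration */
    static struct kprobe kp_fork = { .symbol_name = "do_fork", .pre_handler = example_pre };
    static struct kprobe kp_exit = { .symbol_name = "do_exit", .pre_handler = example_pre };
    static struct kprobe *kps[] = { &kp_fork, &kp_exit };

    static int __init example_init(void)
    {
        /* registers the whole array in one call */
        return register_kprobes(kps, ARRAY_SIZE(kps));
    }

    static void __exit example_exit(void)
    {
        unregister_kprobes(kps, ARRAY_SIZE(kps));
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");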
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c1ec04fd000d..a281afeddfbb 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -8,11 +8,18 @@
8 */ 8 */
9 9
10#include <asm/types.h> 10#include <asm/types.h>
11#include <linux/compiler.h>
11#include <linux/ioctl.h> 12#include <linux/ioctl.h>
12#include <asm/kvm.h> 13#include <asm/kvm.h>
13 14
14#define KVM_API_VERSION 12 15#define KVM_API_VERSION 12
15 16
17/* for KVM_TRACE_ENABLE */
18struct kvm_user_trace_setup {
19 __u32 buf_size; /* sub_buffer size of each per-cpu */
20 __u32 buf_nr; /* the number of sub_buffers of each per-cpu */
21};
22
16/* for KVM_CREATE_MEMORY_REGION */ 23/* for KVM_CREATE_MEMORY_REGION */
17struct kvm_memory_region { 24struct kvm_memory_region {
18 __u32 slot; 25 __u32 slot;
@@ -73,6 +80,9 @@ struct kvm_irqchip {
73#define KVM_EXIT_INTR 10 80#define KVM_EXIT_INTR 10
74#define KVM_EXIT_SET_TPR 11 81#define KVM_EXIT_SET_TPR 11
75#define KVM_EXIT_TPR_ACCESS 12 82#define KVM_EXIT_TPR_ACCESS 12
83#define KVM_EXIT_S390_SIEIC 13
84#define KVM_EXIT_S390_RESET 14
85#define KVM_EXIT_DCR 15
76 86
77/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ 87/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
78struct kvm_run { 88struct kvm_run {
@@ -137,6 +147,27 @@ struct kvm_run {
137 __u32 is_write; 147 __u32 is_write;
138 __u32 pad; 148 __u32 pad;
139 } tpr_access; 149 } tpr_access;
150 /* KVM_EXIT_S390_SIEIC */
151 struct {
152 __u8 icptcode;
153 __u64 mask; /* psw upper half */
154 __u64 addr; /* psw lower half */
155 __u16 ipa;
156 __u32 ipb;
157 } s390_sieic;
158 /* KVM_EXIT_S390_RESET */
159#define KVM_S390_RESET_POR 1
160#define KVM_S390_RESET_CLEAR 2
161#define KVM_S390_RESET_SUBSYSTEM 4
162#define KVM_S390_RESET_CPU_INIT 8
163#define KVM_S390_RESET_IPL 16
164 __u64 s390_reset_flags;
165 /* KVM_EXIT_DCR */
166 struct {
167 __u32 dcrn;
168 __u32 data;
169 __u8 is_write;
170 } dcr;
140 /* Fix the size of the union. */ 171 /* Fix the size of the union. */
141 char padding[256]; 172 char padding[256];
142 }; 173 };
@@ -204,6 +235,74 @@ struct kvm_vapic_addr {
204 __u64 vapic_addr; 235 __u64 vapic_addr;
205}; 236};
206 237
238/* for KVM_SET_MPSTATE */
239
240#define KVM_MP_STATE_RUNNABLE 0
241#define KVM_MP_STATE_UNINITIALIZED 1
242#define KVM_MP_STATE_INIT_RECEIVED 2
243#define KVM_MP_STATE_HALTED 3
244#define KVM_MP_STATE_SIPI_RECEIVED 4
245
246struct kvm_mp_state {
247 __u32 mp_state;
248};
249
250struct kvm_s390_psw {
251 __u64 mask;
252 __u64 addr;
253};
254
255/* valid values for type in kvm_s390_interrupt */
256#define KVM_S390_SIGP_STOP 0xfffe0000u
257#define KVM_S390_PROGRAM_INT 0xfffe0001u
258#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
259#define KVM_S390_RESTART 0xfffe0003u
260#define KVM_S390_INT_VIRTIO 0xffff2603u
261#define KVM_S390_INT_SERVICE 0xffff2401u
262#define KVM_S390_INT_EMERGENCY 0xffff1201u
263
264struct kvm_s390_interrupt {
265 __u32 type;
266 __u32 parm;
267 __u64 parm64;
268};
269
270#define KVM_TRC_SHIFT 16
271/*
272 * kvm trace categories
273 */
274#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
275#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */
276
277/*
278 * kvm trace action
279 */
280#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
281#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
282#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
283
284#define KVM_TRC_HEAD_SIZE 12
285#define KVM_TRC_CYCLE_SIZE 8
286#define KVM_TRC_EXTRA_MAX 7
287
288/* This structure represents a single trace buffer record. */
289struct kvm_trace_rec {
290 __u32 event:28;
291 __u32 extra_u32:3;
292 __u32 cycle_in:1;
293 __u32 pid;
294 __u32 vcpu_id;
295 union {
296 struct {
297 __u32 cycle_lo, cycle_hi;
298 __u32 extra_u32[KVM_TRC_EXTRA_MAX];
299 } cycle;
300 struct {
301 __u32 extra_u32[KVM_TRC_EXTRA_MAX];
302 } nocycle;
303 } u;
304};
305
207#define KVMIO 0xAE 306#define KVMIO 0xAE
208 307
209/* 308/*
@@ -212,6 +311,8 @@ struct kvm_vapic_addr {
212#define KVM_GET_API_VERSION _IO(KVMIO, 0x00) 311#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
213#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */ 312#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */
214#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list) 313#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list)
314
315#define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06)
215/* 316/*
216 * Check if a kvm extension is available. Argument is extension number, 317 * Check if a kvm extension is available. Argument is extension number,
217 * return is 1 (yes) or 0 (no, sorry). 318 * return is 1 (yes) or 0 (no, sorry).
@@ -222,7 +323,12 @@ struct kvm_vapic_addr {
222 */ 323 */
223#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ 324#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
224#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) 325#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
225 326/*
327 * ioctls for kvm trace
328 */
329#define KVM_TRACE_ENABLE _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
330#define KVM_TRACE_PAUSE _IO(KVMIO, 0x07)
331#define KVM_TRACE_DISABLE _IO(KVMIO, 0x08)
226/* 332/*
227 * Extension capability list. 333 * Extension capability list.
228 */ 334 */
@@ -233,6 +339,13 @@ struct kvm_vapic_addr {
233#define KVM_CAP_SET_TSS_ADDR 4 339#define KVM_CAP_SET_TSS_ADDR 4
234#define KVM_CAP_VAPIC 6 340#define KVM_CAP_VAPIC 6
235#define KVM_CAP_EXT_CPUID 7 341#define KVM_CAP_EXT_CPUID 7
342#define KVM_CAP_CLOCKSOURCE 8
343#define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */
344#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
345#define KVM_CAP_PIT 11
346#define KVM_CAP_NOP_IO_DELAY 12
347#define KVM_CAP_PV_MMU 13
348#define KVM_CAP_MP_STATE 14
236 349
237/* 350/*
238 * ioctls for VM fds 351 * ioctls for VM fds
@@ -255,6 +368,9 @@ struct kvm_vapic_addr {
255#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) 368#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
256#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip) 369#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
257#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip) 370#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
371#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
372#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
373#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
258 374
259/* 375/*
260 * ioctls for vcpu fds 376 * ioctls for vcpu fds
@@ -281,5 +397,17 @@ struct kvm_vapic_addr {
281#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) 397#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
282/* Available with KVM_CAP_VAPIC */ 398/* Available with KVM_CAP_VAPIC */
283#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr) 399#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
400/* valid for virtual machine (for floating interrupt)_and_ vcpu */
401#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt)
402/* store status for s390 */
403#define KVM_S390_STORE_STATUS_NOADDR (-1ul)
404#define KVM_S390_STORE_STATUS_PREFIXED (-2ul)
405#define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long)
406/* initial ipl psw for s390 */
407#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw)
408/* initial reset for s390 */
409#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
410#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
411#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
284 412
285#endif 413#endif
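
The new KVM_TRACE_* ioctls above take a struct kvm_user_trace_setup argument. A rough userspace sketch, assuming the trace ioctls are handled on the /dev/kvm system fd; the buffer sizes are arbitrary, error handling is trimmed, and the trace records themselves are consumed out of band rather than through this fd:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        struct kvm_user_trace_setup kuts = {
            .buf_size = 4096,   /* per-cpu sub-buffer size (arbitrary) */
            .buf_nr   = 64,     /* sub-buffers per cpu (arbitrary) */
        };
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (kvm_fd < 0)
            return 1;
        if (ioctl(kvm_fd, KVM_TRACE_ENABLE, &kuts) < 0)
            perror("KVM_TRACE_ENABLE");

        /* ... run guests; collection can be paused and resumed ... */
        ioctl(kvm_fd, KVM_TRACE_PAUSE);

        ioctl(kvm_fd, KVM_TRACE_DISABLE);
        close(kvm_fd);
        return 0;
    }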
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 928b0d59e9ba..398978972b7a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -15,6 +15,7 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/preempt.h> 17#include <linux/preempt.h>
18#include <linux/marker.h>
18#include <asm/signal.h> 19#include <asm/signal.h>
19 20
20#include <linux/kvm.h> 21#include <linux/kvm.h>
@@ -24,29 +25,18 @@
24 25
25#include <asm/kvm_host.h> 26#include <asm/kvm_host.h>
26 27
27#define KVM_MAX_VCPUS 4
28#define KVM_MEMORY_SLOTS 8
29/* memory slots that does not exposed to userspace */
30#define KVM_PRIVATE_MEM_SLOTS 4
31
32#define KVM_PIO_PAGE_OFFSET 1
33
34/* 28/*
35 * vcpu->requests bit members 29 * vcpu->requests bit members
36 */ 30 */
37#define KVM_REQ_TLB_FLUSH 0 31#define KVM_REQ_TLB_FLUSH 0
38#define KVM_REQ_MIGRATE_TIMER 1 32#define KVM_REQ_MIGRATE_TIMER 1
39#define KVM_REQ_REPORT_TPR_ACCESS 2 33#define KVM_REQ_REPORT_TPR_ACCESS 2
34#define KVM_REQ_MMU_RELOAD 3
35#define KVM_REQ_TRIPLE_FAULT 4
40 36
41struct kvm_vcpu; 37struct kvm_vcpu;
42extern struct kmem_cache *kvm_vcpu_cache; 38extern struct kmem_cache *kvm_vcpu_cache;
43 39
44struct kvm_guest_debug {
45 int enabled;
46 unsigned long bp[4];
47 int singlestep;
48};
49
50/* 40/*
51 * It would be nice to use something smarter than a linear search, TBD... 41 * It would be nice to use something smarter than a linear search, TBD...
52 * Thankfully we dont expect many devices to register (famous last words :), 42 * Thankfully we dont expect many devices to register (famous last words :),
@@ -67,7 +57,9 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
67 57
68struct kvm_vcpu { 58struct kvm_vcpu {
69 struct kvm *kvm; 59 struct kvm *kvm;
60#ifdef CONFIG_PREEMPT_NOTIFIERS
70 struct preempt_notifier preempt_notifier; 61 struct preempt_notifier preempt_notifier;
62#endif
71 int vcpu_id; 63 int vcpu_id;
72 struct mutex mutex; 64 struct mutex mutex;
73 int cpu; 65 int cpu;
@@ -100,6 +92,10 @@ struct kvm_memory_slot {
100 unsigned long flags; 92 unsigned long flags;
101 unsigned long *rmap; 93 unsigned long *rmap;
102 unsigned long *dirty_bitmap; 94 unsigned long *dirty_bitmap;
95 struct {
96 unsigned long rmap_pde;
97 int write_count;
98 } *lpage_info;
103 unsigned long userspace_addr; 99 unsigned long userspace_addr;
104 int user_alloc; 100 int user_alloc;
105}; 101};
@@ -114,11 +110,11 @@ struct kvm {
114 KVM_PRIVATE_MEM_SLOTS]; 110 KVM_PRIVATE_MEM_SLOTS];
115 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 111 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
116 struct list_head vm_list; 112 struct list_head vm_list;
117 struct file *filp;
118 struct kvm_io_bus mmio_bus; 113 struct kvm_io_bus mmio_bus;
119 struct kvm_io_bus pio_bus; 114 struct kvm_io_bus pio_bus;
120 struct kvm_vm_stat stat; 115 struct kvm_vm_stat stat;
121 struct kvm_arch arch; 116 struct kvm_arch arch;
117 atomic_t users_count;
122}; 118};
123 119
124/* The guest did something we don't support. */ 120/* The guest did something we don't support. */
@@ -145,14 +141,19 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
145 struct module *module); 141 struct module *module);
146void kvm_exit(void); 142void kvm_exit(void);
147 143
144void kvm_get_kvm(struct kvm *kvm);
145void kvm_put_kvm(struct kvm *kvm);
146
148#define HPA_MSB ((sizeof(hpa_t) * 8) - 1) 147#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
149#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) 148#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
150static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } 149static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
151struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); 150struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
152 151
153extern struct page *bad_page; 152extern struct page *bad_page;
153extern pfn_t bad_pfn;
154 154
155int is_error_page(struct page *page); 155int is_error_page(struct page *page);
156int is_error_pfn(pfn_t pfn);
156int kvm_is_error_hva(unsigned long addr); 157int kvm_is_error_hva(unsigned long addr);
157int kvm_set_memory_region(struct kvm *kvm, 158int kvm_set_memory_region(struct kvm *kvm,
158 struct kvm_userspace_memory_region *mem, 159 struct kvm_userspace_memory_region *mem,
@@ -166,8 +167,19 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
166 int user_alloc); 167 int user_alloc);
167gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); 168gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
168struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 169struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
170unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
169void kvm_release_page_clean(struct page *page); 171void kvm_release_page_clean(struct page *page);
170void kvm_release_page_dirty(struct page *page); 172void kvm_release_page_dirty(struct page *page);
173void kvm_set_page_dirty(struct page *page);
174void kvm_set_page_accessed(struct page *page);
175
176pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
177void kvm_release_pfn_dirty(pfn_t);
178void kvm_release_pfn_clean(pfn_t pfn);
179void kvm_set_pfn_dirty(pfn_t pfn);
180void kvm_set_pfn_accessed(pfn_t pfn);
181void kvm_get_pfn(pfn_t pfn);
182
171int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 183int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
172 int len); 184 int len);
173int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 185int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -188,6 +200,7 @@ void kvm_resched(struct kvm_vcpu *vcpu);
188void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); 200void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
189void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); 201void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
190void kvm_flush_remote_tlbs(struct kvm *kvm); 202void kvm_flush_remote_tlbs(struct kvm *kvm);
203void kvm_reload_remote_mmus(struct kvm *kvm);
191 204
192long kvm_arch_dev_ioctl(struct file *filp, 205long kvm_arch_dev_ioctl(struct file *filp,
193 unsigned int ioctl, unsigned long arg); 206 unsigned int ioctl, unsigned long arg);
@@ -223,6 +236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
223 struct kvm_sregs *sregs); 236 struct kvm_sregs *sregs);
224int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 237int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
225 struct kvm_sregs *sregs); 238 struct kvm_sregs *sregs);
239int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
240 struct kvm_mp_state *mp_state);
241int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
242 struct kvm_mp_state *mp_state);
226int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, 243int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
227 struct kvm_debug_guest *dbg); 244 struct kvm_debug_guest *dbg);
228int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); 245int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
@@ -255,6 +272,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
255 272
256int kvm_cpu_get_interrupt(struct kvm_vcpu *v); 273int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
257int kvm_cpu_has_interrupt(struct kvm_vcpu *v); 274int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
275int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
258void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 276void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
259 277
260static inline void kvm_guest_enter(void) 278static inline void kvm_guest_enter(void)
@@ -296,5 +314,18 @@ struct kvm_stats_debugfs_item {
296 struct dentry *dentry; 314 struct dentry *dentry;
297}; 315};
298extern struct kvm_stats_debugfs_item debugfs_entries[]; 316extern struct kvm_stats_debugfs_item debugfs_entries[];
317extern struct dentry *kvm_debugfs_dir;
318
319#ifdef CONFIG_KVM_TRACE
320int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
321void kvm_trace_cleanup(void);
322#else
323static inline
324int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
325{
326 return -EINVAL;
327}
328#define kvm_trace_cleanup() ((void)0)
329#endif
299 330
300#endif 331#endif
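
kvm_host.h above grows a pfn-based guest-memory API (gfn_to_pfn() and the kvm_release_pfn_*/kvm_set_pfn_* helpers) alongside the page-based one. A sketch of the expected lookup/use/release pattern; the function itself is illustrative, not from the patch:

    static int example_touch_gfn(struct kvm *kvm, gfn_t gfn)
    {
        pfn_t pfn = gfn_to_pfn(kvm, gfn);

        if (is_error_pfn(pfn)) {
            kvm_release_pfn_clean(pfn);
            return -EFAULT;
        }

        /* ... map and modify the frame ... */

        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);     /* or kvm_release_pfn_dirty(pfn) */
        return 0;
    }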
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 5497aac0d2f8..3ddce03766ca 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -11,8 +11,11 @@
11 11
12/* Return values for hypercalls */ 12/* Return values for hypercalls */
13#define KVM_ENOSYS 1000 13#define KVM_ENOSYS 1000
14#define KVM_EFAULT EFAULT
15#define KVM_E2BIG E2BIG
14 16
15#define KVM_HC_VAPIC_POLL_IRQ 1 17#define KVM_HC_VAPIC_POLL_IRQ 1
18#define KVM_HC_MMU_OP 2
16 19
17/* 20/*
18 * hypercalls use architecture specific 21 * hypercalls use architecture specific
@@ -20,6 +23,12 @@
20#include <asm/kvm_para.h> 23#include <asm/kvm_para.h>
21 24
22#ifdef __KERNEL__ 25#ifdef __KERNEL__
26#ifdef CONFIG_KVM_GUEST
27void __init kvm_guest_init(void);
28#else
29#define kvm_guest_init() do { } while (0)
30#endif
31
23static inline int kvm_para_has_feature(unsigned int feature) 32static inline int kvm_para_has_feature(unsigned int feature)
24{ 33{
25 if (kvm_arch_para_features() & (1UL << feature)) 34 if (kvm_arch_para_features() & (1UL << feature))
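
kvm_para.h now exposes kvm_guest_init() (a no-op without CONFIG_KVM_GUEST) plus extra hypercall return codes. A loosely hedged sketch of guest-side setup; KVM_FEATURE_EXAMPLE is a placeholder name rather than a define from this patch, and kvm_para_available() comes from the per-arch <asm/kvm_para.h>:

    #include <linux/kernel.h>
    #include <linux/kvm_para.h>

    void __init example_guest_setup(void)
    {
        /* in practice this is called once from the arch setup code */
        kvm_guest_init();

        if (!kvm_para_available())
            return;

        if (kvm_para_has_feature(KVM_FEATURE_EXAMPLE))
            printk(KERN_INFO "kvm guest: example feature present\n");
    }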
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 1c4e46decb22..9b6f395c9625 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -38,6 +38,8 @@ typedef unsigned long hva_t;
38typedef u64 hpa_t; 38typedef u64 hpa_t;
39typedef unsigned long hfn_t; 39typedef unsigned long hfn_t;
40 40
41typedef hfn_t pfn_t;
42
41struct kvm_pio_request { 43struct kvm_pio_request {
42 unsigned long count; 44 unsigned long count;
43 int cur_count; 45 int cur_count;
diff --git a/include/linux/list.h b/include/linux/list.h
index dac16f99c701..b4a939b6b625 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -319,6 +319,15 @@ static inline int list_empty_careful(const struct list_head *head)
319 return (next == head) && (next == head->prev); 319 return (next == head) && (next == head->prev);
320} 320}
321 321
322/**
323 * list_is_singular - tests whether a list has just one entry.
324 * @head: the list to test.
325 */
326static inline int list_is_singular(const struct list_head *head)
327{
328 return !list_empty(head) && (head->next == head->prev);
329}
330
322static inline void __list_splice(struct list_head *list, 331static inline void __list_splice(struct list_head *list,
323 struct list_head *head) 332 struct list_head *head)
324{ 333{
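
A tiny usage sketch for the new list_is_singular() helper; the request structure and fast path are hypothetical:

    #include <linux/list.h>

    struct example_req {
        struct list_head node;
        int id;
    };

    /* fast path: only meaningful when exactly one request is queued */
    static struct example_req *peek_only_req(struct list_head *queue)
    {
        if (!list_is_singular(queue))
            return NULL;
        return list_entry(queue->next, struct example_req, node);
    }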
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8fee7a45736b..73e358612eaf 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -8,8 +8,18 @@
8struct page; 8struct page;
9struct zone; 9struct zone;
10struct pglist_data; 10struct pglist_data;
11struct mem_section;
11 12
12#ifdef CONFIG_MEMORY_HOTPLUG 13#ifdef CONFIG_MEMORY_HOTPLUG
14
15/*
16 * Magic number for free bootmem.
17 * The normal smallest mapcount is -1. Here is smaller value than it.
18 */
19#define SECTION_INFO 0xfffffffe
20#define MIX_INFO 0xfffffffd
21#define NODE_INFO 0xfffffffc
22
13/* 23/*
14 * pgdat resizing functions 24 * pgdat resizing functions
15 */ 25 */
@@ -64,9 +74,11 @@ extern int offline_pages(unsigned long, unsigned long, unsigned long);
64/* reasonably generic interface to expand the physical pages in a zone */ 74/* reasonably generic interface to expand the physical pages in a zone */
65extern int __add_pages(struct zone *zone, unsigned long start_pfn, 75extern int __add_pages(struct zone *zone, unsigned long start_pfn,
66 unsigned long nr_pages); 76 unsigned long nr_pages);
77extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
78 unsigned long nr_pages);
67 79
68/* 80/*
69 * Walk thorugh all memory which is registered as resource. 81 * Walk through all memory which is registered as resource.
70 * arg is (start_pfn, nr_pages, private_arg_pointer) 82 * arg is (start_pfn, nr_pages, private_arg_pointer)
71 */ 83 */
72extern int walk_memory_resource(unsigned long start_pfn, 84extern int walk_memory_resource(unsigned long start_pfn,
@@ -142,6 +154,18 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
142#endif /* CONFIG_NUMA */ 154#endif /* CONFIG_NUMA */
143#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ 155#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
144 156
157#ifdef CONFIG_SPARSEMEM_VMEMMAP
158static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
159{
160}
161static inline void put_page_bootmem(struct page *page)
162{
163}
164#else
165extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
166extern void put_page_bootmem(struct page *page);
167#endif
168
145#else /* ! CONFIG_MEMORY_HOTPLUG */ 169#else /* ! CONFIG_MEMORY_HOTPLUG */
146/* 170/*
147 * Stub functions for when hotplug is off 171 * Stub functions for when hotplug is off
@@ -169,6 +193,10 @@ static inline int mhp_notimplemented(const char *func)
169 return -ENOSYS; 193 return -ENOSYS;
170} 194}
171 195
196static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
197{
198}
199
172#endif /* ! CONFIG_MEMORY_HOTPLUG */ 200#endif /* ! CONFIG_MEMORY_HOTPLUG */
173 201
174extern int add_memory(int nid, u64 start, u64 size); 202extern int add_memory(int nid, u64 start, u64 size);
@@ -176,5 +204,8 @@ extern int arch_add_memory(int nid, u64 start, u64 size);
176extern int remove_memory(u64 start, u64 size); 204extern int remove_memory(u64 start, u64 size);
177extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, 205extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
178 int nr_pages); 206 int nr_pages);
207extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
208extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
209 unsigned long pnum);
179 210
180#endif /* __LINUX_MEMORY_HOTPLUG_H */ 211#endif /* __LINUX_MEMORY_HOTPLUG_H */
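
memory_hotplug.h now declares a removal path (__remove_pages(), sparse_remove_one_section()) to mirror the add path. A hedged sketch of what an architecture's remove-memory hook might do with it; real implementations also have to tear down mappings and associated resources:

    static int example_arch_remove_memory(u64 start, u64 size)
    {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone = page_zone(pfn_to_page(start_pfn));

        return __remove_pages(zone, start_pfn, nr_pages);
    }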
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 59c4865bc85f..3a39570b81b8 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -8,15 +8,32 @@
8 * Copyright 2003,2004 Andi Kleen SuSE Labs 8 * Copyright 2003,2004 Andi Kleen SuSE Labs
9 */ 9 */
10 10
11/*
12 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
13 * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
14 * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
15 */
16
11/* Policies */ 17/* Policies */
12#define MPOL_DEFAULT 0 18enum {
13#define MPOL_PREFERRED 1 19 MPOL_DEFAULT,
14#define MPOL_BIND 2 20 MPOL_PREFERRED,
15#define MPOL_INTERLEAVE 3 21 MPOL_BIND,
22 MPOL_INTERLEAVE,
23 MPOL_MAX, /* always last member of enum */
24};
16 25
17#define MPOL_MAX MPOL_INTERLEAVE 26/* Flags for set_mempolicy */
27#define MPOL_F_STATIC_NODES (1 << 15)
28#define MPOL_F_RELATIVE_NODES (1 << 14)
18 29
19/* Flags for get_mem_policy */ 30/*
31 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
32 * either set_mempolicy() or mbind().
33 */
34#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
35
36/* Flags for get_mempolicy */
20#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */ 37#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
21#define MPOL_F_ADDR (1<<1) /* look up vma using address */ 38#define MPOL_F_ADDR (1<<1) /* look up vma using address */
22#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */ 39#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
@@ -27,6 +44,14 @@
27#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */ 44#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
28#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */ 45#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
29 46
47/*
48 * Internal flags that share the struct mempolicy flags word with
49 * "mode flags". These flags are allocated from bit 0 up, as they
50 * are never OR'ed into the mode in mempolicy API arguments.
51 */
52#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
53#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
54
30#ifdef __KERNEL__ 55#ifdef __KERNEL__
31 56
32#include <linux/mmzone.h> 57#include <linux/mmzone.h>
@@ -35,7 +60,6 @@
35#include <linux/spinlock.h> 60#include <linux/spinlock.h>
36#include <linux/nodemask.h> 61#include <linux/nodemask.h>
37 62
38struct vm_area_struct;
39struct mm_struct; 63struct mm_struct;
40 64
41#ifdef CONFIG_NUMA 65#ifdef CONFIG_NUMA
@@ -54,22 +78,27 @@ struct mm_struct;
54 * mmap_sem. 78 * mmap_sem.
55 * 79 *
56 * Freeing policy: 80 * Freeing policy:
57 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd. 81 * Mempolicy objects are reference counted. A mempolicy will be freed when
58 * All other policies don't have any external state. mpol_free() handles this. 82 * mpol_put() decrements the reference count to zero.
59 * 83 *
60 * Copying policy objects: 84 * Duplicating policy objects:
61 * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this. 85 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
86 * to the new storage. The reference count of the new object is initialized
87 * to 1, representing the caller of mpol_dup().
62 */ 88 */
63struct mempolicy { 89struct mempolicy {
64 atomic_t refcnt; 90 atomic_t refcnt;
65 short policy; /* See MPOL_* above */ 91 unsigned short mode; /* See MPOL_* above */
92 unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
66 union { 93 union {
67 struct zonelist *zonelist; /* bind */
68 short preferred_node; /* preferred */ 94 short preferred_node; /* preferred */
69 nodemask_t nodes; /* interleave */ 95 nodemask_t nodes; /* interleave/bind */
70 /* undefined for default */ 96 /* undefined for default */
71 } v; 97 } v;
72 nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */ 98 union {
99 nodemask_t cpuset_mems_allowed; /* relative to these nodes */
100 nodemask_t user_nodemask; /* nodemask passed by user */
101 } w;
73}; 102};
74 103
75/* 104/*
@@ -77,18 +106,43 @@ struct mempolicy {
77 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined. 106 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
78 */ 107 */
79 108
80extern void __mpol_free(struct mempolicy *pol); 109extern void __mpol_put(struct mempolicy *pol);
81static inline void mpol_free(struct mempolicy *pol) 110static inline void mpol_put(struct mempolicy *pol)
82{ 111{
83 if (pol) 112 if (pol)
84 __mpol_free(pol); 113 __mpol_put(pol);
85} 114}
86 115
87extern struct mempolicy *__mpol_copy(struct mempolicy *pol); 116/*
88static inline struct mempolicy *mpol_copy(struct mempolicy *pol) 117 * Does mempolicy pol need explicit unref after use?
118 * Currently only needed for shared policies.
119 */
120static inline int mpol_needs_cond_ref(struct mempolicy *pol)
121{
122 return (pol && (pol->flags & MPOL_F_SHARED));
123}
124
125static inline void mpol_cond_put(struct mempolicy *pol)
126{
127 if (mpol_needs_cond_ref(pol))
128 __mpol_put(pol);
129}
130
131extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
132 struct mempolicy *frompol);
133static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
134 struct mempolicy *frompol)
135{
136 if (!frompol)
137 return frompol;
138 return __mpol_cond_copy(tompol, frompol);
139}
140
141extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
142static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
89{ 143{
90 if (pol) 144 if (pol)
91 pol = __mpol_copy(pol); 145 pol = __mpol_dup(pol);
92 return pol; 146 return pol;
93} 147}
94 148
@@ -108,11 +162,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
108 return 1; 162 return 1;
109 return __mpol_equal(a, b); 163 return __mpol_equal(a, b);
110} 164}
111#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
112
113/* Could later add inheritance of the process policy here. */
114
115#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
116 165
117/* 166/*
118 * Tree of shared policies for a shared memory region. 167 * Tree of shared policies for a shared memory region.
@@ -133,8 +182,7 @@ struct shared_policy {
133 spinlock_t lock; 182 spinlock_t lock;
134}; 183};
135 184
136void mpol_shared_policy_init(struct shared_policy *info, int policy, 185void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
137 nodemask_t *nodes);
138int mpol_set_shared_policy(struct shared_policy *info, 186int mpol_set_shared_policy(struct shared_policy *info,
139 struct vm_area_struct *vma, 187 struct vm_area_struct *vma,
140 struct mempolicy *new); 188 struct mempolicy *new);
@@ -149,9 +197,9 @@ extern void mpol_rebind_task(struct task_struct *tsk,
149extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new); 197extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
150extern void mpol_fix_fork_child_flag(struct task_struct *p); 198extern void mpol_fix_fork_child_flag(struct task_struct *p);
151 199
152extern struct mempolicy default_policy;
153extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, 200extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
154 unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol); 201 unsigned long addr, gfp_t gfp_flags,
202 struct mempolicy **mpol, nodemask_t **nodemask);
155extern unsigned slab_node(struct mempolicy *policy); 203extern unsigned slab_node(struct mempolicy *policy);
156 204
157extern enum zone_type policy_zone; 205extern enum zone_type policy_zone;
@@ -165,6 +213,13 @@ static inline void check_highest_zone(enum zone_type k)
165int do_migrate_pages(struct mm_struct *mm, 213int do_migrate_pages(struct mm_struct *mm,
166 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags); 214 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
167 215
216
217#ifdef CONFIG_TMPFS
218extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
219
220extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
221 int no_context);
222#endif
168#else 223#else
169 224
170struct mempolicy {}; 225struct mempolicy {};
@@ -173,19 +228,26 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
173{ 228{
174 return 1; 229 return 1;
175} 230}
176#define vma_mpol_equal(a,b) 1
177 231
178#define mpol_set_vma_default(vma) do {} while(0) 232static inline void mpol_put(struct mempolicy *p)
233{
234}
235
236static inline void mpol_cond_put(struct mempolicy *pol)
237{
238}
179 239
180static inline void mpol_free(struct mempolicy *p) 240static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
241 struct mempolicy *from)
181{ 242{
243 return from;
182} 244}
183 245
184static inline void mpol_get(struct mempolicy *pol) 246static inline void mpol_get(struct mempolicy *pol)
185{ 247{
186} 248}
187 249
188static inline struct mempolicy *mpol_copy(struct mempolicy *old) 250static inline struct mempolicy *mpol_dup(struct mempolicy *old)
189{ 251{
190 return NULL; 252 return NULL;
191} 253}
@@ -199,8 +261,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
199 return -EINVAL; 261 return -EINVAL;
200} 262}
201 263
202static inline void mpol_shared_policy_init(struct shared_policy *info, 264static inline void mpol_shared_policy_init(struct shared_policy *sp,
203 int policy, nodemask_t *nodes) 265 struct mempolicy *mpol)
204{ 266{
205} 267}
206 268
@@ -239,9 +301,12 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
239} 301}
240 302
241static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, 303static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
242 unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol) 304 unsigned long addr, gfp_t gfp_flags,
305 struct mempolicy **mpol, nodemask_t **nodemask)
243{ 306{
244 return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags); 307 *mpol = NULL;
308 *nodemask = NULL;
309 return node_zonelist(0, gfp_flags);
245} 310}
246 311
247static inline int do_migrate_pages(struct mm_struct *mm, 312static inline int do_migrate_pages(struct mm_struct *mm,
@@ -254,6 +319,21 @@ static inline int do_migrate_pages(struct mm_struct *mm,
254static inline void check_highest_zone(int k) 319static inline void check_highest_zone(int k)
255{ 320{
256} 321}
322
323#ifdef CONFIG_TMPFS
324static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
325 int no_context)
326{
327 return 1; /* error */
328}
329
330static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
331 int no_context)
332{
333 return 0;
334}
335#endif
336
257#endif /* CONFIG_NUMA */ 337#endif /* CONFIG_NUMA */
258#endif /* __KERNEL__ */ 338#endif /* __KERNEL__ */
259 339
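
The mempolicy renames above (mpol_copy/mpol_free becoming mpol_dup/mpol_put) make the reference-count intent explicit. A sketch of the basic duplicate-and-drop pattern; it assumes __mpol_dup() still reports allocation failure with ERR_PTR(), as __mpol_copy() did:

    #include <linux/err.h>
    #include <linux/mempolicy.h>

    static int example_with_policy_copy(struct mempolicy *pol)
    {
        struct mempolicy *copy = mpol_dup(pol);  /* copy's refcount starts at 1 */

        if (IS_ERR(copy))
            return PTR_ERR(copy);

        /* ... perform allocations against 'copy' ... */

        mpol_put(copy);
        return 0;
    }

Policies looked up through a vma's get_policy() op only carry an extra reference when MPOL_F_SHARED is set, which is why such lookups are paired with mpol_cond_put() rather than an unconditional mpol_put().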
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ff7df1a2222f..9fa1a8002ce2 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -208,6 +208,38 @@ struct mlx4_mtt {
208 int page_shift; 208 int page_shift;
209}; 209};
210 210
211enum {
212 MLX4_DB_PER_PAGE = PAGE_SIZE / 4
213};
214
215struct mlx4_db_pgdir {
216 struct list_head list;
217 DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
218 DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
219 unsigned long *bits[2];
220 __be32 *db_page;
221 dma_addr_t db_dma;
222};
223
224struct mlx4_ib_user_db_page;
225
226struct mlx4_db {
227 __be32 *db;
228 union {
229 struct mlx4_db_pgdir *pgdir;
230 struct mlx4_ib_user_db_page *user_page;
231 } u;
232 dma_addr_t dma;
233 int index;
234 int order;
235};
236
237struct mlx4_hwq_resources {
238 struct mlx4_db db;
239 struct mlx4_mtt mtt;
240 struct mlx4_buf buf;
241};
242
211struct mlx4_mr { 243struct mlx4_mr {
212 struct mlx4_mtt mtt; 244 struct mlx4_mtt mtt;
213 u64 iova; 245 u64 iova;
@@ -341,6 +373,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
341int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 373int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
342 struct mlx4_buf *buf); 374 struct mlx4_buf *buf);
343 375
376int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
377void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
378
379int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
380 int size, int max_direct);
381void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
382 int size);
383
344int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, 384int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
345 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq); 385 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
346void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); 386void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
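
mlx4/device.h adds doorbell-record and work-queue resource helpers. A loosely hedged sketch of how a consumer might use them; the stride and max_direct values are made up, and teardown would be the matching mlx4_free_hwq_res(dev, wqres, size):

    static int example_create_wq(struct mlx4_dev *dev,
                                 struct mlx4_hwq_resources *wqres, int nent)
    {
        int err;

        err = mlx4_alloc_hwq_res(dev, wqres, nent * 64 /* assumed stride */,
                                 2 * PAGE_SIZE /* max_direct, arbitrary */);
        if (err)
            return err;

        *wqres->db.db = 0;      /* start with a cleared doorbell record */

        /* wqres->db.dma and wqres->mtt are what get written into the
         * hardware context for this queue */
        return 0;
    }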
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index a5e43febee4f..7f128b266faa 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
296int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, 296int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
297 struct mlx4_qp_context *context); 297 struct mlx4_qp_context *context);
298 298
299int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
300 struct mlx4_qp_context *context,
301 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
302
299static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) 303static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
300{ 304{
301 return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); 305 return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 286d31521605..8b7f4a5d4f6a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -107,6 +107,7 @@ extern unsigned int kobjsize(const void *objp);
107#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ 107#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
108 108
109#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ 109#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
110#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
110 111
111#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ 112#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
112#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS 113#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -164,8 +165,6 @@ struct vm_operations_struct {
164 void (*open)(struct vm_area_struct * area); 165 void (*open)(struct vm_area_struct * area);
165 void (*close)(struct vm_area_struct * area); 166 void (*close)(struct vm_area_struct * area);
166 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); 167 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
167 struct page *(*nopage)(struct vm_area_struct *area,
168 unsigned long address, int *type);
169 unsigned long (*nopfn)(struct vm_area_struct *area, 168 unsigned long (*nopfn)(struct vm_area_struct *area,
170 unsigned long address); 169 unsigned long address);
171 170
@@ -173,7 +172,25 @@ struct vm_operations_struct {
173 * writable, if an error is returned it will cause a SIGBUS */ 172 * writable, if an error is returned it will cause a SIGBUS */
174 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); 173 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
175#ifdef CONFIG_NUMA 174#ifdef CONFIG_NUMA
175 /*
176 * set_policy() op must add a reference to any non-NULL @new mempolicy
177 * to hold the policy upon return. Caller should pass NULL @new to
178 * remove a policy and fall back to surrounding context--i.e. do not
179 * install a MPOL_DEFAULT policy, nor the task or system default
180 * mempolicy.
181 */
176 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); 182 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
183
184 /*
185 * get_policy() op must add reference [mpol_get()] to any policy at
186 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
187 * in mm/mempolicy.c will do this automatically.
188 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
189 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
190 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
191 * must return NULL--i.e., do not "fallback" to task or system default
192 * policy.
193 */
177 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 194 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
178 unsigned long addr); 195 unsigned long addr);
179 int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from, 196 int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
@@ -397,11 +414,11 @@ static inline void set_compound_order(struct page *page, unsigned long order)
397 * we have run out of space and have to fall back to an 414 * we have run out of space and have to fall back to an
398 * alternate (slower) way of determining the node. 415 * alternate (slower) way of determining the node.
399 * 416 *
400 * No sparsemem: | NODE | ZONE | ... | FLAGS | 417 * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
401 * with space for node: | SECTION | NODE | ZONE | ... | FLAGS | 418 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
402 * no space for node: | SECTION | ZONE | ... | FLAGS | 419 * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
403 */ 420 */
404#ifdef CONFIG_SPARSEMEM 421#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
405#define SECTIONS_WIDTH SECTIONS_SHIFT 422#define SECTIONS_WIDTH SECTIONS_SHIFT
406#else 423#else
407#define SECTIONS_WIDTH 0 424#define SECTIONS_WIDTH 0
@@ -409,9 +426,12 @@ static inline void set_compound_order(struct page *page, unsigned long order)
409 426
410#define ZONES_WIDTH ZONES_SHIFT 427#define ZONES_WIDTH ZONES_SHIFT
411 428
412#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED 429#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
413#define NODES_WIDTH NODES_SHIFT 430#define NODES_WIDTH NODES_SHIFT
414#else 431#else
432#ifdef CONFIG_SPARSEMEM_VMEMMAP
433#error "Vmemmap: No space for nodes field in page flags"
434#endif
415#define NODES_WIDTH 0 435#define NODES_WIDTH 0
416#endif 436#endif
417 437
@@ -454,8 +474,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
454 474
455#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) 475#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
456 476
457#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED 477#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
458#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED 478#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
459#endif 479#endif
460 480
461#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) 481#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
@@ -504,10 +524,12 @@ static inline struct zone *page_zone(struct page *page)
504 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; 524 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
505} 525}
506 526
527#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
507static inline unsigned long page_to_section(struct page *page) 528static inline unsigned long page_to_section(struct page *page)
508{ 529{
509 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 530 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
510} 531}
532#endif
511 533
512static inline void set_page_zone(struct page *page, enum zone_type zone) 534static inline void set_page_zone(struct page *page, enum zone_type zone)
513{ 535{
@@ -602,9 +624,12 @@ static inline struct address_space *page_mapping(struct page *page)
602 struct address_space *mapping = page->mapping; 624 struct address_space *mapping = page->mapping;
603 625
604 VM_BUG_ON(PageSlab(page)); 626 VM_BUG_ON(PageSlab(page));
627#ifdef CONFIG_SWAP
605 if (unlikely(PageSwapCache(page))) 628 if (unlikely(PageSwapCache(page)))
606 mapping = &swapper_space; 629 mapping = &swapper_space;
607 else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) 630 else
631#endif
632 if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
608 mapping = NULL; 633 mapping = NULL;
609 return mapping; 634 return mapping;
610} 635}
@@ -649,12 +674,6 @@ static inline int page_mapped(struct page *page)
649} 674}
650 675
651/* 676/*
652 * Error return values for the *_nopage functions
653 */
654#define NOPAGE_SIGBUS (NULL)
655#define NOPAGE_OOM ((struct page *) (-1))
656
657/*
658 * Error return values for the *_nopfn functions 677 * Error return values for the *_nopfn functions
659 */ 678 */
660#define NOPFN_SIGBUS ((unsigned long) -1) 679#define NOPFN_SIGBUS ((unsigned long) -1)
@@ -720,7 +739,9 @@ struct zap_details {
720 unsigned long truncate_count; /* Compare vm_truncate_count */ 739 unsigned long truncate_count; /* Compare vm_truncate_count */
721}; 740};
722 741
723struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t); 742struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
743 pte_t pte);
744
724unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, 745unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
725 unsigned long size, struct zap_details *); 746 unsigned long size, struct zap_details *);
726unsigned long unmap_vmas(struct mmu_gather **tlb, 747unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -1149,6 +1170,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
1149int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 1170int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
1150int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 1171int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1151 unsigned long pfn); 1172 unsigned long pfn);
1173int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1174 unsigned long pfn);
1152 1175
1153struct page *follow_page(struct vm_area_struct *, unsigned long address, 1176struct page *follow_page(struct vm_area_struct *, unsigned long address,
1154 unsigned int foll_flags); 1177 unsigned int foll_flags);
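
mm.h above introduces VM_MIXEDMAP and vm_insert_mixed() for mappings that mix struct-page-backed memory with raw PFNs. A hypothetical driver sketch: the mmap op only marks the vma, and the insert helper is then usable from, for example, a fault path:

    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
        vma->vm_flags |= VM_MIXEDMAP;
        return 0;
    }

    static int example_map_one(struct vm_area_struct *vma, unsigned long addr,
                               unsigned long pfn)
    {
        /* pfn may or may not have a struct page behind it */
        return vm_insert_mixed(vma, addr, pfn);
    }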
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index af190ceab971..29adaa781cb6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -172,6 +172,7 @@ struct mm_struct {
172 atomic_t mm_users; /* How many users with user space? */ 172 atomic_t mm_users; /* How many users with user space? */
173 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ 173 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
174 int map_count; /* number of VMAs */ 174 int map_count; /* number of VMAs */
175 int core_waiters;
175 struct rw_semaphore mmap_sem; 176 struct rw_semaphore mmap_sem;
176 spinlock_t page_table_lock; /* Protects page tables and some counters */ 177 spinlock_t page_table_lock; /* Protects page tables and some counters */
177 178
@@ -216,11 +217,10 @@ struct mm_struct {
216 unsigned long flags; /* Must use atomic bitops to access the bits */ 217 unsigned long flags; /* Must use atomic bitops to access the bits */
217 218
218 /* coredumping support */ 219 /* coredumping support */
219 int core_waiters;
220 struct completion *core_startup_done, core_done; 220 struct completion *core_startup_done, core_done;
221 221
222 /* aio bits */ 222 /* aio bits */
223 rwlock_t ioctx_list_lock; 223 rwlock_t ioctx_list_lock; /* aio lock */
224 struct kioctx *ioctx_list; 224 struct kioctx *ioctx_list;
225#ifdef CONFIG_CGROUP_MEM_RES_CTLR 225#ifdef CONFIG_CGROUP_MEM_RES_CTLR
226 struct mem_cgroup *mem_cgroup; 226 struct mem_cgroup *mem_cgroup;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f274a687c7e..aad98003176f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -3,6 +3,7 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5#ifndef __ASSEMBLY__ 5#ifndef __ASSEMBLY__
6#ifndef __GENERATING_BOUNDS_H
6 7
7#include <linux/spinlock.h> 8#include <linux/spinlock.h>
8#include <linux/list.h> 9#include <linux/list.h>
@@ -15,6 +16,7 @@
15#include <linux/seqlock.h> 16#include <linux/seqlock.h>
16#include <linux/nodemask.h> 17#include <linux/nodemask.h>
17#include <linux/pageblock-flags.h> 18#include <linux/pageblock-flags.h>
19#include <linux/bounds.h>
18#include <asm/atomic.h> 20#include <asm/atomic.h>
19#include <asm/page.h> 21#include <asm/page.h>
20 22
@@ -129,6 +131,8 @@ struct per_cpu_pageset {
129#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)]) 131#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
130#endif 132#endif
131 133
134#endif /* !__GENERATING_BOUNDS_H */
135
132enum zone_type { 136enum zone_type {
133#ifdef CONFIG_ZONE_DMA 137#ifdef CONFIG_ZONE_DMA
134 /* 138 /*
@@ -177,9 +181,11 @@ enum zone_type {
177 ZONE_HIGHMEM, 181 ZONE_HIGHMEM,
178#endif 182#endif
179 ZONE_MOVABLE, 183 ZONE_MOVABLE,
180 MAX_NR_ZONES 184 __MAX_NR_ZONES
181}; 185};
182 186
187#ifndef __GENERATING_BOUNDS_H
188
183/* 189/*
184 * When a memory allocation must conform to specific limitations (such 190 * When a memory allocation must conform to specific limitations (such
185 * as being suitable for DMA) the caller will pass in hints to the 191 * as being suitable for DMA) the caller will pass in hints to the
@@ -188,28 +194,15 @@ enum zone_type {
188 * match the requested limits. See gfp_zone() in include/linux/gfp.h 194 * match the requested limits. See gfp_zone() in include/linux/gfp.h
189 */ 195 */
190 196
191/* 197#if MAX_NR_ZONES < 2
192 * Count the active zones. Note that the use of defined(X) outside
193 * #if and family is not necessarily defined so ensure we cannot use
194 * it later. Use __ZONE_COUNT to work out how many shift bits we need.
195 */
196#define __ZONE_COUNT ( \
197 defined(CONFIG_ZONE_DMA) \
198 + defined(CONFIG_ZONE_DMA32) \
199 + 1 \
200 + defined(CONFIG_HIGHMEM) \
201 + 1 \
202)
203#if __ZONE_COUNT < 2
204#define ZONES_SHIFT 0 198#define ZONES_SHIFT 0
205#elif __ZONE_COUNT <= 2 199#elif MAX_NR_ZONES <= 2
206#define ZONES_SHIFT 1 200#define ZONES_SHIFT 1
207#elif __ZONE_COUNT <= 4 201#elif MAX_NR_ZONES <= 4
208#define ZONES_SHIFT 2 202#define ZONES_SHIFT 2
209#else 203#else
210#error ZONES_SHIFT -- too many zones configured adjust calculation 204#error ZONES_SHIFT -- too many zones configured adjust calculation
211#endif 205#endif
212#undef __ZONE_COUNT
213 206
214struct zone { 207struct zone {
215 /* Fields commonly accessed by the page allocator */ 208 /* Fields commonly accessed by the page allocator */
@@ -393,10 +386,10 @@ static inline int zone_is_oom_locked(const struct zone *zone)
393 * The NUMA zonelists are doubled because we need zonelists that restrict the 386 * The NUMA zonelists are doubled because we need zonelists that restrict the
394 * allocations to a single node for GFP_THISNODE. 387 * allocations to a single node for GFP_THISNODE.
395 * 388 *
396 * [0 .. MAX_NR_ZONES -1] : Zonelists with fallback 389 * [0] : Zonelist with fallback
397 * [MAZ_NR_ZONES ... MAZ_ZONELISTS -1] : No fallback (GFP_THISNODE) 390 * [1] : No fallback (GFP_THISNODE)
398 */ 391 */
399#define MAX_ZONELISTS (2 * MAX_NR_ZONES) 392#define MAX_ZONELISTS 2
400 393
401 394
402/* 395/*
@@ -464,11 +457,20 @@ struct zonelist_cache {
464 unsigned long last_full_zap; /* when last zap'd (jiffies) */ 457 unsigned long last_full_zap; /* when last zap'd (jiffies) */
465}; 458};
466#else 459#else
467#define MAX_ZONELISTS MAX_NR_ZONES 460#define MAX_ZONELISTS 1
468struct zonelist_cache; 461struct zonelist_cache;
469#endif 462#endif
470 463
471/* 464/*
465 * This struct contains information about a zone in a zonelist. It is stored
466 * here to avoid dereferences into large structures and lookups of tables
467 */
468struct zoneref {
469 struct zone *zone; /* Pointer to actual zone */
470 int zone_idx; /* zone_idx(zoneref->zone) */
471};
472
473/*
472 * One allocation request operates on a zonelist. A zonelist 474 * One allocation request operates on a zonelist. A zonelist
473 * is a list of zones, the first one is the 'goal' of the 475 * is a list of zones, the first one is the 'goal' of the
474 * allocation, the other zones are fallback zones, in decreasing 476 * allocation, the other zones are fallback zones, in decreasing
@@ -476,34 +478,23 @@ struct zonelist_cache;
476 * 478 *
477 * If zlcache_ptr is not NULL, then it is just the address of zlcache, 479 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
478 * as explained above. If zlcache_ptr is NULL, there is no zlcache. 480 * as explained above. If zlcache_ptr is NULL, there is no zlcache.
481 * *
482 * To speed the reading of the zonelist, the zonerefs contain the zone index
483 * of the entry being read. Helper functions to access information given
484 * a struct zoneref are
485 *
486 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
487 * zonelist_zone_idx() - Return the index of the zone for an entry
488 * zonelist_node_idx() - Return the index of the node for an entry
479 */ 489 */
480
481struct zonelist { 490struct zonelist {
482 struct zonelist_cache *zlcache_ptr; // NULL or &zlcache 491 struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
483 struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; // NULL delimited 492 struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
484#ifdef CONFIG_NUMA 493#ifdef CONFIG_NUMA
485 struct zonelist_cache zlcache; // optional ... 494 struct zonelist_cache zlcache; // optional ...
486#endif 495#endif
487}; 496};
488 497
489#ifdef CONFIG_NUMA
490/*
491 * Only custom zonelists like MPOL_BIND need to be filtered as part of
492 * policies. As described in the comment for struct zonelist_cache, these
493 * zonelists will not have a zlcache so zlcache_ptr will not be set. Use
494 * that to determine if the zonelists needs to be filtered or not.
495 */
496static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
497{
498 return !zonelist->zlcache_ptr;
499}
500#else
501static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
502{
503 return 0;
504}
505#endif /* CONFIG_NUMA */
506
507#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 498#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
508struct node_active_region { 499struct node_active_region {
509 unsigned long start_pfn; 500 unsigned long start_pfn;
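
For orientation, a hedged sketch (not from the patch) of what one entry of the new _zonerefs[] array carries. It assumes node_zonelists[0] is the fallback list, as the MAX_ZONELISTS comment above states, and that the array stays terminated by a NULL zone like the old zones[] array was; the helper name is hypothetical.

/* Minimal sketch: peek at the first zoneref of a node's fallback zonelist. */
static struct zone *example_first_zoneref(pg_data_t *pgdat)
{
        struct zoneref *z = pgdat->node_zonelists[0]._zonerefs;

        if (!zonelist_zone(z))          /* assumed NULL-zone terminated */
                return NULL;
        return zonelist_zone(z);        /* zonelist_zone_idx(z) gives its cached index */
}
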
@@ -637,9 +628,10 @@ static inline int is_normal_idx(enum zone_type idx)
637static inline int is_highmem(struct zone *zone) 628static inline int is_highmem(struct zone *zone)
638{ 629{
639#ifdef CONFIG_HIGHMEM 630#ifdef CONFIG_HIGHMEM
640 int zone_idx = zone - zone->zone_pgdat->node_zones; 631 int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
641 return zone_idx == ZONE_HIGHMEM || 632 return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
642 (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem()); 633 (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
634 zone_movable_is_highmem());
643#else 635#else
644 return 0; 636 return 0;
645#endif 637#endif
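
The is_highmem() rewrite above appears to trade the pointer subtraction, which implies a division by sizeof(struct zone) (not a power of two), for byte-offset compares against constant multiples. A small equivalence sketch, illustration only; the helper name is hypothetical.

/* Minimal sketch: byte-offset test equivalent to zone_idx(zone) == idx. */
static int example_zone_is(struct zone *zone, enum zone_type idx)
{
        unsigned long off = (char *)zone - (char *)zone->zone_pgdat->node_zones;

        return off == idx * sizeof(*zone);
}
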
@@ -730,32 +722,103 @@ extern struct zone *next_zone(struct zone *zone);
730 zone; \ 722 zone; \
731 zone = next_zone(zone)) 723 zone = next_zone(zone))
732 724
733#ifdef CONFIG_SPARSEMEM 725static inline struct zone *zonelist_zone(struct zoneref *zoneref)
734#include <asm/sparsemem.h> 726{
735#endif 727 return zoneref->zone;
728}
736 729
737#if BITS_PER_LONG == 32 730static inline int zonelist_zone_idx(struct zoneref *zoneref)
738/* 731{
739 * with 32 bit page->flags field, we reserve 9 bits for node/zone info. 732 return zoneref->zone_idx;
740 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes. 733}
734
735static inline int zonelist_node_idx(struct zoneref *zoneref)
736{
737#ifdef CONFIG_NUMA
738 /* zone_to_nid not available in this context */
739 return zoneref->zone->node;
740#else
741 return 0;
742#endif /* CONFIG_NUMA */
743}
744
745/**
746 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
747 * @z - The cursor used as a starting point for the search
748 * @highest_zoneidx - The zone index of the highest zone to return
749 * @nodes - An optional nodemask to filter the zonelist with
750 * @zone - The first suitable zone found is returned via this parameter
751 *
752 * This function returns the next zone at or below a given zone index that is
753 * within the allowed nodemask using a cursor as the starting point for the
754 * search. The zoneref returned is a cursor that is used as the next starting
755 * point for future calls to next_zones_zonelist().
741 */ 756 */
742#define FLAGS_RESERVED 9 757struct zoneref *next_zones_zonelist(struct zoneref *z,
758 enum zone_type highest_zoneidx,
759 nodemask_t *nodes,
760 struct zone **zone);
743 761
744#elif BITS_PER_LONG == 64 762/**
745/* 763 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
746 * with 64 bit flags field, there's plenty of room. 764 * @zonelist - The zonelist to search for a suitable zone
765 * @highest_zoneidx - The zone index of the highest zone to return
766 * @nodes - An optional nodemask to filter the zonelist with
767 * @zone - The first suitable zone found is returned via this parameter
768 *
769 * This function returns the first zone at or below a given zone index that is
770 * within the allowed nodemask. The zoneref returned is a cursor that can be
771 * used to iterate the zonelist with next_zones_zonelist. The cursor should
772 * not be used by the caller as it does not match the value of the zone
773 * returned.
747 */ 774 */
748#define FLAGS_RESERVED 32 775static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
776 enum zone_type highest_zoneidx,
777 nodemask_t *nodes,
778 struct zone **zone)
779{
780 return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
781 zone);
782}
749 783
750#else 784/**
785 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
786 * @zone - The current zone in the iterator
787 * @z - The current pointer within zonelist->zones being iterated
788 * @zlist - The zonelist being iterated
789 * @highidx - The zone index of the highest zone to return
790 * @nodemask - Nodemask allowed by the allocator
791 *
792 * This iterator iterates through all zones at or below a given zone index and
793 * within a given nodemask
794 */
795#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
796 for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
797 zone; \
798 z = next_zones_zonelist(z, highidx, nodemask, &zone)) \
751 799
752#error BITS_PER_LONG not defined 800/**
801 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
802 * @zone - The current zone in the iterator
803 * @z - The current pointer within zonelist->zones being iterated
804 * @zlist - The zonelist being iterated
805 * @highidx - The zone index of the highest zone to return
806 *
807 * This iterator iterates through all zones at or below a given zone index.
808 */
809#define for_each_zone_zonelist(zone, z, zlist, highidx) \
810 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
753 811
812#ifdef CONFIG_SPARSEMEM
813#include <asm/sparsemem.h>
754#endif 814#endif
755 815
756#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ 816#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
757 !defined(CONFIG_ARCH_POPULATES_NODE_MAP) 817 !defined(CONFIG_ARCH_POPULATES_NODE_MAP)
758#define early_pfn_to_nid(nid) (0UL) 818static inline unsigned long early_pfn_to_nid(unsigned long pfn)
819{
820 return 0;
821}
759#endif 822#endif
760 823
761#ifdef CONFIG_FLATMEM 824#ifdef CONFIG_FLATMEM
@@ -833,6 +896,7 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
833 return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; 896 return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
834} 897}
835extern int __section_nr(struct mem_section* ms); 898extern int __section_nr(struct mem_section* ms);
899extern unsigned long usemap_size(void);
836 900
837/* 901/*
838 * We use the lower bits of the mem_map pointer to store 902 * We use the lower bits of the mem_map pointer to store
@@ -938,6 +1002,7 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
938#define pfn_valid_within(pfn) (1) 1002#define pfn_valid_within(pfn) (1)
939#endif 1003#endif
940 1004
1005#endif /* !__GENERATING_BOUNDS_H */
941#endif /* !__ASSEMBLY__ */ 1006#endif /* !__ASSEMBLY__ */
942#endif /* __KERNEL__ */ 1007#endif /* __KERNEL__ */
943#endif /* _LINUX_MMZONE_H */ 1008#endif /* _LINUX_MMZONE_H */
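
A hedged usage sketch of the new cursor-based iteration (not from the patch; the helper name is hypothetical): count the zones a request limited to highest_zoneidx may use, optionally filtered by a nodemask, exactly as the macro defined above is meant to be driven.

/* Minimal sketch: walk a zonelist with the new zoneref iterators. */
static int example_count_usable_zones(struct zonelist *zonelist,
                                      enum zone_type highest_zoneidx,
                                      nodemask_t *nodemask)
{
        struct zoneref *z;
        struct zone *zone;
        int nr = 0;

        /* nodemask may be NULL; for_each_zone_zonelist() passes NULL itself */
        for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx,
                                        nodemask)
                nr++;
        return nr;
}
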
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index f950921523f5..b03b27457413 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -58,7 +58,11 @@
58#define MSDOS_DOTDOT ".. " /* "..", padded to MSDOS_NAME chars */ 58#define MSDOS_DOTDOT ".. " /* "..", padded to MSDOS_NAME chars */
59 59
60/* media of boot sector */ 60/* media of boot sector */
61#define FAT_VALID_MEDIA(x) ((0xF8 <= (x) && (x) <= 0xFF) || (x) == 0xF0) 61static inline int fat_valid_media(u8 media)
62{
63 return 0xf8 <= media || media == 0xf0;
64}
65
62#define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \ 66#define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \
63 MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x)) 67 MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x))
64 68
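
A brief usage sketch of the new inline (illustration only; the helper is hypothetical). Because the argument is a u8, the old "<= 0xFF" bound is always true, which is why fat_valid_media() only keeps the ">= 0xf8 || == 0xf0" test. The media field of struct fat_boot_sector is assumed from this header's existing definitions.

/* Minimal sketch: validate the media descriptor from a FAT boot sector. */
static int example_check_media(struct fat_boot_sector *b)
{
        if (!fat_valid_media(b->media)) {
                printk(KERN_ERR "FAT: invalid media value (0x%02x)\n",
                       b->media);
                return -EINVAL;
        }
        return 0;
}
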
@@ -195,6 +199,7 @@ struct fat_mount_options {
195 char *iocharset; /* Charset used for filename input/display */ 199 char *iocharset; /* Charset used for filename input/display */
196 unsigned short shortname; /* flags for shortname display/create rule */ 200 unsigned short shortname; /* flags for shortname display/create rule */
197 unsigned char name_check; /* r = relaxed, n = normal, s = strict */ 201 unsigned char name_check; /* r = relaxed, n = normal, s = strict */
202 unsigned short allow_utime;/* permission for setting the [am]time */
198 unsigned quiet:1, /* set = fake successful chmods and chowns */ 203 unsigned quiet:1, /* set = fake successful chmods and chowns */
199 showexec:1, /* set = only set x bit for com/exe/bat */ 204 showexec:1, /* set = only set x bit for com/exe/bat */
200 sys_immutable:1, /* set = system files are immutable */ 205 sys_immutable:1, /* set = system files are immutable */
@@ -232,6 +237,7 @@ struct msdos_sb_info {
232 struct mutex fat_lock; 237 struct mutex fat_lock;
233 unsigned int prev_free; /* previously allocated cluster number */ 238 unsigned int prev_free; /* previously allocated cluster number */
234 unsigned int free_clusters; /* -1 if undefined */ 239 unsigned int free_clusters; /* -1 if undefined */
240 unsigned int free_clus_valid; /* is free_clusters valid? */
235 struct fat_mount_options options; 241 struct fat_mount_options options;
236 struct nls_table *nls_disk; /* Codepage used on disk */ 242 struct nls_table *nls_disk; /* Codepage used on disk */
237 struct nls_table *nls_io; /* Charset used for input and display */ 243 struct nls_table *nls_io; /* Charset used for input and display */
@@ -401,7 +407,7 @@ extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
401 unsigned int cmd, unsigned long arg); 407 unsigned int cmd, unsigned long arg);
402extern const struct file_operations fat_file_operations; 408extern const struct file_operations fat_file_operations;
403extern const struct inode_operations fat_file_inode_operations; 409extern const struct inode_operations fat_file_inode_operations;
404extern int fat_notify_change(struct dentry * dentry, struct iattr * attr); 410extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
405extern void fat_truncate(struct inode *inode); 411extern void fat_truncate(struct inode *inode);
406extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, 412extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
407 struct kstat *stat); 413 struct kstat *stat);
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index 88766e43e121..9f2d76347f19 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -204,6 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
204/* linux/fs/ncpfs/dir.c */ 204/* linux/fs/ncpfs/dir.c */
205extern const struct inode_operations ncp_dir_inode_operations; 205extern const struct inode_operations ncp_dir_inode_operations;
206extern const struct file_operations ncp_dir_operations; 206extern const struct file_operations ncp_dir_operations;
207extern struct dentry_operations ncp_root_dentry_operations;
207int ncp_conn_logged_in(struct super_block *); 208int ncp_conn_logged_in(struct super_block *);
208int ncp_date_dos2unix(__le16 time, __le16 date); 209int ncp_date_dos2unix(__le16 time, __le16 date);
209void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); 210void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
@@ -223,6 +224,12 @@ int ncp_disconnect(struct ncp_server *server);
223void ncp_lock_server(struct ncp_server *server); 224void ncp_lock_server(struct ncp_server *server);
224void ncp_unlock_server(struct ncp_server *server); 225void ncp_unlock_server(struct ncp_server *server);
225 226
227/* linux/fs/ncpfs/symlink.c */
228#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
229extern const struct address_space_operations ncp_symlink_aops;
230int ncp_symlink(struct inode*, struct dentry*, const char*);
231#endif
232
226/* linux/fs/ncpfs/file.c */ 233/* linux/fs/ncpfs/file.c */
227extern const struct inode_operations ncp_file_inode_operations; 234extern const struct inode_operations ncp_file_inode_operations;
228extern const struct file_operations ncp_file_operations; 235extern const struct file_operations ncp_file_operations;
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 905e18f4b412..848025cd7087 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -14,6 +14,8 @@
14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. 14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
15 * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c. 15 * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c.
16 * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c. 16 * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c.
17 * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c.
18 * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c.
17 * 19 *
18 * The available nodemask operations are: 20 * The available nodemask operations are:
19 * 21 *
@@ -55,7 +57,9 @@
55 * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing 57 * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
56 * int nodelist_parse(buf, map) Parse ascii string as nodelist 58 * int nodelist_parse(buf, map) Parse ascii string as nodelist
57 * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) 59 * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
58 * int nodes_remap(dst, src, old, new) *dst = map(old, new)(dst) 60 * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
61 * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap
62 * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz
59 * 63 *
60 * for_each_node_mask(node, mask) for-loop node over mask 64 * for_each_node_mask(node, mask) for-loop node over mask
61 * 65 *
@@ -326,6 +330,22 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
326 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); 330 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
327} 331}
328 332
333#define nodes_onto(dst, orig, relmap) \
334 __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
335static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
336 const nodemask_t *relmapp, int nbits)
337{
338 bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
339}
340
341#define nodes_fold(dst, orig, sz) \
342 __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
343static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
344 int sz, int nbits)
345{
346 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
347}
348
329#if MAX_NUMNODES > 1 349#if MAX_NUMNODES > 1
330#define for_each_node_mask(node, mask) \ 350#define for_each_node_mask(node, mask) \
331 for ((node) = first_node(mask); \ 351 for ((node) = first_node(mask); \
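
A hedged sketch of how the two new operations compose (not from the patch; the helper name is hypothetical). This is roughly what a relative-nodes memory policy needs: fold a user-supplied mask to the size of the allowed set, then map it onto the allowed nodes.

/* Minimal sketch: remap a user nodemask relative to the allowed nodes. */
static void example_relative_nodemask(nodemask_t *dst,
                                      const nodemask_t *user,
                                      const nodemask_t *allowed)
{
        nodemask_t folded;

        nodes_fold(folded, *user, nodes_weight(*allowed));
        nodes_onto(*dst, folded, *allowed);
}
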
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index f4df40038f0c..20dfed590183 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -247,6 +247,7 @@ extern struct blocking_notifier_head reboot_notifier_list;
247#define VT_DEALLOCATE 0x0002 /* Console will be deallocated */ 247#define VT_DEALLOCATE 0x0002 /* Console will be deallocated */
248#define VT_WRITE 0x0003 /* A char got output */ 248#define VT_WRITE 0x0003 /* A char got output */
249#define VT_UPDATE 0x0004 /* A bigger update occurred */ 249#define VT_UPDATE 0x0004 /* A bigger update occurred */
250#define VT_PREWRITE 0x0005 /* A char is about to be written to the console */
250 251
251#endif /* __KERNEL__ */ 252#endif /* __KERNEL__ */
252#endif /* _LINUX_NOTIFIER_H */ 253#endif /* _LINUX_NOTIFIER_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 3852436b652a..a7979baf1e39 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -23,8 +23,8 @@ enum oom_constraint {
23 CONSTRAINT_MEMORY_POLICY, 23 CONSTRAINT_MEMORY_POLICY,
24}; 24};
25 25
26extern int try_set_zone_oom(struct zonelist *zonelist); 26extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
27extern void clear_zonelist_oom(struct zonelist *zonelist); 27extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
28 28
29extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); 29extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
30extern int register_oom_notifier(struct notifier_block *nb); 30extern int register_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index b5b30f1c1e59..590cff32415d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -6,7 +6,10 @@
6#define PAGE_FLAGS_H 6#define PAGE_FLAGS_H
7 7
8#include <linux/types.h> 8#include <linux/types.h>
9#ifndef __GENERATING_BOUNDS_H
9#include <linux/mm_types.h> 10#include <linux/mm_types.h>
11#include <linux/bounds.h>
12#endif /* !__GENERATING_BOUNDS_H */
10 13
11/* 14/*
12 * Various page->flags bits: 15 * Various page->flags bits:
@@ -59,77 +62,138 @@
59 * extends from the high bits downwards. 62 * extends from the high bits downwards.
60 * 63 *
61 * | FIELD | ... | FLAGS | 64 * | FIELD | ... | FLAGS |
62 * N-1 ^ 0 65 * N-1 ^ 0
63 * (N-FLAGS_RESERVED) 66 * (NR_PAGEFLAGS)
64 * 67 *
65 * The fields area is reserved for fields mapping zone, node and SPARSEMEM 68 * The fields area is reserved for fields mapping zone, node (for NUMA) and
66 * section. The boundry between these two areas is defined by 69 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
67 * FLAGS_RESERVED which defines the width of the fields section 70 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
68 * (see linux/mmzone.h). New flags must _not_ overlap with this area.
69 */ 71 */
70#define PG_locked 0 /* Page is locked. Don't touch. */ 72enum pageflags {
71#define PG_error 1 73 PG_locked, /* Page is locked. Don't touch. */
72#define PG_referenced 2 74 PG_error,
73#define PG_uptodate 3 75 PG_referenced,
76 PG_uptodate,
77 PG_dirty,
78 PG_lru,
79 PG_active,
80 PG_slab,
81 PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
82 PG_arch_1,
83 PG_reserved,
84 PG_private, /* If pagecache, has fs-private data */
85 PG_writeback, /* Page is under writeback */
86#ifdef CONFIG_PAGEFLAGS_EXTENDED
87 PG_head, /* A head page */
88 PG_tail, /* A tail page */
89#else
90 PG_compound, /* A compound page */
91#endif
92 PG_swapcache, /* Swap page: swp_entry_t in private */
93 PG_mappedtodisk, /* Has blocks allocated on-disk */
94 PG_reclaim, /* To be reclaimed asap */
95 PG_buddy, /* Page is free, on buddy lists */
96#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
97 PG_uncached, /* Page has been mapped as uncached */
98#endif
99 __NR_PAGEFLAGS
100};
101
102#ifndef __GENERATING_BOUNDS_H
103
104/*
105 * Macros to create function definitions for page flags
106 */
107#define TESTPAGEFLAG(uname, lname) \
108static inline int Page##uname(struct page *page) \
109 { return test_bit(PG_##lname, &page->flags); }
74 110
75#define PG_dirty 4 111#define SETPAGEFLAG(uname, lname) \
76#define PG_lru 5 112static inline void SetPage##uname(struct page *page) \
77#define PG_active 6 113 { set_bit(PG_##lname, &page->flags); }
78#define PG_slab 7 /* slab debug (Suparna wants this) */
79 114
80#define PG_owner_priv_1 8 /* Owner use. If pagecache, fs may use*/ 115#define CLEARPAGEFLAG(uname, lname) \
81#define PG_arch_1 9 116static inline void ClearPage##uname(struct page *page) \
82#define PG_reserved 10 117 { clear_bit(PG_##lname, &page->flags); }
83#define PG_private 11 /* If pagecache, has fs-private data */
84 118
85#define PG_writeback 12 /* Page is under writeback */ 119#define __SETPAGEFLAG(uname, lname) \
86#define PG_compound 14 /* Part of a compound page */ 120static inline void __SetPage##uname(struct page *page) \
87#define PG_swapcache 15 /* Swap page: swp_entry_t in private */ 121 { __set_bit(PG_##lname, &page->flags); }
88 122
89#define PG_mappedtodisk 16 /* Has blocks allocated on-disk */ 123#define __CLEARPAGEFLAG(uname, lname) \
90#define PG_reclaim 17 /* To be reclaimed asap */ 124static inline void __ClearPage##uname(struct page *page) \
91#define PG_buddy 19 /* Page is free, on buddy lists */ 125 { __clear_bit(PG_##lname, &page->flags); }
126
127#define TESTSETFLAG(uname, lname) \
128static inline int TestSetPage##uname(struct page *page) \
129 { return test_and_set_bit(PG_##lname, &page->flags); }
130
131#define TESTCLEARFLAG(uname, lname) \
132static inline int TestClearPage##uname(struct page *page) \
133 { return test_and_clear_bit(PG_##lname, &page->flags); }
92 134
93/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
94#define PG_readahead PG_reclaim /* Reminder to do async read-ahead */
95 135
96/* PG_owner_priv_1 users should have descriptive aliases */ 136#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
97#define PG_checked PG_owner_priv_1 /* Used by some filesystems */ 137 SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
98#define PG_pinned PG_owner_priv_1 /* Xen pinned pagetable */ 138
139#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
140 __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)
141
142#define PAGEFLAG_FALSE(uname) \
143static inline int Page##uname(struct page *page) \
144 { return 0; }
145
146#define TESTSCFLAG(uname, lname) \
147 TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
148
149struct page; /* forward declaration */
150
151PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
152PAGEFLAG(Error, error)
153PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
154PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
155PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
156PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
157__PAGEFLAG(Slab, slab)
158PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */
159PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */
160PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
161PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
162 __SETPAGEFLAG(Private, private)
99 163
100#if (BITS_PER_LONG > 32)
101/* 164/*
102 * 64-bit-only flags build down from bit 31 165 * Only test-and-set exist for PG_writeback. The unconditional operators are
103 * 166 * risky: they bypass page accounting.
104 * 32 bit -------------------------------| FIELDS | FLAGS |
105 * 64 bit | FIELDS | ?????? FLAGS |
106 * 63 32 0
107 */ 167 */
108#define PG_uncached 31 /* Page has been mapped as uncached */ 168TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
109#endif 169__PAGEFLAG(Buddy, buddy)
170PAGEFLAG(MappedToDisk, mappedtodisk)
110 171
172/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
173PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
174PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */
175
176#ifdef CONFIG_HIGHMEM
111/* 177/*
112 * Manipulation of page state flags 178 * Must use a macro here due to header dependency issues. page_zone() is not
179 * available at this point.
113 */ 180 */
114#define PageLocked(page) \ 181#define PageHighMem(__p) is_highmem(page_zone(__p))
115 test_bit(PG_locked, &(page)->flags) 182#else
116#define SetPageLocked(page) \ 183PAGEFLAG_FALSE(HighMem)
117 set_bit(PG_locked, &(page)->flags) 184#endif
118#define TestSetPageLocked(page) \ 185
119 test_and_set_bit(PG_locked, &(page)->flags) 186#ifdef CONFIG_SWAP
120#define ClearPageLocked(page) \ 187PAGEFLAG(SwapCache, swapcache)
121 clear_bit(PG_locked, &(page)->flags) 188#else
122#define TestClearPageLocked(page) \ 189PAGEFLAG_FALSE(SwapCache)
123 test_and_clear_bit(PG_locked, &(page)->flags) 190#endif
124 191
125#define PageError(page) test_bit(PG_error, &(page)->flags) 192#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
126#define SetPageError(page) set_bit(PG_error, &(page)->flags) 193PAGEFLAG(Uncached, uncached)
127#define ClearPageError(page) clear_bit(PG_error, &(page)->flags) 194#else
128 195PAGEFLAG_FALSE(Uncached)
129#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags) 196#endif
130#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
131#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
132#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
133 197
134static inline int PageUptodate(struct page *page) 198static inline int PageUptodate(struct page *page)
135{ 199{
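
To make the macro family above concrete, this is (illustratively) what the single line "PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty)" expands to; the generated names match the hand-written #defines the second half of this patch removes. (__CLEARPAGEFLAG(Dirty, dirty) additionally generates __ClearPageDirty().)

/* Expansion sketch of PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty): */
static inline int PageDirty(struct page *page)
        { return test_bit(PG_dirty, &page->flags); }
static inline void SetPageDirty(struct page *page)
        { set_bit(PG_dirty, &page->flags); }
static inline void ClearPageDirty(struct page *page)
        { clear_bit(PG_dirty, &page->flags); }
static inline int TestSetPageDirty(struct page *page)
        { return test_and_set_bit(PG_dirty, &page->flags); }
static inline int TestClearPageDirty(struct page *page)
        { return test_and_clear_bit(PG_dirty, &page->flags); }
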
@@ -177,97 +241,59 @@ static inline void SetPageUptodate(struct page *page)
177#endif 241#endif
178} 242}
179 243
180#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags) 244CLEARPAGEFLAG(Uptodate, uptodate)
181
182#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
183#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
184#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
185#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
186#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
187#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
188
189#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
190#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
191#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags)
192#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags)
193
194#define PageActive(page) test_bit(PG_active, &(page)->flags)
195#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
196#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
197#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags)
198
199#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
200#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags)
201#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags)
202
203#ifdef CONFIG_HIGHMEM
204#define PageHighMem(page) is_highmem(page_zone(page))
205#else
206#define PageHighMem(page) 0 /* needed to optimize away at compile time */
207#endif
208 245
209#define PageChecked(page) test_bit(PG_checked, &(page)->flags) 246extern void cancel_dirty_page(struct page *page, unsigned int account_size);
210#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
211#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
212
213#define PagePinned(page) test_bit(PG_pinned, &(page)->flags)
214#define SetPagePinned(page) set_bit(PG_pinned, &(page)->flags)
215#define ClearPagePinned(page) clear_bit(PG_pinned, &(page)->flags)
216 247
217#define PageReserved(page) test_bit(PG_reserved, &(page)->flags) 248int test_clear_page_writeback(struct page *page);
218#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags) 249int test_set_page_writeback(struct page *page);
219#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags)
220#define __ClearPageReserved(page) __clear_bit(PG_reserved, &(page)->flags)
221 250
222#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags) 251static inline void set_page_writeback(struct page *page)
223#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags) 252{
224#define PagePrivate(page) test_bit(PG_private, &(page)->flags) 253 test_set_page_writeback(page);
225#define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags) 254}
226#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
227 255
256#ifdef CONFIG_PAGEFLAGS_EXTENDED
228/* 257/*
229 * Only test-and-set exist for PG_writeback. The unconditional operators are 258 * System with lots of page flags available. This allows separate
230 * risky: they bypass page accounting. 259 * flags for PageHead() and PageTail() checks of compound pages so that bit
260 * tests can be used in performance sensitive paths. PageCompound is
261 * generally not used in hot code paths.
231 */ 262 */
232#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags) 263__PAGEFLAG(Head, head)
233#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \ 264__PAGEFLAG(Tail, tail)
234 &(page)->flags)
235#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \
236 &(page)->flags)
237 265
238#define PageBuddy(page) test_bit(PG_buddy, &(page)->flags) 266static inline int PageCompound(struct page *page)
239#define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags) 267{
240#define __ClearPageBuddy(page) __clear_bit(PG_buddy, &(page)->flags) 268 return page->flags & ((1L << PG_head) | (1L << PG_tail));
241
242#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
243#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
244#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
245
246#define PageReadahead(page) test_bit(PG_readahead, &(page)->flags)
247#define SetPageReadahead(page) set_bit(PG_readahead, &(page)->flags)
248#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags)
249
250#define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags)
251#define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags)
252#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
253#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
254 269
255#define PageCompound(page) test_bit(PG_compound, &(page)->flags) 270}
256#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags) 271#else
257#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags) 272/*
273 * Reduce page flag use as much as possible by overlapping
274 * compound page flags with the flags used for page cache pages. Possible
275 * because PageCompound is always set for compound pages and not for
276 * pages on the LRU and/or pagecache.
277 */
278TESTPAGEFLAG(Compound, compound)
279__PAGEFLAG(Head, compound)
258 280
259/* 281/*
260 * PG_reclaim is used in combination with PG_compound to mark the 282 * PG_reclaim is used in combination with PG_compound to mark the
261 * head and tail of a compound page 283 * head and tail of a compound page. This saves one page flag
284 * but makes it impossible to use compound pages for the page cache.
285 * The PG_reclaim bit would have to be used for reclaim or readahead
286 * if compound pages enter the page cache.
262 * 287 *
263 * PG_compound & PG_reclaim => Tail page 288 * PG_compound & PG_reclaim => Tail page
264 * PG_compound & ~PG_reclaim => Head page 289 * PG_compound & ~PG_reclaim => Head page
265 */ 290 */
266
267#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) 291#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
268 292
269#define PageTail(page) (((page)->flags & PG_head_tail_mask) \ 293static inline int PageTail(struct page *page)
270 == PG_head_tail_mask) 294{
295 return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
296}
271 297
272static inline void __SetPageTail(struct page *page) 298static inline void __SetPageTail(struct page *page)
273{ 299{
@@ -279,33 +305,6 @@ static inline void __ClearPageTail(struct page *page)
279 page->flags &= ~PG_head_tail_mask; 305 page->flags &= ~PG_head_tail_mask;
280} 306}
281 307
282#define PageHead(page) (((page)->flags & PG_head_tail_mask) \ 308#endif /* !PAGEFLAGS_EXTENDED */
283 == (1L << PG_compound)) 309#endif /* !__GENERATING_BOUNDS_H */
284#define __SetPageHead(page) __SetPageCompound(page)
285#define __ClearPageHead(page) __ClearPageCompound(page)
286
287#ifdef CONFIG_SWAP
288#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
289#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
290#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
291#else
292#define PageSwapCache(page) 0
293#endif
294
295#define PageUncached(page) test_bit(PG_uncached, &(page)->flags)
296#define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
297#define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
298
299struct page; /* forward declaration */
300
301extern void cancel_dirty_page(struct page *page, unsigned int account_size);
302
303int test_clear_page_writeback(struct page *page);
304int test_set_page_writeback(struct page *page);
305
306static inline void set_page_writeback(struct page *page)
307{
308 test_set_page_writeback(page);
309}
310
311#endif /* PAGE_FLAGS_H */ 310#endif /* PAGE_FLAGS_H */
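
Whichever branch is configured, callers see the same compound-page predicates: PageCompound(), PageHead() and PageTail() exist in both the CONFIG_PAGEFLAGS_EXTENDED layout and the overlapped PG_compound/PG_reclaim layout shown above. A hedged classification sketch; the helper name is hypothetical.

/* Minimal sketch: same answers under either page-flag layout. */
static int example_classify_page(struct page *page)
{
        if (!PageCompound(page))
                return 0;               /* ordinary page */
        if (PageTail(page))
                return 2;               /* tail page of a compound page */
        return 1;                       /* head page (PageHead(page)) */
}
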
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 5c80b1939636..5ad79198d6f9 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -16,7 +16,8 @@
16# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */ 16# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */
17# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */ 17# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */
18 18
19/* Get/set whether or not to drop capabilities on setuid() away from uid 0 */ 19/* Get/set whether or not to drop capabilities on setuid() away from
20 * uid 0 (as per security/commoncap.c) */
20#define PR_GET_KEEPCAPS 7 21#define PR_GET_KEEPCAPS 7
21#define PR_SET_KEEPCAPS 8 22#define PR_SET_KEEPCAPS 8
22 23
@@ -63,7 +64,7 @@
63#define PR_GET_SECCOMP 21 64#define PR_GET_SECCOMP 21
64#define PR_SET_SECCOMP 22 65#define PR_SET_SECCOMP 22
65 66
66/* Get/set the capability bounding set */ 67/* Get/set the capability bounding set (as per security/commoncap.c) */
67#define PR_CAPBSET_READ 23 68#define PR_CAPBSET_READ 23
68#define PR_CAPBSET_DROP 24 69#define PR_CAPBSET_DROP 24
69 70
@@ -73,4 +74,8 @@
73# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */ 74# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
74# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */ 75# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
75 76
77/* Get/set securebits (as per security/commoncap.c) */
78#define PR_GET_SECUREBITS 27
79#define PR_SET_SECUREBITS 28
80
76#endif /* _LINUX_PRCTL_H */ 81#endif /* _LINUX_PRCTL_H */
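
A user-space sketch of the new pair (illustration only): it assumes the usual glibc prctl() wrapper and falls back to the numeric values added above when <sys/prctl.h> does not define them yet. Setting securebits typically requires CAP_SETPCAP, so only the read side is shown.

/* Minimal sketch: read the current securebits of this process. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_SECUREBITS
#define PR_GET_SECUREBITS 27
#define PR_SET_SECUREBITS 28
#endif

int main(void)
{
        int bits = prctl(PR_GET_SECUREBITS, 0, 0, 0, 0);

        if (bits < 0) {
                perror("PR_GET_SECUREBITS");
                return 1;
        }
        printf("securebits: %#x\n", bits);
        return 0;
}
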
diff --git a/include/linux/quota.h b/include/linux/quota.h
index eb560d031acd..52e49dce6584 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -202,10 +202,14 @@ struct quota_format_type;
202 202
203struct mem_dqinfo { 203struct mem_dqinfo {
204 struct quota_format_type *dqi_format; 204 struct quota_format_type *dqi_format;
205 int dqi_fmt_id; /* Id of the dqi_format - used when turning
206 * quotas on after remount RW */
205 struct list_head dqi_dirty_list; /* List of dirty dquots */ 207 struct list_head dqi_dirty_list; /* List of dirty dquots */
206 unsigned long dqi_flags; 208 unsigned long dqi_flags;
207 unsigned int dqi_bgrace; 209 unsigned int dqi_bgrace;
208 unsigned int dqi_igrace; 210 unsigned int dqi_igrace;
211 qsize_t dqi_maxblimit;
212 qsize_t dqi_maxilimit;
209 union { 213 union {
210 struct v1_mem_dqinfo v1_i; 214 struct v1_mem_dqinfo v1_i;
211 struct v2_mem_dqinfo v2_i; 215 struct v2_mem_dqinfo v2_i;
@@ -296,8 +300,8 @@ struct dquot_operations {
296 300
297/* Operations handling requests from userspace */ 301/* Operations handling requests from userspace */
298struct quotactl_ops { 302struct quotactl_ops {
299 int (*quota_on)(struct super_block *, int, int, char *); 303 int (*quota_on)(struct super_block *, int, int, char *, int);
300 int (*quota_off)(struct super_block *, int); 304 int (*quota_off)(struct super_block *, int, int);
301 int (*quota_sync)(struct super_block *, int); 305 int (*quota_sync)(struct super_block *, int);
302 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 306 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
303 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 307 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
@@ -318,6 +322,10 @@ struct quota_format_type {
318 322
319#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */ 323#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */
320#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */ 324#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */
325#define DQUOT_USR_SUSPENDED 0x04 /* User diskquotas are off, but
326 * we have necessary info in
327 * memory to turn them on */
328#define DQUOT_GRP_SUSPENDED 0x08 /* The same for group quotas */
321 329
322struct quota_info { 330struct quota_info {
323 unsigned int flags; /* Flags for diskquotas on this device */ 331 unsigned int flags; /* Flags for diskquotas on this device */
@@ -329,17 +337,16 @@ struct quota_info {
329 struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ 337 struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
330}; 338};
331 339
332/* Inline would be better but we need to dereference super_block which is not defined yet */
333int mark_dquot_dirty(struct dquot *dquot);
334
335#define dquot_dirty(dquot) test_bit(DQ_MOD_B, &(dquot)->dq_flags)
336
337#define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? \ 340#define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? \
338 (sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED)) 341 (sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED))
339 342
340#define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \ 343#define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \
341 sb_has_quota_enabled(sb, GRPQUOTA)) 344 sb_has_quota_enabled(sb, GRPQUOTA))
342 345
346#define sb_has_quota_suspended(sb, type) \
347 ((type) == USRQUOTA ? (sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \
348 (sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED))
349
343int register_quota_format(struct quota_format_type *fmt); 350int register_quota_format(struct quota_format_type *fmt);
344void unregister_quota_format(struct quota_format_type *fmt); 351void unregister_quota_format(struct quota_format_type *fmt);
345 352
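
The new *_SUSPENDED flags describe quotas that were shut off (for example across a read-only remount) but whose state is kept so they can be turned back on. A hedged sketch of telling the three states apart; the helper name is hypothetical.

/* Minimal sketch: active vs. suspended vs. disabled user quotas. */
static int example_usrquota_state(struct super_block *sb)
{
        if (sb_has_quota_enabled(sb, USRQUOTA))
                return 1;       /* quotas active */
        if (sb_has_quota_suspended(sb, USRQUOTA))
                return 2;       /* off, but resumable on rw remount */
        return 0;               /* quotas off */
}
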
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 5110201a4159..f86702053853 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,11 +37,11 @@ extern int dquot_release(struct dquot *dquot);
37extern int dquot_commit_info(struct super_block *sb, int type); 37extern int dquot_commit_info(struct super_block *sb, int type);
38extern int dquot_mark_dquot_dirty(struct dquot *dquot); 38extern int dquot_mark_dquot_dirty(struct dquot *dquot);
39 39
40extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path); 40extern int vfs_quota_on(struct super_block *sb, int type, int format_id,
41 char *path, int remount);
41extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, 42extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
42 int format_id, int type); 43 int format_id, int type);
43extern int vfs_quota_off(struct super_block *sb, int type); 44extern int vfs_quota_off(struct super_block *sb, int type, int remount);
44#define vfs_quota_off_mount(sb, type) vfs_quota_off(sb, type)
45extern int vfs_quota_sync(struct super_block *sb, int type); 45extern int vfs_quota_sync(struct super_block *sb, int type);
46extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 46extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
47extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 47extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
@@ -59,7 +59,7 @@ extern struct quotactl_ops vfs_quotactl_ops;
59 59
60/* It is better to call this function outside of any transaction as it might 60/* It is better to call this function outside of any transaction as it might
61 * need a lot of space in journal for dquot structure allocation. */ 61 * need a lot of space in journal for dquot structure allocation. */
62static __inline__ void DQUOT_INIT(struct inode *inode) 62static inline void DQUOT_INIT(struct inode *inode)
63{ 63{
64 BUG_ON(!inode->i_sb); 64 BUG_ON(!inode->i_sb);
65 if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) 65 if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode))
@@ -67,7 +67,7 @@ static __inline__ void DQUOT_INIT(struct inode *inode)
67} 67}
68 68
69/* The same as with DQUOT_INIT */ 69/* The same as with DQUOT_INIT */
70static __inline__ void DQUOT_DROP(struct inode *inode) 70static inline void DQUOT_DROP(struct inode *inode)
71{ 71{
72 /* Here we can get arbitrary inode from clear_inode() so we have 72 /* Here we can get arbitrary inode from clear_inode() so we have
73 * to be careful. OTOH we don't need locking as quota operations 73 * to be careful. OTOH we don't need locking as quota operations
@@ -90,7 +90,7 @@ static __inline__ void DQUOT_DROP(struct inode *inode)
90 90
91/* The following allocation/freeing/transfer functions *must* be called inside 91/* The following allocation/freeing/transfer functions *must* be called inside
92 * a transaction (deadlocks possible otherwise) */ 92 * a transaction (deadlocks possible otherwise) */
93static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 93static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
94{ 94{
95 if (sb_any_quota_enabled(inode->i_sb)) { 95 if (sb_any_quota_enabled(inode->i_sb)) {
96 /* Used space is updated in alloc_space() */ 96 /* Used space is updated in alloc_space() */
@@ -102,7 +102,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t
102 return 0; 102 return 0;
103} 103}
104 104
105static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) 105static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
106{ 106{
107 int ret; 107 int ret;
108 if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr))) 108 if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr)))
@@ -110,7 +110,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
110 return ret; 110 return ret;
111} 111}
112 112
113static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 113static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
114{ 114{
115 if (sb_any_quota_enabled(inode->i_sb)) { 115 if (sb_any_quota_enabled(inode->i_sb)) {
116 /* Used space is updated in alloc_space() */ 116 /* Used space is updated in alloc_space() */
@@ -122,7 +122,7 @@ static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
122 return 0; 122 return 0;
123} 123}
124 124
125static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) 125static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
126{ 126{
127 int ret; 127 int ret;
128 if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr))) 128 if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr)))
@@ -130,7 +130,7 @@ static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
130 return ret; 130 return ret;
131} 131}
132 132
133static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode) 133static inline int DQUOT_ALLOC_INODE(struct inode *inode)
134{ 134{
135 if (sb_any_quota_enabled(inode->i_sb)) { 135 if (sb_any_quota_enabled(inode->i_sb)) {
136 DQUOT_INIT(inode); 136 DQUOT_INIT(inode);
@@ -140,7 +140,7 @@ static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode)
140 return 0; 140 return 0;
141} 141}
142 142
143static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 143static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
144{ 144{
145 if (sb_any_quota_enabled(inode->i_sb)) 145 if (sb_any_quota_enabled(inode->i_sb))
146 inode->i_sb->dq_op->free_space(inode, nr); 146 inode->i_sb->dq_op->free_space(inode, nr);
@@ -148,19 +148,19 @@ static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
148 inode_sub_bytes(inode, nr); 148 inode_sub_bytes(inode, nr);
149} 149}
150 150
151static __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) 151static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
152{ 152{
153 DQUOT_FREE_SPACE_NODIRTY(inode, nr); 153 DQUOT_FREE_SPACE_NODIRTY(inode, nr);
154 mark_inode_dirty(inode); 154 mark_inode_dirty(inode);
155} 155}
156 156
157static __inline__ void DQUOT_FREE_INODE(struct inode *inode) 157static inline void DQUOT_FREE_INODE(struct inode *inode)
158{ 158{
159 if (sb_any_quota_enabled(inode->i_sb)) 159 if (sb_any_quota_enabled(inode->i_sb))
160 inode->i_sb->dq_op->free_inode(inode, 1); 160 inode->i_sb->dq_op->free_inode(inode, 1);
161} 161}
162 162
163static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) 163static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
164{ 164{
165 if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) { 165 if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) {
166 DQUOT_INIT(inode); 166 DQUOT_INIT(inode);
@@ -171,14 +171,32 @@ static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
171} 171}
172 172
173/* The following two functions cannot be called inside a transaction */ 173/* The following two functions cannot be called inside a transaction */
174#define DQUOT_SYNC(sb) sync_dquots(sb, -1) 174static inline void DQUOT_SYNC(struct super_block *sb)
175{
176 sync_dquots(sb, -1);
177}
175 178
176static __inline__ int DQUOT_OFF(struct super_block *sb) 179static inline int DQUOT_OFF(struct super_block *sb, int remount)
177{ 180{
178 int ret = -ENOSYS; 181 int ret = -ENOSYS;
179 182
180 if (sb_any_quota_enabled(sb) && sb->s_qcop && sb->s_qcop->quota_off) 183 if (sb->s_qcop && sb->s_qcop->quota_off)
181 ret = sb->s_qcop->quota_off(sb, -1); 184 ret = sb->s_qcop->quota_off(sb, -1, remount);
185 return ret;
186}
187
188static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
189{
190 int cnt;
191 int ret = 0, err;
192
193 if (!sb->s_qcop || !sb->s_qcop->quota_on)
194 return -ENOSYS;
195 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
196 err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
197 if (err < 0 && !ret)
198 ret = err;
199 }
182 return ret; 200 return ret;
183} 201}
184 202
@@ -189,13 +207,43 @@ static __inline__ int DQUOT_OFF(struct super_block *sb)
189 */ 207 */
190#define sb_dquot_ops (NULL) 208#define sb_dquot_ops (NULL)
191#define sb_quotactl_ops (NULL) 209#define sb_quotactl_ops (NULL)
192#define DQUOT_INIT(inode) do { } while(0) 210
193#define DQUOT_DROP(inode) do { } while(0) 211static inline void DQUOT_INIT(struct inode *inode)
194#define DQUOT_ALLOC_INODE(inode) (0) 212{
195#define DQUOT_FREE_INODE(inode) do { } while(0) 213}
196#define DQUOT_SYNC(sb) do { } while(0) 214
197#define DQUOT_OFF(sb) do { } while(0) 215static inline void DQUOT_DROP(struct inode *inode)
198#define DQUOT_TRANSFER(inode, iattr) (0) 216{
217}
218
219static inline int DQUOT_ALLOC_INODE(struct inode *inode)
220{
221 return 0;
222}
223
224static inline void DQUOT_FREE_INODE(struct inode *inode)
225{
226}
227
228static inline void DQUOT_SYNC(struct super_block *sb)
229{
230}
231
232static inline int DQUOT_OFF(struct super_block *sb, int remount)
233{
234 return 0;
235}
236
237static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
238{
239 return 0;
240}
241
242static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
243{
244 return 0;
245}
246
199static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) 247static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
200{ 248{
201 inode_add_bytes(inode, nr); 249 inode_add_bytes(inode, nr);
@@ -235,11 +283,38 @@ static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
235 283
236#endif /* CONFIG_QUOTA */ 284#endif /* CONFIG_QUOTA */
237 285
238#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_PREALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) 286static inline int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
239#define DQUOT_PREALLOC_BLOCK(inode, nr) DQUOT_PREALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) 287{
240#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_ALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) 288 return DQUOT_PREALLOC_SPACE_NODIRTY(inode,
241#define DQUOT_ALLOC_BLOCK(inode, nr) DQUOT_ALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) 289 nr << inode->i_sb->s_blocksize_bits);
242#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) DQUOT_FREE_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) 290}
243#define DQUOT_FREE_BLOCK(inode, nr) DQUOT_FREE_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) 291
292static inline int DQUOT_PREALLOC_BLOCK(struct inode *inode, qsize_t nr)
293{
294 return DQUOT_PREALLOC_SPACE(inode,
295 nr << inode->i_sb->s_blocksize_bits);
296}
297
298static inline int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
299{
300 return DQUOT_ALLOC_SPACE_NODIRTY(inode,
301 nr << inode->i_sb->s_blocksize_bits);
302}
303
304static inline int DQUOT_ALLOC_BLOCK(struct inode *inode, qsize_t nr)
305{
306 return DQUOT_ALLOC_SPACE(inode,
307 nr << inode->i_sb->s_blocksize_bits);
308}
309
310static inline void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
311{
312 DQUOT_FREE_SPACE_NODIRTY(inode, nr << inode->i_sb->s_blocksize_bits);
313}
314
315static inline void DQUOT_FREE_BLOCK(struct inode *inode, qsize_t nr)
316{
317 DQUOT_FREE_SPACE(inode, nr << inode->i_sb->s_blocksize_bits);
318}
244 319
245#endif /* _LINUX_QUOTAOPS_ */ 320#endif /* _LINUX_QUOTAOPS_ */
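
A hedged sketch of how a filesystem's remount path might drive the two remount-aware helpers defined above (not from the patch; the helper name is hypothetical, MS_RDONLY comes from linux/fs.h, and the rest of the remount logic is elided).

/* Minimal sketch: suspend quotas on ro remount, resume them on rw remount. */
static int example_remount_quota(struct super_block *sb, int *flags)
{
        if (*flags & MS_RDONLY)
                return DQUOT_OFF(sb, 1);        /* remount=1: suspend, keep state */
        return DQUOT_ON_REMOUNT(sb);            /* re-enable suspended quota types */
}
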
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 93678f57ccbe..f0827d31ae6f 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -252,6 +252,8 @@ struct r6_state {
252#define STRIPE_EXPANDING 9 252#define STRIPE_EXPANDING 9
253#define STRIPE_EXPAND_SOURCE 10 253#define STRIPE_EXPAND_SOURCE 10
254#define STRIPE_EXPAND_READY 11 254#define STRIPE_EXPAND_READY 11
255#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
256#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
255/* 257/*
256 * Operations flags (in issue order) 258 * Operations flags (in issue order)
257 */ 259 */
@@ -316,12 +318,17 @@ struct raid5_private_data {
316 int previous_raid_disks; 318 int previous_raid_disks;
317 319
318 struct list_head handle_list; /* stripes needing handling */ 320 struct list_head handle_list; /* stripes needing handling */
321 struct list_head hold_list; /* preread ready stripes */
319 struct list_head delayed_list; /* stripes that have plugged requests */ 322 struct list_head delayed_list; /* stripes that have plugged requests */
320 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ 323 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
321 struct bio *retry_read_aligned; /* currently retrying aligned bios */ 324 struct bio *retry_read_aligned; /* currently retrying aligned bios */
322 struct bio *retry_read_aligned_list; /* aligned bios retry list */ 325 struct bio *retry_read_aligned_list; /* aligned bios retry list */
323 atomic_t preread_active_stripes; /* stripes with scheduled io */ 326 atomic_t preread_active_stripes; /* stripes with scheduled io */
324 atomic_t active_aligned_reads; 327 atomic_t active_aligned_reads;
328 atomic_t pending_full_writes; /* full write backlog */
329 int bypass_count; /* bypassed prereads */
330 int bypass_threshold; /* preread nice */
331 struct list_head *last_hold; /* detect hold_list promotions */
325 332
326 atomic_t reshape_stripes; /* stripes with pending writes for reshape */ 333 atomic_t reshape_stripes; /* stripes with pending writes for reshape */
327 /* unfortunately we need two cache names as we temporarily have 334 /* unfortunately we need two cache names as we temporarily have
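
The new hold_list, bypass_count and bypass_threshold fields back a preread-bypass heuristic: stripes parked for preread can be passed over in favour of other work, but only a bounded number of times. A minimal sketch, assuming an illustrative helper name (the real policy lives in drivers/md/raid5.c and is more involved), of how the threshold could gate promotion off hold_list:

#include <linux/list.h>
#include <linux/raid/raid5.h>

/* Illustrative only: pick the next stripe, bypassing held prereads no
 * more than bypass_threshold consecutive times before serving them. */
static struct stripe_head *example_next_stripe(struct raid5_private_data *conf)
{
	struct list_head *list;

	if (!list_empty(&conf->handle_list) &&
	    (list_empty(&conf->hold_list) ||
	     conf->bypass_count < conf->bypass_threshold)) {
		if (!list_empty(&conf->hold_list))
			conf->bypass_count++;	/* prereads waited again */
		list = &conf->handle_list;
	} else if (!list_empty(&conf->hold_list)) {
		conf->bypass_count = 0;		/* prereads get served now */
		list = &conf->hold_list;
	} else {
		return NULL;
	}

	return list_entry(list->next, struct stripe_head, lru);
}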
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 8e7eff2cd0ab..4aacaeecb56f 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2176,6 +2176,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp,
2176 unsigned int cmd, unsigned long arg); 2176 unsigned int cmd, unsigned long arg);
2177long reiserfs_compat_ioctl(struct file *filp, 2177long reiserfs_compat_ioctl(struct file *filp,
2178 unsigned int cmd, unsigned long arg); 2178 unsigned int cmd, unsigned long arg);
2179int reiserfs_unpack(struct inode *inode, struct file *filp);
2179 2180
2180/* ioctl's command */ 2181/* ioctl's command */
2181#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long) 2182#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d0bd97044abd..024d72b47a0c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,6 @@ struct sched_param {
68#include <linux/smp.h> 68#include <linux/smp.h>
69#include <linux/sem.h> 69#include <linux/sem.h>
70#include <linux/signal.h> 70#include <linux/signal.h>
71#include <linux/securebits.h>
72#include <linux/fs_struct.h> 71#include <linux/fs_struct.h>
73#include <linux/compiler.h> 72#include <linux/compiler.h>
74#include <linux/completion.h> 73#include <linux/completion.h>
@@ -1133,7 +1132,7 @@ struct task_struct {
1133 gid_t gid,egid,sgid,fsgid; 1132 gid_t gid,egid,sgid,fsgid;
1134 struct group_info *group_info; 1133 struct group_info *group_info;
1135 kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; 1134 kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
1136 unsigned keep_capabilities:1; 1135 unsigned securebits;
1137 struct user_struct *user; 1136 struct user_struct *user;
1138#ifdef CONFIG_KEYS 1137#ifdef CONFIG_KEYS
1139 struct key *request_key_auth; /* assumed request_key authority */ 1138 struct key *request_key_auth; /* assumed request_key authority */
@@ -1798,6 +1797,8 @@ extern void mmput(struct mm_struct *);
1798extern struct mm_struct *get_task_mm(struct task_struct *task); 1797extern struct mm_struct *get_task_mm(struct task_struct *task);
1799/* Remove the current tasks stale references to the old mm_struct */ 1798/* Remove the current tasks stale references to the old mm_struct */
1800extern void mm_release(struct task_struct *, struct mm_struct *); 1799extern void mm_release(struct task_struct *, struct mm_struct *);
1800/* Allocate a new mm structure and copy contents from tsk->mm */
1801extern struct mm_struct *dup_mm(struct task_struct *tsk);
1801 1802
1802extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); 1803extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
1803extern void flush_thread(void); 1804extern void flush_thread(void);
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index 5b0617840fa4..c1f19dbceb05 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -3,28 +3,39 @@
3 3
4#define SECUREBITS_DEFAULT 0x00000000 4#define SECUREBITS_DEFAULT 0x00000000
5 5
6extern unsigned securebits;
7
8/* When set UID 0 has no special privileges. When unset, we support 6/* When set UID 0 has no special privileges. When unset, we support
9 inheritance of root-permissions and suid-root executable under 7 inheritance of root-permissions and suid-root executable under
10 compatibility mode. We raise the effective and inheritable bitmasks 8 compatibility mode. We raise the effective and inheritable bitmasks
11 *of the executable file* if the effective uid of the new process is 9 *of the executable file* if the effective uid of the new process is
12 0. If the real uid is 0, we raise the inheritable bitmask of the 10 0. If the real uid is 0, we raise the inheritable bitmask of the
13 executable file. */ 11 executable file. */
14#define SECURE_NOROOT 0 12#define SECURE_NOROOT 0
13#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */
15 14
16/* When set, setuid to/from uid 0 does not trigger capability-"fixes" 15/* When set, setuid to/from uid 0 does not trigger capability-"fixes"
 17 to be compatible with old programs relying on set*uid to lose 16 to be compatible with old programs relying on set*uid to lose
18 privileges. When unset, setuid doesn't change privileges. */ 17 privileges. When unset, setuid doesn't change privileges. */
19#define SECURE_NO_SETUID_FIXUP 2 18#define SECURE_NO_SETUID_FIXUP 2
19#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */
20
21/* When set, a process can retain its capabilities even after
22 transitioning to a non-root user (the set-uid fixup suppressed by
 23 bit 2). Bit-4 is cleared when a process calls exec(); setting both
 24 bits 4 and 5 creates a barrier through exec so that no exec()'d
 25 child can use this feature again. */
26#define SECURE_KEEP_CAPS 4
27#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */
20 28
 21/* Each secure setting is implemented using two bits. One bit specifies 29/* Each secure setting is implemented using two bits. One bit specifies
 22 whether the setting is on or off. The other bit specifies whether the 30 whether the setting is on or off. The other bit specifies whether the
23 setting is fixed or not. A setting which is fixed cannot be changed 31 setting is fixed or not. A setting which is fixed cannot be changed
24 from user-level. */ 32 from user-level. */
33#define issecure_mask(X) (1 << (X))
34#define issecure(X) (issecure_mask(X) & current->securebits)
25 35
26#define issecure(X) ( (1 << (X+1)) & SECUREBITS_DEFAULT ? \ 36#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
27 (1 << (X)) & SECUREBITS_DEFAULT : \ 37 issecure_mask(SECURE_NO_SETUID_FIXUP) | \
28 (1 << (X)) & securebits ) 38 issecure_mask(SECURE_KEEP_CAPS))
39#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1)
29 40
30#endif /* !_LINUX_SECUREBITS_H */ 41#endif /* !_LINUX_SECUREBITS_H */
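
With the hunk above, issecure() now tests the per-process current->securebits word (added to task_struct in the sched.h hunk earlier) instead of a global, and every setting gains a companion *_LOCKED flag one bit above it, which is why SECURE_ALL_LOCKS is simply SECURE_ALL_BITS shifted left by one. A minimal sketch, assuming an illustrative helper name (real changes to these bits go through the capability security module), of how the new helpers compose:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/securebits.h>

/* Illustrative only: enable KEEP_CAPS for the current task and lock it,
 * unless the bit pair has already been made immutable. */
static int example_lock_keep_caps(void)
{
	if (issecure(SECURE_KEEP_CAPS_LOCKED))
		return -EPERM;		/* pair is locked, refuse changes */

	current->securebits |= issecure_mask(SECURE_KEEP_CAPS) |
			       issecure_mask(SECURE_KEEP_CAPS_LOCKED);
	return 0;
}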
diff --git a/include/linux/security.h b/include/linux/security.h
index 53a34539382a..e6299e50e210 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -34,8 +34,6 @@
34#include <linux/xfrm.h> 34#include <linux/xfrm.h>
35#include <net/flow.h> 35#include <net/flow.h>
36 36
37extern unsigned securebits;
38
39/* Maximum number of letters for an LSM name string */ 37/* Maximum number of letters for an LSM name string */
40#define SECURITY_NAME_MAX 10 38#define SECURITY_NAME_MAX 10
41 39
@@ -61,6 +59,8 @@ extern int cap_inode_need_killpriv(struct dentry *dentry);
61extern int cap_inode_killpriv(struct dentry *dentry); 59extern int cap_inode_killpriv(struct dentry *dentry);
62extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); 60extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
63extern void cap_task_reparent_to_init (struct task_struct *p); 61extern void cap_task_reparent_to_init (struct task_struct *p);
62extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
63 unsigned long arg4, unsigned long arg5, long *rc_p);
64extern int cap_task_setscheduler (struct task_struct *p, int policy, struct sched_param *lp); 64extern int cap_task_setscheduler (struct task_struct *p, int policy, struct sched_param *lp);
65extern int cap_task_setioprio (struct task_struct *p, int ioprio); 65extern int cap_task_setioprio (struct task_struct *p, int ioprio);
66extern int cap_task_setnice (struct task_struct *p, int nice); 66extern int cap_task_setnice (struct task_struct *p, int nice);
@@ -720,7 +720,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
 720 * @arg3 contains an argument. 720 * @arg3 contains an argument.
 721 * @arg4 contains an argument. 721 * @arg4 contains an argument.
 722 * @arg5 contains an argument. 722 * @arg5 contains an argument.
723 * Return 0 if permission is granted. 723 * @rc_p contains a pointer to communicate back the forced return code
724 * Return 0 if permission is granted, and non-zero if the security module
725 * has taken responsibility (setting *rc_p) for the prctl call.
724 * @task_reparent_to_init: 726 * @task_reparent_to_init:
725 * Set the security attributes in @p->security for a kernel thread that 727 * Set the security attributes in @p->security for a kernel thread that
726 * is being reparented to the init task. 728 * is being reparented to the init task.
@@ -1420,7 +1422,7 @@ struct security_operations {
1420 int (*task_wait) (struct task_struct * p); 1422 int (*task_wait) (struct task_struct * p);
1421 int (*task_prctl) (int option, unsigned long arg2, 1423 int (*task_prctl) (int option, unsigned long arg2,
1422 unsigned long arg3, unsigned long arg4, 1424 unsigned long arg3, unsigned long arg4,
1423 unsigned long arg5); 1425 unsigned long arg5, long *rc_p);
1424 void (*task_reparent_to_init) (struct task_struct * p); 1426 void (*task_reparent_to_init) (struct task_struct * p);
1425 void (*task_to_inode)(struct task_struct *p, struct inode *inode); 1427 void (*task_to_inode)(struct task_struct *p, struct inode *inode);
1426 1428
@@ -1684,7 +1686,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info,
1684 int sig, u32 secid); 1686 int sig, u32 secid);
1685int security_task_wait(struct task_struct *p); 1687int security_task_wait(struct task_struct *p);
1686int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, 1688int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
1687 unsigned long arg4, unsigned long arg5); 1689 unsigned long arg4, unsigned long arg5, long *rc_p);
1688void security_task_reparent_to_init(struct task_struct *p); 1690void security_task_reparent_to_init(struct task_struct *p);
1689void security_task_to_inode(struct task_struct *p, struct inode *inode); 1691void security_task_to_inode(struct task_struct *p, struct inode *inode);
1690int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); 1692int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
@@ -2271,9 +2273,9 @@ static inline int security_task_wait (struct task_struct *p)
2271static inline int security_task_prctl (int option, unsigned long arg2, 2273static inline int security_task_prctl (int option, unsigned long arg2,
2272 unsigned long arg3, 2274 unsigned long arg3,
2273 unsigned long arg4, 2275 unsigned long arg4,
2274 unsigned long arg5) 2276 unsigned long arg5, long *rc_p)
2275{ 2277{
 2276 return 0; 2278 return cap_task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
2277} 2279}
2278 2280
2279static inline void security_task_reparent_to_init (struct task_struct *p) 2281static inline void security_task_reparent_to_init (struct task_struct *p)
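
The task_prctl hook now takes an rc_p out-parameter: a module that wants to own a prctl call sets *rc_p to the value prctl() should return and signals that by returning non-zero, while returning zero leaves the call to the core code. A minimal sketch, assuming an illustrative hook name and a single handled option, of an implementation following that convention:

#include <linux/errno.h>
#include <linux/prctl.h>
#include <linux/security.h>

/* Illustrative only: handle one option, defer everything else to the
 * commoncap implementation declared above. */
static int example_task_prctl(int option, unsigned long arg2,
			      unsigned long arg3, unsigned long arg4,
			      unsigned long arg5, long *rc_p)
{
	if (option != PR_SET_KEEPCAPS)
		return cap_task_prctl(option, arg2, arg3, arg4, arg5, rc_p);

	*rc_p = (arg2 > 1) ? -EINVAL : 0;	/* forced prctl() return value */
	return 1;				/* we took responsibility */
}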
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 00b65c0a82ca..3d37c94abbc8 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -46,6 +46,7 @@ enum {
46 PLAT8250_DEV_HUB6, 46 PLAT8250_DEV_HUB6,
47 PLAT8250_DEV_MCA, 47 PLAT8250_DEV_MCA,
48 PLAT8250_DEV_AU1X00, 48 PLAT8250_DEV_AU1X00,
49 PLAT8250_DEV_SM501,
49}; 50};
50 51
51/* 52/*
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 8d5fb36ea047..f2d12d5a21b8 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -34,8 +34,7 @@ struct shmem_sb_info {
34 uid_t uid; /* Mount uid for root directory */ 34 uid_t uid; /* Mount uid for root directory */
35 gid_t gid; /* Mount gid for root directory */ 35 gid_t gid; /* Mount gid for root directory */
36 mode_t mode; /* Mount mode for root directory */ 36 mode_t mode; /* Mount mode for root directory */
37 int policy; /* Default NUMA memory alloc policy */ 37 struct mempolicy *mpol; /* default memory policy for mappings */
38 nodemask_t policy_nodes; /* nodemask for preferred and bind */
39}; 38};
40 39
41static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) 40static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 1d7d4c5797ee..a6977423baf7 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -12,11 +12,22 @@
12#include <asm/errno.h> 12#include <asm/errno.h>
13 13
14#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) 14#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
15extern void pm_set_vt_switch(int);
15extern int pm_prepare_console(void); 16extern int pm_prepare_console(void);
16extern void pm_restore_console(void); 17extern void pm_restore_console(void);
17#else 18#else
18static inline int pm_prepare_console(void) { return 0; } 19static inline void pm_set_vt_switch(int do_switch)
19static inline void pm_restore_console(void) {} 20{
21}
22
23static inline int pm_prepare_console(void)
24{
25 return 0;
26}
27
28static inline void pm_restore_console(void)
29{
30}
20#endif 31#endif
21 32
22typedef int __bitwise suspend_state_t; 33typedef int __bitwise suspend_state_t;
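
pm_set_vt_switch() lets a driver tell the suspend core whether the switch to a text console around suspend/resume is still needed; the stub on the other side of the #ifdef keeps such callers compiling unconditionally. A minimal usage sketch, assuming a hypothetical driver init function:

#include <linux/init.h>
#include <linux/suspend.h>

/* Hypothetical driver that restores its own display state on resume and
 * therefore asks the PM core not to switch virtual terminals. */
static int __init example_display_init(void)
{
	pm_set_vt_switch(0);	/* 0 = skip the VT switch, non-zero = do it */
	return 0;
}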
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 878459ae0454..0b3377650c85 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -177,11 +177,11 @@ extern void activate_page(struct page *);
177extern void mark_page_accessed(struct page *); 177extern void mark_page_accessed(struct page *);
178extern void lru_add_drain(void); 178extern void lru_add_drain(void);
179extern int lru_add_drain_all(void); 179extern int lru_add_drain_all(void);
180extern int rotate_reclaimable_page(struct page *page); 180extern void rotate_reclaimable_page(struct page *page);
181extern void swap_setup(void); 181extern void swap_setup(void);
182 182
183/* linux/mm/vmscan.c */ 183/* linux/mm/vmscan.c */
184extern unsigned long try_to_free_pages(struct zone **zones, int order, 184extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
185 gfp_t gfp_mask); 185 gfp_t gfp_mask);
186extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 186extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
187 gfp_t gfp_mask); 187 gfp_t gfp_mask);
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 5562fbf72095..45f6bc82d317 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -13,10 +13,6 @@
13#define _SYNCLINK_H_ 13#define _SYNCLINK_H_
14#define SYNCLINK_H_VERSION 3.6 14#define SYNCLINK_H_VERSION 3.6
15 15
16#define BOOLEAN int
17#define TRUE 1
18#define FALSE 0
19
20#define BIT0 0x0001 16#define BIT0 0x0001
21#define BIT1 0x0002 17#define BIT1 0x0002
22#define BIT2 0x0004 18#define BIT2 0x0004
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 03378e3515b3..add3c5a40827 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -32,7 +32,7 @@ struct attribute {
32 32
33struct attribute_group { 33struct attribute_group {
34 const char *name; 34 const char *name;
35 int (*is_visible)(struct kobject *, 35 mode_t (*is_visible)(struct kobject *,
36 struct attribute *, int); 36 struct attribute *, int);
37 struct attribute **attrs; 37 struct attribute **attrs;
38}; 38};
@@ -105,6 +105,8 @@ void sysfs_remove_link(struct kobject *kobj, const char *name);
105 105
106int __must_check sysfs_create_group(struct kobject *kobj, 106int __must_check sysfs_create_group(struct kobject *kobj,
107 const struct attribute_group *grp); 107 const struct attribute_group *grp);
108int sysfs_update_group(struct kobject *kobj,
109 const struct attribute_group *grp);
108void sysfs_remove_group(struct kobject *kobj, 110void sysfs_remove_group(struct kobject *kobj,
109 const struct attribute_group *grp); 111 const struct attribute_group *grp);
110int sysfs_add_file_to_group(struct kobject *kobj, 112int sysfs_add_file_to_group(struct kobject *kobj,
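
Two related sysfs changes appear above: is_visible() now returns the mode the attribute should get (0 hides it) instead of a bare int, and sysfs_update_group() lets a driver re-evaluate a group after conditions change. A minimal sketch, assuming illustrative attribute and flag names, of a group using the new callback type:

#include <linux/sysfs.h>

static struct attribute example_state_attr = { .name = "state", .mode = 0444 };
static struct attribute example_extra_attr = { .name = "extra", .mode = 0644 };

static struct attribute *example_attrs[] = {
	&example_state_attr,
	&example_extra_attr,
	NULL,
};

static int example_feature_enabled;	/* toggled elsewhere by the driver */

static mode_t example_is_visible(struct kobject *kobj,
				 struct attribute *attr, int index)
{
	if (attr == &example_extra_attr && !example_feature_enabled)
		return 0;		/* hide the attribute entirely */
	return attr->mode;		/* otherwise keep the declared mode */
}

static struct attribute_group example_group = {
	.is_visible	= example_is_visible,
	.attrs		= example_attrs,
};

When example_feature_enabled changes, the owning driver would call sysfs_update_group(kobj, &example_group) so the callback is consulted again.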
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index ce8e7da05807..364789aae9f3 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -31,6 +31,7 @@ struct vm_struct {
31 struct page **pages; 31 struct page **pages;
32 unsigned int nr_pages; 32 unsigned int nr_pages;
33 unsigned long phys_addr; 33 unsigned long phys_addr;
34 void *caller;
34}; 35};
35 36
36/* 37/*
@@ -66,6 +67,8 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
66} 67}
67 68
68extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); 69extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
70extern struct vm_struct *get_vm_area_caller(unsigned long size,
71 unsigned long flags, void *caller);
69extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 72extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
70 unsigned long start, unsigned long end); 73 unsigned long start, unsigned long end);
71extern struct vm_struct *get_vm_area_node(unsigned long size, 74extern struct vm_struct *get_vm_area_node(unsigned long size,
@@ -87,4 +90,6 @@ extern void free_vm_area(struct vm_struct *area);
87extern rwlock_t vmlist_lock; 90extern rwlock_t vmlist_lock;
88extern struct vm_struct *vmlist; 91extern struct vm_struct *vmlist;
89 92
93extern const struct seq_operations vmalloc_op;
94
90#endif /* _LINUX_VMALLOC_H */ 95#endif /* _LINUX_VMALLOC_H */
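
The new caller field and get_vm_area_caller() exist so each vmalloc area can be attributed to the code that created it (vmalloc_op backs a /proc listing of those areas); callers typically pass __builtin_return_address(0). A minimal sketch, assuming a hypothetical wrapper function:

#include <linux/vmalloc.h>

/* Hypothetical wrapper: reserve a region and record our caller so the
 * area shows up attributed when the vmalloc list is dumped. */
static struct vm_struct *example_reserve_area(unsigned long size)
{
	return get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
}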
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9f1b4b46151e..e83b69346d23 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,6 +25,7 @@
25#define HIGHMEM_ZONE(xx) 25#define HIGHMEM_ZONE(xx)
26#endif 26#endif
27 27
28
28#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE 29#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
29 30
30enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, 31enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
@@ -37,6 +38,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
37 FOR_ALL_ZONES(PGSCAN_DIRECT), 38 FOR_ALL_ZONES(PGSCAN_DIRECT),
38 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, 39 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
39 PAGEOUTRUN, ALLOCSTALL, PGROTATED, 40 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
41#ifdef CONFIG_HUGETLB_PAGE
42 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
43#endif
40 NR_VM_EVENT_ITEMS 44 NR_VM_EVENT_ITEMS
41}; 45};
42 46
@@ -174,7 +178,7 @@ static inline unsigned long node_page_state(int node,
174 zone_page_state(&zones[ZONE_MOVABLE], item); 178 zone_page_state(&zones[ZONE_MOVABLE], item);
175} 179}
176 180
177extern void zone_statistics(struct zonelist *, struct zone *); 181extern void zone_statistics(struct zone *, struct zone *);
178 182
179#else 183#else
180 184
diff --git a/include/net/compat.h b/include/net/compat.h
index 406db242f73a..05fa5d0254ab 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,4 +40,7 @@ extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
40 40
41extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int); 41extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
42 42
43extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, int,
44 int (*)(struct sock *, int, int, char __user *, int));
45
43#endif /* NET_COMPAT_H */ 46#endif /* NET_COMPAT_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b8b19e2f57bb..f6a9fe0ef09c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -181,7 +181,8 @@ struct scsi_device {
181 sdev_printk(prefix, (scmd)->device, fmt, ##a) 181 sdev_printk(prefix, (scmd)->device, fmt, ##a)
182 182
183enum scsi_target_state { 183enum scsi_target_state {
184 STARGET_RUNNING = 1, 184 STARGET_CREATED = 1,
185 STARGET_RUNNING,
185 STARGET_DEL, 186 STARGET_DEL,
186}; 187};
187 188
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 336c20db87f8..ed64862c4e18 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -22,6 +22,15 @@
22#ifndef __ATMEL_LCDC_H__ 22#ifndef __ATMEL_LCDC_H__
23#define __ATMEL_LCDC_H__ 23#define __ATMEL_LCDC_H__
24 24
25
 26/* How the LCD wires are connected to the chip:
 27 * Some Atmel chips use BGR color mode (instead of standard RGB);
 28 * swapped wiring on the board can bring the panel back to RGB mode.
 29 */
30#define ATMEL_LCDC_WIRING_BGR 0
31#define ATMEL_LCDC_WIRING_RGB 1
32
33
25 /* LCD Controller info data structure, stored in device platform_data */ 34 /* LCD Controller info data structure, stored in device platform_data */
26struct atmel_lcdfb_info { 35struct atmel_lcdfb_info {
27 spinlock_t lock; 36 spinlock_t lock;
@@ -39,8 +48,10 @@ struct atmel_lcdfb_info {
39 u8 bl_power; 48 u8 bl_power;
40#endif 49#endif
41 bool lcdcon_is_backlight; 50 bool lcdcon_is_backlight;
51 u8 saved_lcdcon;
42 52
43 u8 default_bpp; 53 u8 default_bpp;
54 u8 lcd_wiring_mode;
44 unsigned int default_lcdcon2; 55 unsigned int default_lcdcon2;
45 unsigned int default_dmacon; 56 unsigned int default_dmacon;
46 void (*atmel_lcdfb_power_control)(int on); 57 void (*atmel_lcdfb_power_control)(int on);
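
The wiring defines and the lcd_wiring_mode field let a board whose RGB lines are swapped declare that in platform data rather than patching the driver. A minimal sketch, assuming illustrative board values (only the two fields shown relate to this hunk):

#include <linux/init.h>
#include <video/atmel_lcdc.h>

/* Illustrative board platform data: a panel wired in RGB order. */
static struct atmel_lcdfb_info __initdata example_lcdc_data = {
	.default_bpp		= 16,
	.lcd_wiring_mode	= ATMEL_LCDC_WIRING_RGB,
};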
diff --git a/include/video/hecubafb.h b/include/video/hecubafb.h
new file mode 100644
index 000000000000..7b9952339762
--- /dev/null
+++ b/include/video/hecubafb.h
@@ -0,0 +1,51 @@
1/*
2 * hecubafb.h - definitions for the hecuba framebuffer driver
3 *
4 * Copyright (C) 2008 by Jaya Kumar
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive for
8 * more details.
9 *
10 */
11
12#ifndef _LINUX_HECUBAFB_H_
13#define _LINUX_HECUBAFB_H_
14
15/* Apollo controller specific defines */
16#define APOLLO_START_NEW_IMG 0xA0
17#define APOLLO_STOP_IMG_DATA 0xA1
18#define APOLLO_DISPLAY_IMG 0xA2
19#define APOLLO_ERASE_DISPLAY 0xA3
20#define APOLLO_INIT_DISPLAY 0xA4
21
22/* Hecuba interface specific defines */
23#define HCB_WUP_BIT 0x01
24#define HCB_DS_BIT 0x02
25#define HCB_RW_BIT 0x04
26#define HCB_CD_BIT 0x08
27#define HCB_ACK_BIT 0x80
28
29/* struct used by hecuba. board specific stuff comes from *board */
30struct hecubafb_par {
31 struct fb_info *info;
32 struct hecuba_board *board;
33 void (*send_command)(struct hecubafb_par *, unsigned char);
34 void (*send_data)(struct hecubafb_par *, unsigned char);
35};
36
 37/* board specific routines
 38board drivers can implement wait_for_ack with interrupts if desired. If
 39wait_for_ack is called with clear=0, sleep and return when ack goes high;
 40if called with clear=1, return when ack goes low. */
41struct hecuba_board {
42 struct module *owner;
43 void (*remove)(struct hecubafb_par *);
44 void (*set_ctl)(struct hecubafb_par *, unsigned char, unsigned char);
45 void (*set_data)(struct hecubafb_par *, unsigned char);
46 void (*wait_for_ack)(struct hecubafb_par *, int);
47 int (*init)(struct hecubafb_par *);
48};
49
50
51#endif
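
Per the comment in the header, wait_for_ack(par, 0) should return once ACK goes high and wait_for_ack(par, 1) once it goes low. A minimal polling sketch, assuming a board-specific example_read_ack() helper (not part of this header) that reports the HCB_ACK_BIT line:

#include <linux/sched.h>
#include <video/hecubafb.h>

static int example_read_ack(void);	/* board glue: non-zero while ACK is high */

/* Illustrative polling implementation; an interrupt-driven version is
 * equally valid per the comment above. */
static void example_wait_for_ack(struct hecubafb_par *par, int clear)
{
	while (clear ? example_read_ack() : !example_read_ack())
		cpu_relax();
}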
diff --git a/include/video/metronomefb.h b/include/video/metronomefb.h
new file mode 100644
index 000000000000..dab04b4fad7f
--- /dev/null
+++ b/include/video/metronomefb.h
@@ -0,0 +1,62 @@
1/*
2 * metronomefb.h - definitions for the metronome framebuffer driver
3 *
4 * Copyright (C) 2008 by Jaya Kumar
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive for
8 * more details.
9 *
10 */
11
12#ifndef _LINUX_METRONOMEFB_H_
13#define _LINUX_METRONOMEFB_H_
14
15/* address and control descriptors used by metronome controller */
16struct metromem_desc {
17 u32 mFDADR0;
18 u32 mFSADR0;
19 u32 mFIDR0;
20 u32 mLDCMD0;
21};
22
23/* command structure used by metronome controller */
24struct metromem_cmd {
25 u16 opcode;
26 u16 args[((64-2)/2)];
27 u16 csum;
28};
29
30/* struct used by metronome. board specific stuff comes from *board */
31struct metronomefb_par {
32 unsigned char *metromem;
33 struct metromem_desc *metromem_desc;
34 struct metromem_cmd *metromem_cmd;
35 unsigned char *metromem_wfm;
36 unsigned char *metromem_img;
37 u16 *metromem_img_csum;
38 u16 *csum_table;
39 int metromemsize;
40 dma_addr_t metromem_dma;
41 dma_addr_t metromem_desc_dma;
42 struct fb_info *info;
43 struct metronome_board *board;
44 wait_queue_head_t waitq;
45 u8 frame_count;
46};
47
48/* board specific routines */
49struct metronome_board {
50 struct module *owner;
51 void (*free_irq)(struct fb_info *);
52 void (*init_gpio_regs)(struct metronomefb_par *);
53 void (*init_lcdc_regs)(struct metronomefb_par *);
54 void (*post_dma_setup)(struct metronomefb_par *);
55 void (*set_rst)(struct metronomefb_par *, int);
56 void (*set_stdby)(struct metronomefb_par *, int);
57 int (*met_wait_event)(struct metronomefb_par *);
58 int (*met_wait_event_intr)(struct metronomefb_par *);
59 int (*setup_irq)(struct fb_info *);
60};
61
62#endif