Diffstat (limited to 'include')
-rw-r--r--  include/acpi/platform/aclinux.h | 2
-rw-r--r--  include/asm-alpha/dma-mapping.h | 4
-rw-r--r--  include/asm-alpha/unistd.h | 182
-rw-r--r--  include/asm-arm/dma-mapping.h | 2
-rw-r--r--  include/asm-arm/setup.h | 104
-rw-r--r--  include/asm-arm/unistd.h | 150
-rw-r--r--  include/asm-arm26/pgalloc.h | 2
-rw-r--r--  include/asm-arm26/setup.h | 4
-rw-r--r--  include/asm-arm26/unistd.h | 133
-rw-r--r--  include/asm-avr32/dma-mapping.h | 5
-rw-r--r--  include/asm-avr32/setup.h | 4
-rw-r--r--  include/asm-cris/arch-v10/bitops.h | 10
-rw-r--r--  include/asm-cris/dma-mapping.h | 4
-rw-r--r--  include/asm-cris/semaphore-helper.h | 8
-rw-r--r--  include/asm-frv/dma-mapping.h | 4
-rw-r--r--  include/asm-frv/highmem.h | 5
-rw-r--r--  include/asm-frv/param.h | 1
-rw-r--r--  include/asm-frv/setup.h | 6
-rw-r--r--  include/asm-frv/unistd.h | 119
-rw-r--r--  include/asm-generic/Kbuild | 1
-rw-r--r--  include/asm-generic/Kbuild.asm | 1
-rw-r--r--  include/asm-generic/atomic.h | 7
-rw-r--r--  include/asm-generic/dma-mapping.h | 4
-rw-r--r--  include/asm-generic/futex.h | 4
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 23
-rw-r--r--  include/asm-h8300/delay.h | 4
-rw-r--r--  include/asm-h8300/mmu_context.h | 4
-rw-r--r--  include/asm-h8300/pci.h | 4
-rw-r--r--  include/asm-h8300/tlbflush.h | 4
-rw-r--r--  include/asm-h8300/unistd.h | 166
-rw-r--r--  include/asm-i386/Kbuild | 1
-rw-r--r--  include/asm-i386/alternative.h | 13
-rw-r--r--  include/asm-i386/apic.h | 15
-rw-r--r--  include/asm-i386/atomic.h | 6
-rw-r--r--  include/asm-i386/boot.h | 6
-rw-r--r--  include/asm-i386/bugs.h | 4
-rw-r--r--  include/asm-i386/cpu.h | 3
-rw-r--r--  include/asm-i386/cpufeature.h | 8
-rw-r--r--  include/asm-i386/current.h | 7
-rw-r--r--  include/asm-i386/delay.h | 13
-rw-r--r--  include/asm-i386/desc.h | 95
-rw-r--r--  include/asm-i386/dma-mapping.h | 4
-rw-r--r--  include/asm-i386/e820.h | 5
-rw-r--r--  include/asm-i386/elf.h | 2
-rw-r--r--  include/asm-i386/futex.h | 4
-rw-r--r--  include/asm-i386/genapic.h | 2
-rw-r--r--  include/asm-i386/i387.h | 5
-rw-r--r--  include/asm-i386/io.h | 8
-rw-r--r--  include/asm-i386/irq.h | 5
-rw-r--r--  include/asm-i386/irq_regs.h | 28
-rw-r--r--  include/asm-i386/irqflags.h | 42
-rw-r--r--  include/asm-i386/mach-default/setup_arch.h | 2
-rw-r--r--  include/asm-i386/math_emu.h | 1
-rw-r--r--  include/asm-i386/mmu_context.h | 8
-rw-r--r--  include/asm-i386/mmzone.h | 27
-rw-r--r--  include/asm-i386/module.h | 10
-rw-r--r--  include/asm-i386/mpspec_def.h | 2
-rw-r--r--  include/asm-i386/msr.h | 18
-rw-r--r--  include/asm-i386/nmi.h | 8
-rw-r--r--  include/asm-i386/page.h | 8
-rw-r--r--  include/asm-i386/param.h | 1
-rw-r--r--  include/asm-i386/paravirt.h | 505
-rw-r--r--  include/asm-i386/pda.h | 100
-rw-r--r--  include/asm-i386/percpu.h | 25
-rw-r--r--  include/asm-i386/pgtable-2level.h | 10
-rw-r--r--  include/asm-i386/pgtable-3level.h | 45
-rw-r--r--  include/asm-i386/pgtable.h | 28
-rw-r--r--  include/asm-i386/processor.h | 204
-rw-r--r--  include/asm-i386/ptrace.h | 2
-rw-r--r--  include/asm-i386/rwsem.h | 4
-rw-r--r--  include/asm-i386/segment.h | 7
-rw-r--r--  include/asm-i386/setup.h | 7
-rw-r--r--  include/asm-i386/smp.h | 3
-rw-r--r--  include/asm-i386/spinlock.h | 19
-rw-r--r--  include/asm-i386/suspend.h | 21
-rw-r--r--  include/asm-i386/system.h | 16
-rw-r--r--  include/asm-i386/thread_info.h | 10
-rw-r--r--  include/asm-i386/time.h | 41
-rw-r--r--  include/asm-i386/tlbflush.h | 18
-rw-r--r--  include/asm-i386/unistd.h | 98
-rw-r--r--  include/asm-i386/unwind.h | 13
-rw-r--r--  include/asm-i386/vm86.h | 17
-rw-r--r--  include/asm-ia64/Kbuild | 1
-rw-r--r--  include/asm-ia64/dma-mapping.h | 5
-rw-r--r--  include/asm-ia64/futex.h | 4
-rw-r--r--  include/asm-ia64/pgalloc.h | 2
-rw-r--r--  include/asm-m32r/setup.h | 9
-rw-r--r--  include/asm-m32r/unistd.h | 111
-rw-r--r--  include/asm-m68k/dma-mapping.h | 4
-rw-r--r--  include/asm-m68k/setup.h | 6
-rw-r--r--  include/asm-m68k/unistd.h | 97
-rw-r--r--  include/asm-m68knommu/setup.h | 5
-rw-r--r--  include/asm-m68knommu/unistd.h | 150
-rw-r--r--  include/asm-mips/dma-mapping.h | 4
-rw-r--r--  include/asm-mips/futex.h | 4
-rw-r--r--  include/asm-mips/highmem.h | 10
-rw-r--r--  include/asm-mips/setup.h | 2
-rw-r--r--  include/asm-mips/unistd.h | 262
-rw-r--r--  include/asm-parisc/dma-mapping.h | 4
-rw-r--r--  include/asm-parisc/futex.h | 4
-rw-r--r--  include/asm-powerpc/dma-mapping.h | 6
-rw-r--r--  include/asm-powerpc/elf.h | 2
-rw-r--r--  include/asm-powerpc/futex.h | 4
-rw-r--r--  include/asm-powerpc/pgalloc.h | 2
-rw-r--r--  include/asm-powerpc/setup.h | 3
-rw-r--r--  include/asm-powerpc/unistd.h | 109
-rw-r--r--  include/asm-ppc/highmem.h | 8
-rw-r--r--  include/asm-s390/setup.h | 3
-rw-r--r--  include/asm-s390/unistd.h | 154
-rw-r--r--  include/asm-sh/dma-mapping.h | 2
-rw-r--r--  include/asm-sh/setup.h | 6
-rw-r--r--  include/asm-sh/unistd.h | 137
-rw-r--r--  include/asm-sh64/dma-mapping.h | 2
-rw-r--r--  include/asm-sh64/setup.h | 6
-rw-r--r--  include/asm-sh64/unistd.h | 142
-rw-r--r--  include/asm-sparc/unistd.h | 130
-rw-r--r--  include/asm-sparc64/dma-mapping.h | 4
-rw-r--r--  include/asm-sparc64/futex.h | 4
-rw-r--r--  include/asm-sparc64/pgalloc.h | 2
-rw-r--r--  include/asm-sparc64/unistd.h | 118
-rw-r--r--  include/asm-um/dma-mapping.h | 4
-rw-r--r--  include/asm-v850/irq.h | 2
-rw-r--r--  include/asm-v850/unistd.h | 160
-rw-r--r--  include/asm-x86_64/Kbuild | 2
-rw-r--r--  include/asm-x86_64/alternative.h | 12
-rw-r--r--  include/asm-x86_64/atomic.h | 6
-rw-r--r--  include/asm-x86_64/calgary.h | 2
-rw-r--r--  include/asm-x86_64/cpufeature.h | 7
-rw-r--r--  include/asm-x86_64/delay.h | 7
-rw-r--r--  include/asm-x86_64/desc.h | 53
-rw-r--r--  include/asm-x86_64/desc_defs.h | 69
-rw-r--r--  include/asm-x86_64/dma-mapping.h | 5
-rw-r--r--  include/asm-x86_64/futex.h | 4
-rw-r--r--  include/asm-x86_64/genapic.h | 2
-rw-r--r--  include/asm-x86_64/msr.h | 17
-rw-r--r--  include/asm-x86_64/nmi.h | 3
-rw-r--r--  include/asm-x86_64/pci-direct.h | 1
-rw-r--r--  include/asm-x86_64/pgtable.h | 22
-rw-r--r--  include/asm-x86_64/processor.h | 8
-rw-r--r--  include/asm-x86_64/proto.h | 2
-rw-r--r--  include/asm-x86_64/rio.h | 74
-rw-r--r--  include/asm-x86_64/smp.h | 12
-rw-r--r--  include/asm-x86_64/spinlock.h | 29
-rw-r--r--  include/asm-x86_64/stacktrace.h | 2
-rw-r--r--  include/asm-x86_64/unistd.h | 99
-rw-r--r--  include/asm-x86_64/unwind.h | 8
-rw-r--r--  include/asm-x86_64/vsyscall.h | 1
-rw-r--r--  include/asm-xtensa/dma-mapping.h | 4
-rw-r--r--  include/asm-xtensa/unistd.h | 184
-rw-r--r--  include/crypto/b128ops.h | 80
-rw-r--r--  include/crypto/gf128mul.h | 198
-rw-r--r--  include/linux/Kbuild | 3
-rw-r--r--  include/linux/aio.h | 2
-rw-r--r--  include/linux/audit.h | 6
-rw-r--r--  include/linux/bootmem.h | 3
-rw-r--r--  include/linux/bottom_half.h | 10
-rw-r--r--  include/linux/carta_random32.h | 29
-rw-r--r--  include/linux/cciss_ioctl.h | 2
-rw-r--r--  include/linux/cdev.h | 4
-rw-r--r--  include/linux/cpu.h | 31
-rw-r--r--  include/linux/cpuset.h | 4
-rw-r--r--  include/linux/crypto.h | 22
-rw-r--r--  include/linux/debug_locks.h | 2
-rw-r--r--  include/linux/delayacct.h | 2
-rw-r--r--  include/linux/device.h | 22
-rw-r--r--  include/linux/efi.h | 3
-rw-r--r--  include/linux/elf.h | 4
-rw-r--r--  include/linux/ext3_jbd.h | 76
-rw-r--r--  include/linux/ext4_jbd2.h | 76
-rw-r--r--  include/linux/file.h | 4
-rw-r--r--  include/linux/freezer.h | 87
-rw-r--r--  include/linux/fs.h | 20
-rw-r--r--  include/linux/fs_struct.h | 2
-rw-r--r--  include/linux/fuse.h | 24
-rw-r--r--  include/linux/genetlink.h | 6
-rw-r--r--  include/linux/gfp.h | 3
-rw-r--r--  include/linux/gfs2_ondisk.h | 138
-rw-r--r--  include/linux/highmem.h | 8
-rw-r--r--  include/linux/hugetlb.h | 1
-rw-r--r--  include/linux/i2o.h | 20
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/interrupt.h | 7
-rw-r--r--  include/linux/ipmi.h | 45
-rw-r--r--  include/linux/ipmi_msgdefs.h | 13
-rw-r--r--  include/linux/ipmi_smi.h | 8
-rw-r--r--  include/linux/jbd.h | 3
-rw-r--r--  include/linux/jbd2.h | 3
-rw-r--r--  include/linux/kexec.h | 1
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/ktime.h | 4
-rw-r--r--  include/linux/lockd/lockd.h | 2
-rw-r--r--  include/linux/lockdep.h | 11
-rw-r--r--  include/linux/mm.h | 81
-rw-r--r--  include/linux/mmzone.h | 87
-rw-r--r--  include/linux/moduleparam.h | 3
-rw-r--r--  include/linux/msg.h | 6
-rw-r--r--  include/linux/mutex.h | 2
-rw-r--r--  include/linux/nbd.h | 1
-rw-r--r--  include/linux/netfilter/nf_conntrack_pptp.h | 3
-rw-r--r--  include/linux/nmi.h | 5
-rw-r--r--  include/linux/pci_ids.h | 9
-rw-r--r--  include/linux/pfkeyv2.h | 1
-rw-r--r--  include/linux/profile.h | 24
-rw-r--r--  include/linux/quotaops.h | 3
-rw-r--r--  include/linux/radix-tree.h | 101
-rw-r--r--  include/linux/raid/raid5.h | 2
-rw-r--r--  include/linux/reiserfs_fs.h | 2
-rw-r--r--  include/linux/relay.h | 2
-rw-r--r--  include/linux/rmap.h | 4
-rw-r--r--  include/linux/rtmutex.h | 2
-rw-r--r--  include/linux/rwsem-spinlock.h | 3
-rw-r--r--  include/linux/sched.h | 111
-rw-r--r--  include/linux/screen_info.h | 3
-rw-r--r--  include/linux/seq_file.h | 4
-rw-r--r--  include/linux/serial_8250.h | 1
-rw-r--r--  include/linux/serial_core.h | 2
-rw-r--r--  include/linux/signal.h | 2
-rw-r--r--  include/linux/skbuff.h | 8
-rw-r--r--  include/linux/slab.h | 92
-rw-r--r--  include/linux/smp.h | 7
-rw-r--r--  include/linux/spinlock.h | 1
-rw-r--r--  include/linux/start_kernel.h | 12
-rw-r--r--  include/linux/sunrpc/sched.h | 4
-rw-r--r--  include/linux/suspend.h | 9
-rw-r--r--  include/linux/swap.h | 6
-rw-r--r--  include/linux/taskstats_kern.h | 43
-rw-r--r--  include/linux/uaccess.h | 49
-rw-r--r--  include/linux/workqueue.h | 9
-rw-r--r--  include/net/dst.h | 2
-rw-r--r--  include/net/inet_hashtables.h | 6
-rw-r--r--  include/net/irda/irlan_filter.h | 2
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 2
-rw-r--r--  include/net/request_sock.h | 4
-rw-r--r--  include/net/sock.h | 21
-rw-r--r--  include/net/timewait_sock.h | 2
-rw-r--r--  include/net/xfrm.h | 24
-rw-r--r--  include/scsi/libsas.h | 4
238 files changed, 2854 insertions, 3766 deletions
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 47faf27913a..7f1e92930b6 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -64,7 +64,7 @@
 /* Host-dependent types and defines */

 #define ACPI_MACHINE_WIDTH          BITS_PER_LONG
-#define acpi_cache_t                kmem_cache_t
+#define acpi_cache_t                struct kmem_cache
 #define acpi_spinlock               spinlock_t *
 #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
 #define strtoul                     simple_strtoul
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index b9ff4d8cb33..57e09f5e342 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -51,7 +51,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,

 #define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(dev)			(1)
+#define dma_is_consistent(d, h)			(1)

 int dma_set_mask(struct device *dev, u64 mask);

@@ -60,7 +60,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 #define dma_sync_single_range(dev, addr, off, size, dir)	do { } while (0)
 #define dma_sync_sg_for_cpu(dev, sg, nents, dir)		do { } while (0)
 #define dma_sync_sg_for_device(dev, sg, nents, dir)		do { } while (0)
-#define dma_cache_sync(va, size, dir)				do { } while (0)
+#define dma_cache_sync(dev, va, size, dir)			do { } while (0)

 #define dma_get_cache_alignment()	L1_CACHE_BYTES

diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 2cabbd465c0..84313d14e78 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -387,188 +387,6 @@
387 387
388#define NR_SYSCALLS 447 388#define NR_SYSCALLS 447
389 389
390#if defined(__GNUC__)
391
392#define _syscall_return(type) \
393 return (_sc_err ? errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret
394
395#define _syscall_clobbers \
396 "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
397 "$22", "$23", "$24", "$25", "$27", "$28" \
398
399#define _syscall0(type, name) \
400type name(void) \
401{ \
402 long _sc_ret, _sc_err; \
403 { \
404 register long _sc_0 __asm__("$0"); \
405 register long _sc_19 __asm__("$19"); \
406 \
407 _sc_0 = __NR_##name; \
408 __asm__("callsys # %0 %1 %2" \
409 : "=r"(_sc_0), "=r"(_sc_19) \
410 : "0"(_sc_0) \
411 : _syscall_clobbers); \
412 _sc_ret = _sc_0, _sc_err = _sc_19; \
413 } \
414 _syscall_return(type); \
415}
416
417#define _syscall1(type,name,type1,arg1) \
418type name(type1 arg1) \
419{ \
420 long _sc_ret, _sc_err; \
421 { \
422 register long _sc_0 __asm__("$0"); \
423 register long _sc_16 __asm__("$16"); \
424 register long _sc_19 __asm__("$19"); \
425 \
426 _sc_0 = __NR_##name; \
427 _sc_16 = (long) (arg1); \
428 __asm__("callsys # %0 %1 %2 %3" \
429 : "=r"(_sc_0), "=r"(_sc_19) \
430 : "0"(_sc_0), "r"(_sc_16) \
431 : _syscall_clobbers); \
432 _sc_ret = _sc_0, _sc_err = _sc_19; \
433 } \
434 _syscall_return(type); \
435}
436
437#define _syscall2(type,name,type1,arg1,type2,arg2) \
438type name(type1 arg1,type2 arg2) \
439{ \
440 long _sc_ret, _sc_err; \
441 { \
442 register long _sc_0 __asm__("$0"); \
443 register long _sc_16 __asm__("$16"); \
444 register long _sc_17 __asm__("$17"); \
445 register long _sc_19 __asm__("$19"); \
446 \
447 _sc_0 = __NR_##name; \
448 _sc_16 = (long) (arg1); \
449 _sc_17 = (long) (arg2); \
450 __asm__("callsys # %0 %1 %2 %3 %4" \
451 : "=r"(_sc_0), "=r"(_sc_19) \
452 : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17) \
453 : _syscall_clobbers); \
454 _sc_ret = _sc_0, _sc_err = _sc_19; \
455 } \
456 _syscall_return(type); \
457}
458
459#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
460type name(type1 arg1,type2 arg2,type3 arg3) \
461{ \
462 long _sc_ret, _sc_err; \
463 { \
464 register long _sc_0 __asm__("$0"); \
465 register long _sc_16 __asm__("$16"); \
466 register long _sc_17 __asm__("$17"); \
467 register long _sc_18 __asm__("$18"); \
468 register long _sc_19 __asm__("$19"); \
469 \
470 _sc_0 = __NR_##name; \
471 _sc_16 = (long) (arg1); \
472 _sc_17 = (long) (arg2); \
473 _sc_18 = (long) (arg3); \
474 __asm__("callsys # %0 %1 %2 %3 %4 %5" \
475 : "=r"(_sc_0), "=r"(_sc_19) \
476 : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
477 "r"(_sc_18) \
478 : _syscall_clobbers); \
479 _sc_ret = _sc_0, _sc_err = _sc_19; \
480 } \
481 _syscall_return(type); \
482}
483
484#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
485type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
486{ \
487 long _sc_ret, _sc_err; \
488 { \
489 register long _sc_0 __asm__("$0"); \
490 register long _sc_16 __asm__("$16"); \
491 register long _sc_17 __asm__("$17"); \
492 register long _sc_18 __asm__("$18"); \
493 register long _sc_19 __asm__("$19"); \
494 \
495 _sc_0 = __NR_##name; \
496 _sc_16 = (long) (arg1); \
497 _sc_17 = (long) (arg2); \
498 _sc_18 = (long) (arg3); \
499 _sc_19 = (long) (arg4); \
500 __asm__("callsys # %0 %1 %2 %3 %4 %5 %6" \
501 : "=r"(_sc_0), "=r"(_sc_19) \
502 : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
503 "r"(_sc_18), "1"(_sc_19) \
504 : _syscall_clobbers); \
505 _sc_ret = _sc_0, _sc_err = _sc_19; \
506 } \
507 _syscall_return(type); \
508}
509
510#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
511 type5,arg5) \
512type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
513{ \
514 long _sc_ret, _sc_err; \
515 { \
516 register long _sc_0 __asm__("$0"); \
517 register long _sc_16 __asm__("$16"); \
518 register long _sc_17 __asm__("$17"); \
519 register long _sc_18 __asm__("$18"); \
520 register long _sc_19 __asm__("$19"); \
521 register long _sc_20 __asm__("$20"); \
522 \
523 _sc_0 = __NR_##name; \
524 _sc_16 = (long) (arg1); \
525 _sc_17 = (long) (arg2); \
526 _sc_18 = (long) (arg3); \
527 _sc_19 = (long) (arg4); \
528 _sc_20 = (long) (arg5); \
529 __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7" \
530 : "=r"(_sc_0), "=r"(_sc_19) \
531 : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
532 "r"(_sc_18), "1"(_sc_19), "r"(_sc_20) \
533 : _syscall_clobbers); \
534 _sc_ret = _sc_0, _sc_err = _sc_19; \
535 } \
536 _syscall_return(type); \
537}
538
539#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
540 type5,arg5,type6,arg6) \
541type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
542{ \
543 long _sc_ret, _sc_err; \
544 { \
545 register long _sc_0 __asm__("$0"); \
546 register long _sc_16 __asm__("$16"); \
547 register long _sc_17 __asm__("$17"); \
548 register long _sc_18 __asm__("$18"); \
549 register long _sc_19 __asm__("$19"); \
550 register long _sc_20 __asm__("$20"); \
551 register long _sc_21 __asm__("$21"); \
552 \
553 _sc_0 = __NR_##name; \
554 _sc_16 = (long) (arg1); \
555 _sc_17 = (long) (arg2); \
556 _sc_18 = (long) (arg3); \
557 _sc_19 = (long) (arg4); \
558 _sc_20 = (long) (arg5); \
559 _sc_21 = (long) (arg6); \
560 __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8" \
561 : "=r"(_sc_0), "=r"(_sc_19) \
562 : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
563 "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \
564 : _syscall_clobbers); \
565 _sc_ret = _sc_0, _sc_err = _sc_19; \
566 } \
567 _syscall_return(type); \
568}
569
570#endif /* __GNUC__ */
571
572#define __ARCH_WANT_IPC_PARSE_VERSION 390#define __ARCH_WANT_IPC_PARSE_VERSION
573#define __ARCH_WANT_OLD_READDIR 391#define __ARCH_WANT_OLD_READDIR
574#define __ARCH_WANT_STAT64 392#define __ARCH_WANT_STAT64
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 666617711c8..9bc46b486af 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -48,7 +48,7 @@ static inline int dma_get_cache_alignment(void)
 	return 32;
 }

-static inline int dma_is_consistent(dma_addr_t handle)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
 {
 	return !!arch_is_coherent();
 }
diff --git a/include/asm-arm/setup.h b/include/asm-arm/setup.h
index aa4b5782f0c..e5407392afc 100644
--- a/include/asm-arm/setup.h
+++ b/include/asm-arm/setup.h
@@ -14,55 +14,57 @@
14#ifndef __ASMARM_SETUP_H 14#ifndef __ASMARM_SETUP_H
15#define __ASMARM_SETUP_H 15#define __ASMARM_SETUP_H
16 16
17#include <asm/types.h>
18
17#define COMMAND_LINE_SIZE 1024 19#define COMMAND_LINE_SIZE 1024
18 20
19/* The list ends with an ATAG_NONE node. */ 21/* The list ends with an ATAG_NONE node. */
20#define ATAG_NONE 0x00000000 22#define ATAG_NONE 0x00000000
21 23
22struct tag_header { 24struct tag_header {
23 u32 size; 25 __u32 size;
24 u32 tag; 26 __u32 tag;
25}; 27};
26 28
27/* The list must start with an ATAG_CORE node */ 29/* The list must start with an ATAG_CORE node */
28#define ATAG_CORE 0x54410001 30#define ATAG_CORE 0x54410001
29 31
30struct tag_core { 32struct tag_core {
31 u32 flags; /* bit 0 = read-only */ 33 __u32 flags; /* bit 0 = read-only */
32 u32 pagesize; 34 __u32 pagesize;
33 u32 rootdev; 35 __u32 rootdev;
34}; 36};
35 37
36/* it is allowed to have multiple ATAG_MEM nodes */ 38/* it is allowed to have multiple ATAG_MEM nodes */
37#define ATAG_MEM 0x54410002 39#define ATAG_MEM 0x54410002
38 40
39struct tag_mem32 { 41struct tag_mem32 {
40 u32 size; 42 __u32 size;
41 u32 start; /* physical start address */ 43 __u32 start; /* physical start address */
42}; 44};
43 45
44/* VGA text type displays */ 46/* VGA text type displays */
45#define ATAG_VIDEOTEXT 0x54410003 47#define ATAG_VIDEOTEXT 0x54410003
46 48
47struct tag_videotext { 49struct tag_videotext {
48 u8 x; 50 __u8 x;
49 u8 y; 51 __u8 y;
50 u16 video_page; 52 __u16 video_page;
51 u8 video_mode; 53 __u8 video_mode;
52 u8 video_cols; 54 __u8 video_cols;
53 u16 video_ega_bx; 55 __u16 video_ega_bx;
54 u8 video_lines; 56 __u8 video_lines;
55 u8 video_isvga; 57 __u8 video_isvga;
56 u16 video_points; 58 __u16 video_points;
57}; 59};
58 60
59/* describes how the ramdisk will be used in kernel */ 61/* describes how the ramdisk will be used in kernel */
60#define ATAG_RAMDISK 0x54410004 62#define ATAG_RAMDISK 0x54410004
61 63
62struct tag_ramdisk { 64struct tag_ramdisk {
63 u32 flags; /* bit 0 = load, bit 1 = prompt */ 65 __u32 flags; /* bit 0 = load, bit 1 = prompt */
64 u32 size; /* decompressed ramdisk size in _kilo_ bytes */ 66 __u32 size; /* decompressed ramdisk size in _kilo_ bytes */
65 u32 start; /* starting block of floppy-based RAM disk image */ 67 __u32 start; /* starting block of floppy-based RAM disk image */
66}; 68};
67 69
68/* describes where the compressed ramdisk image lives (virtual address) */ 70/* describes where the compressed ramdisk image lives (virtual address) */
@@ -76,23 +78,23 @@ struct tag_ramdisk {
76#define ATAG_INITRD2 0x54420005 78#define ATAG_INITRD2 0x54420005
77 79
78struct tag_initrd { 80struct tag_initrd {
79 u32 start; /* physical start address */ 81 __u32 start; /* physical start address */
80 u32 size; /* size of compressed ramdisk image in bytes */ 82 __u32 size; /* size of compressed ramdisk image in bytes */
81}; 83};
82 84
83/* board serial number. "64 bits should be enough for everybody" */ 85/* board serial number. "64 bits should be enough for everybody" */
84#define ATAG_SERIAL 0x54410006 86#define ATAG_SERIAL 0x54410006
85 87
86struct tag_serialnr { 88struct tag_serialnr {
87 u32 low; 89 __u32 low;
88 u32 high; 90 __u32 high;
89}; 91};
90 92
91/* board revision */ 93/* board revision */
92#define ATAG_REVISION 0x54410007 94#define ATAG_REVISION 0x54410007
93 95
94struct tag_revision { 96struct tag_revision {
95 u32 rev; 97 __u32 rev;
96}; 98};
97 99
98/* initial values for vesafb-type framebuffers. see struct screen_info 100/* initial values for vesafb-type framebuffers. see struct screen_info
@@ -101,20 +103,20 @@ struct tag_revision {
101#define ATAG_VIDEOLFB 0x54410008 103#define ATAG_VIDEOLFB 0x54410008
102 104
103struct tag_videolfb { 105struct tag_videolfb {
104 u16 lfb_width; 106 __u16 lfb_width;
105 u16 lfb_height; 107 __u16 lfb_height;
106 u16 lfb_depth; 108 __u16 lfb_depth;
107 u16 lfb_linelength; 109 __u16 lfb_linelength;
108 u32 lfb_base; 110 __u32 lfb_base;
109 u32 lfb_size; 111 __u32 lfb_size;
110 u8 red_size; 112 __u8 red_size;
111 u8 red_pos; 113 __u8 red_pos;
112 u8 green_size; 114 __u8 green_size;
113 u8 green_pos; 115 __u8 green_pos;
114 u8 blue_size; 116 __u8 blue_size;
115 u8 blue_pos; 117 __u8 blue_pos;
116 u8 rsvd_size; 118 __u8 rsvd_size;
117 u8 rsvd_pos; 119 __u8 rsvd_pos;
118}; 120};
119 121
120/* command line: \0 terminated string */ 122/* command line: \0 terminated string */
@@ -128,17 +130,17 @@ struct tag_cmdline {
128#define ATAG_ACORN 0x41000101 130#define ATAG_ACORN 0x41000101
129 131
130struct tag_acorn { 132struct tag_acorn {
131 u32 memc_control_reg; 133 __u32 memc_control_reg;
132 u32 vram_pages; 134 __u32 vram_pages;
133 u8 sounddefault; 135 __u8 sounddefault;
134 u8 adfsdrives; 136 __u8 adfsdrives;
135}; 137};
136 138
137/* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */ 139/* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */
138#define ATAG_MEMCLK 0x41000402 140#define ATAG_MEMCLK 0x41000402
139 141
140struct tag_memclk { 142struct tag_memclk {
141 u32 fmemclk; 143 __u32 fmemclk;
142}; 144};
143 145
144struct tag { 146struct tag {
@@ -167,24 +169,26 @@ struct tag {
167}; 169};
168 170
169struct tagtable { 171struct tagtable {
170 u32 tag; 172 __u32 tag;
171 int (*parse)(const struct tag *); 173 int (*parse)(const struct tag *);
172}; 174};
173 175
174#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
175#define __tagtable(tag, fn) \
176static struct tagtable __tagtable_##fn __tag = { tag, fn }
177
178#define tag_member_present(tag,member) \ 176#define tag_member_present(tag,member) \
179 ((unsigned long)(&((struct tag *)0L)->member + 1) \ 177 ((unsigned long)(&((struct tag *)0L)->member + 1) \
180 <= (tag)->hdr.size * 4) 178 <= (tag)->hdr.size * 4)
181 179
182#define tag_next(t) ((struct tag *)((u32 *)(t) + (t)->hdr.size)) 180#define tag_next(t) ((struct tag *)((__u32 *)(t) + (t)->hdr.size))
183#define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2) 181#define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2)
184 182
185#define for_each_tag(t,base) \ 183#define for_each_tag(t,base) \
186 for (t = base; t->hdr.size; t = tag_next(t)) 184 for (t = base; t->hdr.size; t = tag_next(t))
187 185
186#ifdef __KERNEL__
187
188#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
189#define __tagtable(tag, fn) \
190static struct tagtable __tagtable_##fn __tag = { tag, fn }
191
188/* 192/*
189 * Memory map description 193 * Memory map description
190 */ 194 */
@@ -217,4 +221,6 @@ struct early_params {
217static struct early_params __early_##fn __attribute_used__ \ 221static struct early_params __early_##fn __attribute_used__ \
218__attribute__((__section__(".early_param.init"))) = { name, fn } 222__attribute__((__section__(".early_param.init"))) = { name, fn }
219 223
224#endif /* __KERNEL__ */
225
220#endif 226#endif
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 14a87eec5a2..d44c629d842 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -377,156 +377,6 @@
377#endif 377#endif
378 378
379#ifdef __KERNEL__ 379#ifdef __KERNEL__
380#include <linux/err.h>
381#include <linux/linkage.h>
382
383#define __sys2(x) #x
384#define __sys1(x) __sys2(x)
385
386#ifndef __syscall
387#if defined(__thumb__) || defined(__ARM_EABI__)
388#define __SYS_REG(name) register long __sysreg __asm__("r7") = __NR_##name;
389#define __SYS_REG_LIST(regs...) "r" (__sysreg) , ##regs
390#define __syscall(name) "swi\t0"
391#else
392#define __SYS_REG(name)
393#define __SYS_REG_LIST(regs...) regs
394#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
395#endif
396#endif
397
398#define __syscall_return(type, res) \
399do { \
400 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
401 errno = -(res); \
402 res = -1; \
403 } \
404 return (type) (res); \
405} while (0)
406
407#define _syscall0(type,name) \
408type name(void) { \
409 __SYS_REG(name) \
410 register long __res_r0 __asm__("r0"); \
411 long __res; \
412 __asm__ __volatile__ ( \
413 __syscall(name) \
414 : "=r" (__res_r0) \
415 : __SYS_REG_LIST() \
416 : "memory" ); \
417 __res = __res_r0; \
418 __syscall_return(type,__res); \
419}
420
421#define _syscall1(type,name,type1,arg1) \
422type name(type1 arg1) { \
423 __SYS_REG(name) \
424 register long __r0 __asm__("r0") = (long)arg1; \
425 register long __res_r0 __asm__("r0"); \
426 long __res; \
427 __asm__ __volatile__ ( \
428 __syscall(name) \
429 : "=r" (__res_r0) \
430 : __SYS_REG_LIST( "0" (__r0) ) \
431 : "memory" ); \
432 __res = __res_r0; \
433 __syscall_return(type,__res); \
434}
435
436#define _syscall2(type,name,type1,arg1,type2,arg2) \
437type name(type1 arg1,type2 arg2) { \
438 __SYS_REG(name) \
439 register long __r0 __asm__("r0") = (long)arg1; \
440 register long __r1 __asm__("r1") = (long)arg2; \
441 register long __res_r0 __asm__("r0"); \
442 long __res; \
443 __asm__ __volatile__ ( \
444 __syscall(name) \
445 : "=r" (__res_r0) \
446 : __SYS_REG_LIST( "0" (__r0), "r" (__r1) ) \
447 : "memory" ); \
448 __res = __res_r0; \
449 __syscall_return(type,__res); \
450}
451
452
453#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
454type name(type1 arg1,type2 arg2,type3 arg3) { \
455 __SYS_REG(name) \
456 register long __r0 __asm__("r0") = (long)arg1; \
457 register long __r1 __asm__("r1") = (long)arg2; \
458 register long __r2 __asm__("r2") = (long)arg3; \
459 register long __res_r0 __asm__("r0"); \
460 long __res; \
461 __asm__ __volatile__ ( \
462 __syscall(name) \
463 : "=r" (__res_r0) \
464 : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2) ) \
465 : "memory" ); \
466 __res = __res_r0; \
467 __syscall_return(type,__res); \
468}
469
470
471#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
472type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
473 __SYS_REG(name) \
474 register long __r0 __asm__("r0") = (long)arg1; \
475 register long __r1 __asm__("r1") = (long)arg2; \
476 register long __r2 __asm__("r2") = (long)arg3; \
477 register long __r3 __asm__("r3") = (long)arg4; \
478 register long __res_r0 __asm__("r0"); \
479 long __res; \
480 __asm__ __volatile__ ( \
481 __syscall(name) \
482 : "=r" (__res_r0) \
483 : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), "r" (__r3) ) \
484 : "memory" ); \
485 __res = __res_r0; \
486 __syscall_return(type,__res); \
487}
488
489
490#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
491type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \
492 __SYS_REG(name) \
493 register long __r0 __asm__("r0") = (long)arg1; \
494 register long __r1 __asm__("r1") = (long)arg2; \
495 register long __r2 __asm__("r2") = (long)arg3; \
496 register long __r3 __asm__("r3") = (long)arg4; \
497 register long __r4 __asm__("r4") = (long)arg5; \
498 register long __res_r0 __asm__("r0"); \
499 long __res; \
500 __asm__ __volatile__ ( \
501 __syscall(name) \
502 : "=r" (__res_r0) \
503 : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), \
504 "r" (__r3), "r" (__r4) ) \
505 : "memory" ); \
506 __res = __res_r0; \
507 __syscall_return(type,__res); \
508}
509
510#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
511type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) { \
512 __SYS_REG(name) \
513 register long __r0 __asm__("r0") = (long)arg1; \
514 register long __r1 __asm__("r1") = (long)arg2; \
515 register long __r2 __asm__("r2") = (long)arg3; \
516 register long __r3 __asm__("r3") = (long)arg4; \
517 register long __r4 __asm__("r4") = (long)arg5; \
518 register long __r5 __asm__("r5") = (long)arg6; \
519 register long __res_r0 __asm__("r0"); \
520 long __res; \
521 __asm__ __volatile__ ( \
522 __syscall(name) \
523 : "=r" (__res_r0) \
524 : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), \
525 "r" (__r3), "r" (__r4), "r" (__r5) ) \
526 : "memory" ); \
527 __res = __res_r0; \
528 __syscall_return(type,__res); \
529}
530 380
531#define __ARCH_WANT_IPC_PARSE_VERSION 381#define __ARCH_WANT_IPC_PARSE_VERSION
532#define __ARCH_WANT_STAT64 382#define __ARCH_WANT_STAT64
diff --git a/include/asm-arm26/pgalloc.h b/include/asm-arm26/pgalloc.h
index 6437167b1ff..7725af3ddb4 100644
--- a/include/asm-arm26/pgalloc.h
+++ b/include/asm-arm26/pgalloc.h
@@ -15,7 +15,7 @@
 #include <asm/tlbflush.h>
 #include <linux/slab.h>

-extern kmem_cache_t *pte_cache;
+extern struct kmem_cache *pte_cache;

 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr){
 	return kmem_cache_alloc(pte_cache, GFP_KERNEL);
diff --git a/include/asm-arm26/setup.h b/include/asm-arm26/setup.h
index 6348931be65..1a867b4e8d5 100644
--- a/include/asm-arm26/setup.h
+++ b/include/asm-arm26/setup.h
@@ -16,6 +16,8 @@

 #define COMMAND_LINE_SIZE 1024

+#ifdef __KERNEL__
+
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE	0x00000000

@@ -202,4 +204,6 @@ struct meminfo {

 extern struct meminfo meminfo;

+#endif  /* __KERNEL__ */
+
 #endif
diff --git a/include/asm-arm26/unistd.h b/include/asm-arm26/unistd.h
index 25a5eead85b..4c3b919177e 100644
--- a/include/asm-arm26/unistd.h
+++ b/include/asm-arm26/unistd.h
@@ -311,139 +311,6 @@
311#define __ARM_NR_usr26 (__ARM_NR_BASE+3) 311#define __ARM_NR_usr26 (__ARM_NR_BASE+3)
312 312
313#ifdef __KERNEL__ 313#ifdef __KERNEL__
314#include <linux/err.h>
315#include <linux/linkage.h>
316
317#define __sys2(x) #x
318#define __sys1(x) __sys2(x)
319
320#ifndef __syscall
321#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
322#endif
323
324#define __syscall_return(type, res) \
325do { \
326 if ((unsigned long)(res) >= (unsigned long)-MAX_ERRNO) { \
327 errno = -(res); \
328 res = -1; \
329 } \
330 return (type) (res); \
331} while (0)
332
333#define _syscall0(type,name) \
334type name(void) { \
335 register long __res_r0 __asm__("r0"); \
336 long __res; \
337 __asm__ __volatile__ ( \
338 __syscall(name) \
339 : "=r" (__res_r0) \
340 : \
341 : "lr"); \
342 __res = __res_r0; \
343 __syscall_return(type,__res); \
344}
345
346#define _syscall1(type,name,type1,arg1) \
347type name(type1 arg1) { \
348 register long __r0 __asm__("r0") = (long)arg1; \
349 register long __res_r0 __asm__("r0"); \
350 long __res; \
351 __asm__ __volatile__ ( \
352 __syscall(name) \
353 : "=r" (__res_r0) \
354 : "r" (__r0) \
355 : "lr"); \
356 __res = __res_r0; \
357 __syscall_return(type,__res); \
358}
359
360#define _syscall2(type,name,type1,arg1,type2,arg2) \
361type name(type1 arg1,type2 arg2) { \
362 register long __r0 __asm__("r0") = (long)arg1; \
363 register long __r1 __asm__("r1") = (long)arg2; \
364 register long __res_r0 __asm__("r0"); \
365 long __res; \
366 __asm__ __volatile__ ( \
367 __syscall(name) \
368 : "=r" (__res_r0) \
369 : "r" (__r0),"r" (__r1) \
370 : "lr"); \
371 __res = __res_r0; \
372 __syscall_return(type,__res); \
373}
374
375
376#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
377type name(type1 arg1,type2 arg2,type3 arg3) { \
378 register long __r0 __asm__("r0") = (long)arg1; \
379 register long __r1 __asm__("r1") = (long)arg2; \
380 register long __r2 __asm__("r2") = (long)arg3; \
381 register long __res_r0 __asm__("r0"); \
382 long __res; \
383 __asm__ __volatile__ ( \
384 __syscall(name) \
385 : "=r" (__res_r0) \
386 : "r" (__r0),"r" (__r1),"r" (__r2) \
387 : "lr"); \
388 __res = __res_r0; \
389 __syscall_return(type,__res); \
390}
391
392
393#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
394type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
395 register long __r0 __asm__("r0") = (long)arg1; \
396 register long __r1 __asm__("r1") = (long)arg2; \
397 register long __r2 __asm__("r2") = (long)arg3; \
398 register long __r3 __asm__("r3") = (long)arg4; \
399 register long __res_r0 __asm__("r0"); \
400 long __res; \
401 __asm__ __volatile__ ( \
402 __syscall(name) \
403 : "=r" (__res_r0) \
404 : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3) \
405 : "lr"); \
406 __res = __res_r0; \
407 __syscall_return(type,__res); \
408}
409
410
411#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
412type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \
413 register long __r0 __asm__("r0") = (long)arg1; \
414 register long __r1 __asm__("r1") = (long)arg2; \
415 register long __r2 __asm__("r2") = (long)arg3; \
416 register long __r3 __asm__("r3") = (long)arg4; \
417 register long __r4 __asm__("r4") = (long)arg5; \
418 register long __res_r0 __asm__("r0"); \
419 long __res; \
420 __asm__ __volatile__ ( \
421 __syscall(name) \
422 : "=r" (__res_r0) \
423 : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3),"r" (__r4) \
424 : "lr"); \
425 __res = __res_r0; \
426 __syscall_return(type,__res); \
427}
428
429#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
430type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) { \
431 register long __r0 __asm__("r0") = (long)arg1; \
432 register long __r1 __asm__("r1") = (long)arg2; \
433 register long __r2 __asm__("r2") = (long)arg3; \
434 register long __r3 __asm__("r3") = (long)arg4; \
435 register long __r4 __asm__("r4") = (long)arg5; \
436 register long __r5 __asm__("r5") = (long)arg6; \
437 register long __res_r0 __asm__("r0"); \
438 long __res; \
439 __asm__ __volatile__ ( \
440 __syscall(name) \
441 : "=r" (__res_r0) \
442 : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3), "r" (__r4),"r" (__r5) \
443 : "lr"); \
444 __res = __res_r0; \
445 __syscall_return(type,__res); \
446}
447 314
448#define __ARCH_WANT_IPC_PARSE_VERSION 315#define __ARCH_WANT_IPC_PARSE_VERSION
449#define __ARCH_WANT_OLD_READDIR 316#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 4c40cb41cdf..0580b5d62bb 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -8,7 +8,8 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>

-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	int direction);

 /*
  * Return whether the given device DMA address mask can be supported
@@ -307,7 +308,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)

-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h
index 10193da4113..0a5224245e4 100644
--- a/include/asm-avr32/setup.h
+++ b/include/asm-avr32/setup.h
@@ -13,6 +13,8 @@

 #define COMMAND_LINE_SIZE 256

+#ifdef __KERNEL__
+
 /* Magic number indicating that a tag table is present */
 #define ATAG_MAGIC	0xa2a25441

@@ -138,4 +140,6 @@ void chip_enable_sdram(void);

 #endif /* !__ASSEMBLY__ */

+#endif  /* __KERNEL__ */
+
 #endif /* __ASM_AVR32_SETUP_H__ */
diff --git a/include/asm-cris/arch-v10/bitops.h b/include/asm-cris/arch-v10/bitops.h
index b73f5396e5a..be85f6de25d 100644
--- a/include/asm-cris/arch-v10/bitops.h
+++ b/include/asm-cris/arch-v10/bitops.h
@@ -10,7 +10,7 @@
  * number. They differ in that the first function also inverts all bits
  * in the input.
  */
-extern inline unsigned long cris_swapnwbrlz(unsigned long w)
+static inline unsigned long cris_swapnwbrlz(unsigned long w)
 {
 	/* Let's just say we return the result in the same register as the
 	   input.  Saying we clobber the input but can return the result
@@ -26,7 +26,7 @@ extern inline unsigned long cris_swapnwbrlz(unsigned long w)
 	return res;
 }

-extern inline unsigned long cris_swapwbrlz(unsigned long w)
+static inline unsigned long cris_swapwbrlz(unsigned long w)
 {
 	unsigned res;
 	__asm__ ("swapwbr %0 \n\t"
@@ -40,7 +40,7 @@ extern inline unsigned long cris_swapwbrlz(unsigned long w)
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
  */
-extern inline unsigned long ffz(unsigned long w)
+static inline unsigned long ffz(unsigned long w)
 {
 	return cris_swapnwbrlz(w);
 }
@@ -51,7 +51,7 @@ extern inline unsigned long ffz(unsigned long w)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-extern inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
 	return cris_swapnwbrlz(~word);
 }
@@ -65,7 +65,7 @@ extern inline unsigned long __ffs(unsigned long word)
  * differs in spirit from the above ffz (man ffs).
  */

-extern inline unsigned long kernel_ffs(unsigned long w)
+static inline unsigned long kernel_ffs(unsigned long w)
 {
 	return w ? cris_swapwbrlz (w) + 1 : 0;
 }
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index cbf1a98f012..662cea70152 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -156,10 +156,10 @@ dma_get_cache_alignment(void)
 	return (1 << INTERNODE_CACHE_SHIFT);
 }

-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)

 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 }
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
index dbd0f30b85b..a8e1e6cb7cd 100644
--- a/include/asm-cris/semaphore-helper.h
+++ b/include/asm-cris/semaphore-helper.h
@@ -20,12 +20,12 @@
20/* 20/*
21 * These two _must_ execute atomically wrt each other. 21 * These two _must_ execute atomically wrt each other.
22 */ 22 */
23extern inline void wake_one_more(struct semaphore * sem) 23static inline void wake_one_more(struct semaphore * sem)
24{ 24{
25 atomic_inc(&sem->waking); 25 atomic_inc(&sem->waking);
26} 26}
27 27
28extern inline int waking_non_zero(struct semaphore *sem) 28static inline int waking_non_zero(struct semaphore *sem)
29{ 29{
30 unsigned long flags; 30 unsigned long flags;
31 int ret = 0; 31 int ret = 0;
@@ -40,7 +40,7 @@ extern inline int waking_non_zero(struct semaphore *sem)
40 return ret; 40 return ret;
41} 41}
42 42
43extern inline int waking_non_zero_interruptible(struct semaphore *sem, 43static inline int waking_non_zero_interruptible(struct semaphore *sem,
44 struct task_struct *tsk) 44 struct task_struct *tsk)
45{ 45{
46 int ret = 0; 46 int ret = 0;
@@ -59,7 +59,7 @@ extern inline int waking_non_zero_interruptible(struct semaphore *sem,
59 return ret; 59 return ret;
60} 60}
61 61
62extern inline int waking_non_zero_trylock(struct semaphore *sem) 62static inline int waking_non_zero_trylock(struct semaphore *sem)
63{ 63{
64 int ret = 1; 64 int ret = 1;
65 unsigned long flags; 65 unsigned long flags;
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index e9fc1d47797..bcb2df68496 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -172,10 +172,10 @@ int dma_get_cache_alignment(void)
 	return 1 << L1_CACHE_SHIFT;
 }

-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)

 static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
 	flush_write_buffers();
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index 0f390f41f81..ff4d6cdeb15 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned long paddr;

-	inc_preempt_count();
+	pagefault_disable();
 	paddr = page_to_phys(page);

 	switch (type) {
@@ -170,8 +170,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	default:
 		BUG();
 	}
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }

 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-frv/param.h b/include/asm-frv/param.h
index 168381ebb41..365653b1726 100644
--- a/include/asm-frv/param.h
+++ b/include/asm-frv/param.h
@@ -18,6 +18,5 @@
 #endif

 #define MAXHOSTNAMELEN	64	/* max length of hostname */
-#define COMMAND_LINE_SIZE	512

 #endif /* _ASM_PARAM_H */
diff --git a/include/asm-frv/setup.h b/include/asm-frv/setup.h
index 0d293b9a585..afd787ceede 100644
--- a/include/asm-frv/setup.h
+++ b/include/asm-frv/setup.h
@@ -12,6 +12,10 @@
 #ifndef _ASM_SETUP_H
 #define _ASM_SETUP_H

+#define COMMAND_LINE_SIZE	512
+
+#ifdef __KERNEL__
+
 #include <linux/init.h>

 #ifndef __ASSEMBLY__
@@ -22,4 +26,6 @@ extern unsigned long __initdata num_mappedpages;

 #endif /* !__ASSEMBLY__ */

+#endif /* __KERNEL__ */
+
 #endif /* _ASM_SETUP_H */
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index 725e854928c..584c0417ae4 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -320,125 +320,6 @@
320#ifdef __KERNEL__ 320#ifdef __KERNEL__
321 321
322#define NR_syscalls 310 322#define NR_syscalls 310
323#include <linux/err.h>
324
325/*
326 * process the return value of a syscall, consigning it to one of two possible fates
327 * - user-visible error numbers are in the range -1 - -4095: see <asm-frv/errno.h>
328 */
329#undef __syscall_return
330#define __syscall_return(type, res) \
331do { \
332 unsigned long __sr2 = (res); \
333 if (__builtin_expect(__sr2 >= (unsigned long)(-MAX_ERRNO), 0)) { \
334 errno = (-__sr2); \
335 __sr2 = ~0UL; \
336 } \
337 return (type) __sr2; \
338} while (0)
339
340/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
341
342#undef _syscall0
343#define _syscall0(type,name) \
344type name(void) \
345{ \
346 register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
347 register unsigned long __sc0 __asm__ ("gr8"); \
348 __asm__ __volatile__ ("tira gr0,#0" \
349 : "=r" (__sc0) \
350 : "r" (__scnum)); \
351 __syscall_return(type, __sc0); \
352}
353
354#undef _syscall1
355#define _syscall1(type,name,type1,arg1) \
356type name(type1 arg1) \
357{ \
358 register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
359 register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
360 __asm__ __volatile__ ("tira gr0,#0" \
361 : "+r" (__sc0) \
362 : "r" (__scnum)); \
363 __syscall_return(type, __sc0); \
364}
365
366#undef _syscall2
367#define _syscall2(type,name,type1,arg1,type2,arg2) \
368type name(type1 arg1,type2 arg2) \
369{ \
370 register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
371 register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
372 register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
373 __asm__ __volatile__ ("tira gr0,#0" \
374 : "+r" (__sc0) \
375 : "r" (__scnum), "r" (__sc1)); \
376 __syscall_return(type, __sc0); \
377}
378
379#undef _syscall3
380#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
381type name(type1 arg1,type2 arg2,type3 arg3) \
382{ \
383 register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
384 register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
385 register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
386 register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
387 __asm__ __volatile__ ("tira gr0,#0" \
388 : "+r" (__sc0) \
389 : "r" (__scnum), "r" (__sc1), "r" (__sc2)); \
390 __syscall_return(type, __sc0); \
391}
392
393#undef _syscall4
394#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
395type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
396{ \
397 register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
398 register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
399 register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
400 register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
401 register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
402 __asm__ __volatile__ ("tira gr0,#0" \
403 : "+r" (__sc0) \
404 : "r" (__scnum), "r" (__sc1), "r" (__sc2), "r" (__sc3)); \
405 __syscall_return(type, __sc0); \
406}
407
408#undef _syscall5
409#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
410type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
411{ \
412 register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
413 register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
414 register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
415 register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
416 register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
417 register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5; \
418 __asm__ __volatile__ ("tira gr0,#0" \
419 : "+r" (__sc0) \
420 : "r" (__scnum), "r" (__sc1), "r" (__sc2), \
421 "r" (__sc3), "r" (__sc4)); \
422 __syscall_return(type, __sc0); \
423}
424
425#undef _syscall6
426#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
427type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
428{ \
429 register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
430 register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
431 register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
432 register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
433 register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
434 register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5; \
435 register unsigned long __sc5 __asm__ ("gr13") = (unsigned long) arg6; \
436 __asm__ __volatile__ ("tira gr0,#0" \
437 : "+r" (__sc0) \
438 : "r" (__scnum), "r" (__sc1), "r" (__sc2), \
439 "r" (__sc3), "r" (__sc4), "r" (__sc5)); \
440 __syscall_return(type, __sc0); \
441}
442 323
443#define __ARCH_WANT_IPC_PARSE_VERSION 324#define __ARCH_WANT_IPC_PARSE_VERSION
444/* #define __ARCH_WANT_OLD_READDIR */ 325/* #define __ARCH_WANT_OLD_READDIR */
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 3c06be38170..fa14f8cd30c 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -1,4 +1,3 @@
-header-y += atomic.h
 header-y += errno-base.h
 header-y += errno.h
 header-y += fcntl.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index a84c3d88a18..a37e95fe58d 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -14,6 +14,7 @@ unifdef-y += posix_types.h
 unifdef-y += ptrace.h
 unifdef-y += resource.h
 unifdef-y += sembuf.h
+unifdef-y += setup.h
 unifdef-y += shmbuf.h
 unifdef-y += sigcontext.h
 unifdef-y += siginfo.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 42a95d9a064..b7e4a0467cb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -66,7 +66,7 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
 	atomic64_sub(i, v);
 }

-#else
+#else  /* BITS_PER_LONG == 64 */

 typedef atomic_t atomic_long_t;

@@ -113,5 +113,6 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
 	atomic_sub(i, v);
 }

-#endif
-#endif
+#endif  /* BITS_PER_LONG == 64 */
+
+#endif  /* _ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index b541e48cc54..783ab9944d7 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -266,7 +266,7 @@ dma_error(dma_addr_t dma_addr)

 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)

 static inline int
 dma_get_cache_alignment(void)
@@ -295,7 +295,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }

 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index df893c16031..f422df0956a 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;

-	inc_preempt_count();
+	pagefault_disable();

 	switch (op) {
 	case FUTEX_OP_SET:
@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 		ret = -ENOSYS;
 	}

-	dec_preempt_count();
+	pagefault_enable();

 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e60d6f21fa6..4d4c62d1105 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -11,8 +11,8 @@
11 11
12#define RODATA \ 12#define RODATA \
13 . = ALIGN(4096); \ 13 . = ALIGN(4096); \
14 __start_rodata = .; \
15 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ 14 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
15 VMLINUX_SYMBOL(__start_rodata) = .; \
16 *(.rodata) *(.rodata.*) \ 16 *(.rodata) *(.rodata.*) \
17 *(__vermagic) /* Kernel version magic */ \ 17 *(__vermagic) /* Kernel version magic */ \
18 } \ 18 } \
@@ -119,17 +119,16 @@
119 *(__ksymtab_strings) \ 119 *(__ksymtab_strings) \
120 } \ 120 } \
121 \ 121 \
122 EH_FRAME \
123 \
122 /* Built-in module parameters. */ \ 124 /* Built-in module parameters. */ \
123 __param : AT(ADDR(__param) - LOAD_OFFSET) { \ 125 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
124 VMLINUX_SYMBOL(__start___param) = .; \ 126 VMLINUX_SYMBOL(__start___param) = .; \
125 *(__param) \ 127 *(__param) \
126 VMLINUX_SYMBOL(__stop___param) = .; \ 128 VMLINUX_SYMBOL(__stop___param) = .; \
129 VMLINUX_SYMBOL(__end_rodata) = .; \
127 } \ 130 } \
128 \ 131 \
129 /* Unwind data binary search table */ \
130 EH_FRAME_HDR \
131 \
132 __end_rodata = .; \
133 . = ALIGN(4096); 132 . = ALIGN(4096);
134 133
135#define SECURITY_INIT \ 134#define SECURITY_INIT \
@@ -162,15 +161,23 @@
162 VMLINUX_SYMBOL(__kprobes_text_end) = .; 161 VMLINUX_SYMBOL(__kprobes_text_end) = .;
163 162
164#ifdef CONFIG_STACK_UNWIND 163#ifdef CONFIG_STACK_UNWIND
165 /* Unwind data binary search table */ 164#define EH_FRAME \
166#define EH_FRAME_HDR \ 165 /* Unwind data binary search table */ \
166 . = ALIGN(8); \
167 .eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \ 167 .eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \
168 VMLINUX_SYMBOL(__start_unwind_hdr) = .; \ 168 VMLINUX_SYMBOL(__start_unwind_hdr) = .; \
169 *(.eh_frame_hdr) \ 169 *(.eh_frame_hdr) \
170 VMLINUX_SYMBOL(__end_unwind_hdr) = .; \ 170 VMLINUX_SYMBOL(__end_unwind_hdr) = .; \
171 } \
172 /* Unwind data */ \
173 . = ALIGN(8); \
174 .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \
175 VMLINUX_SYMBOL(__start_unwind) = .; \
176 *(.eh_frame) \
177 VMLINUX_SYMBOL(__end_unwind) = .; \
171 } 178 }
172#else 179#else
173#define EH_FRAME_HDR 180#define EH_FRAME
174#endif 181#endif
175 182
176 /* DWARF debug sections. 183 /* DWARF debug sections.
diff --git a/include/asm-h8300/delay.h b/include/asm-h8300/delay.h
index cbccbbdd640..743beba70f8 100644
--- a/include/asm-h8300/delay.h
+++ b/include/asm-h8300/delay.h
@@ -9,7 +9,7 @@
9 * Delay routines, using a pre-computed "loops_per_second" value. 9 * Delay routines, using a pre-computed "loops_per_second" value.
10 */ 10 */
11 11
12extern __inline__ void __delay(unsigned long loops) 12static inline void __delay(unsigned long loops)
13{ 13{
14 __asm__ __volatile__ ("1:\n\t" 14 __asm__ __volatile__ ("1:\n\t"
15 "dec.l #1,%0\n\t" 15 "dec.l #1,%0\n\t"
@@ -27,7 +27,7 @@ extern __inline__ void __delay(unsigned long loops)
27 27
28extern unsigned long loops_per_jiffy; 28extern unsigned long loops_per_jiffy;
29 29
30extern __inline__ void udelay(unsigned long usecs) 30static inline void udelay(unsigned long usecs)
31{ 31{
32 usecs *= 4295; /* 2**32 / 1000000 */ 32 usecs *= 4295; /* 2**32 / 1000000 */
33 usecs /= (loops_per_jiffy*HZ); 33 usecs /= (loops_per_jiffy*HZ);
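The h8300 delay helpers switch from "extern __inline__" to "static inline": under GNU89 semantics an extern inline function has no out-of-line definition, so any call the compiler declines to inline fails to link, and the meaning of "extern inline" changes again under C99. "static inline" gives each translation unit its own emitted copy and behaves the same everywhere. A header-style sketch of the portable form (the scaling mirrors the udelay() math above; values are illustrative):

#include <stdio.h>

static inline unsigned long usecs_to_loops(unsigned long usecs,
                                           unsigned long loops_per_jiffy,
                                           unsigned long hz)
{
    usecs *= 4295;                      /* ~2**32 / 1000000, as in the h8300 code */
    return usecs / (loops_per_jiffy * hz);
}

int main(void)
{
    printf("loops = %lu\n", usecs_to_loops(1000, 5000, 100));
    return 0;
}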
diff --git a/include/asm-h8300/mmu_context.h b/include/asm-h8300/mmu_context.h
index 855721a5dcc..5c165f7bee0 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/include/asm-h8300/mmu_context.h
@@ -9,7 +9,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9{ 9{
10} 10}
11 11
12extern inline int 12static inline int
13init_new_context(struct task_struct *tsk, struct mm_struct *mm) 13init_new_context(struct task_struct *tsk, struct mm_struct *mm)
14{ 14{
15 // mm->context = virt_to_phys(mm->pgd); 15 // mm->context = virt_to_phys(mm->pgd);
@@ -23,7 +23,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
23{ 23{
24} 24}
25 25
26extern inline void activate_mm(struct mm_struct *prev_mm, 26static inline void activate_mm(struct mm_struct *prev_mm,
27 struct mm_struct *next_mm) 27 struct mm_struct *next_mm)
28{ 28{
29} 29}
diff --git a/include/asm-h8300/pci.h b/include/asm-h8300/pci.h
index 5edad5b70fd..0c771b05fdd 100644
--- a/include/asm-h8300/pci.h
+++ b/include/asm-h8300/pci.h
@@ -10,12 +10,12 @@
10#define pcibios_assign_all_busses() 0 10#define pcibios_assign_all_busses() 0
11#define pcibios_scan_all_fns(a, b) 0 11#define pcibios_scan_all_fns(a, b) 0
12 12
13extern inline void pcibios_set_master(struct pci_dev *dev) 13static inline void pcibios_set_master(struct pci_dev *dev)
14{ 14{
15 /* No special bus mastering setup handling */ 15 /* No special bus mastering setup handling */
16} 16}
17 17
18extern inline void pcibios_penalize_isa_irq(int irq, int active) 18static inline void pcibios_penalize_isa_irq(int irq, int active)
19{ 19{
20 /* We don't do dynamic PCI IRQ allocation */ 20 /* We don't do dynamic PCI IRQ allocation */
21} 21}
diff --git a/include/asm-h8300/tlbflush.h b/include/asm-h8300/tlbflush.h
index bbdffbeeede..9a2c5c9fd70 100644
--- a/include/asm-h8300/tlbflush.h
+++ b/include/asm-h8300/tlbflush.h
@@ -47,12 +47,12 @@ static inline void flush_tlb_range(struct mm_struct *mm,
47 BUG(); 47 BUG();
48} 48}
49 49
50extern inline void flush_tlb_kernel_page(unsigned long addr) 50static inline void flush_tlb_kernel_page(unsigned long addr)
51{ 51{
52 BUG(); 52 BUG();
53} 53}
54 54
55extern inline void flush_tlb_pgtables(struct mm_struct *mm, 55static inline void flush_tlb_pgtables(struct mm_struct *mm,
56 unsigned long start, unsigned long end) 56 unsigned long start, unsigned long end)
57{ 57{
58 BUG(); 58 BUG();
diff --git a/include/asm-h8300/unistd.h b/include/asm-h8300/unistd.h
index 747788d629a..7ddd414f8d1 100644
--- a/include/asm-h8300/unistd.h
+++ b/include/asm-h8300/unistd.h
@@ -295,172 +295,6 @@
295#ifdef __KERNEL__ 295#ifdef __KERNEL__
296 296
297#define NR_syscalls 289 297#define NR_syscalls 289
298#include <linux/err.h>
299
300/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
301 <asm-m68k/errno.h> */
302
303#define __syscall_return(type, res) \
304do { \
305 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
306 /* avoid using res which is declared to be in register d0; \
307 errno might expand to a function call and clobber it. */ \
308 int __err = -(res); \
309 errno = __err; \
310 res = -1; \
311 } \
312 return (type) (res); \
313} while (0)
314
315#define _syscall0(type, name) \
316type name(void) \
317{ \
318 register long __res __asm__("er0"); \
319 __asm__ __volatile__ ("mov.l %1,er0\n\t" \
320 "trapa #0\n\t" \
321 : "=r" (__res) \
322 : "g" (__NR_##name) \
323 : "cc", "memory"); \
324 __syscall_return(type, __res); \
325}
326
327#define _syscall1(type, name, atype, a) \
328type name(atype a) \
329{ \
330 register long __res __asm__("er0"); \
331 register long _a __asm__("er1"); \
332 _a = (long)a; \
333 __asm__ __volatile__ ("mov.l %1,er0\n\t" \
334 "trapa #0\n\t" \
335 : "=r" (__res) \
336 : "g" (__NR_##name), \
337 "g" (_a) \
338 : "cc", "memory"); \
339 __syscall_return(type, __res); \
340}
341
342#define _syscall2(type, name, atype, a, btype, b) \
343type name(atype a, btype b) \
344{ \
345 register long __res __asm__("er0"); \
346 register long _a __asm__("er1"); \
347 register long _b __asm__("er2"); \
348 _a = (long)a; \
349 _b = (long)b; \
350 __asm__ __volatile__ ("mov.l %1,er0\n\t" \
351 "trapa #0\n\t" \
352 : "=r" (__res) \
353 : "g" (__NR_##name), \
354 "g" (_a), \
355 "g" (_b) \
356 : "cc", "memory"); \
357 __syscall_return(type, __res); \
358}
359
360#define _syscall3(type, name, atype, a, btype, b, ctype, c) \
361type name(atype a, btype b, ctype c) \
362{ \
363 register long __res __asm__("er0"); \
364 register long _a __asm__("er1"); \
365 register long _b __asm__("er2"); \
366 register long _c __asm__("er3"); \
367 _a = (long)a; \
368 _b = (long)b; \
369 _c = (long)c; \
370 __asm__ __volatile__ ("mov.l %1,er0\n\t" \
371 "trapa #0\n\t" \
372 : "=r" (__res) \
373 : "g" (__NR_##name), \
374 "g" (_a), \
375 "g" (_b), \
376 "g" (_c) \
377 : "cc", "memory"); \
378 __syscall_return(type, __res); \
379}
380
381#define _syscall4(type, name, atype, a, btype, b, \
382 ctype, c, dtype, d) \
383type name(atype a, btype b, ctype c, dtype d) \
384{ \
385 register long __res __asm__("er0"); \
386 register long _a __asm__("er1"); \
387 register long _b __asm__("er2"); \
388 register long _c __asm__("er3"); \
389 register long _d __asm__("er4"); \
390 _a = (long)a; \
391 _b = (long)b; \
392 _c = (long)c; \
393 _d = (long)d; \
394 __asm__ __volatile__ ("mov.l %1,er0\n\t" \
395 "trapa #0\n\t" \
396 : "=r" (__res) \
397 : "g" (__NR_##name), \
398 "g" (_a), \
399 "g" (_b), \
400 "g" (_c), \
401 "g" (_d) \
402 : "cc", "memory"); \
403 __syscall_return(type, __res); \
404}
405
406#define _syscall5(type, name, atype, a, btype, b, \
407 ctype, c, dtype, d, etype, e) \
408type name(atype a, btype b, ctype c, dtype d, etype e) \
409{ \
410 register long __res __asm__("er0"); \
411 register long _a __asm__("er1"); \
412 register long _b __asm__("er2"); \
413 register long _c __asm__("er3"); \
414 register long _d __asm__("er4"); \
415 register long _e __asm__("er5"); \
416 _a = (long)a; \
417 _b = (long)b; \
418 _c = (long)c; \
419 _d = (long)d; \
420 _e = (long)e; \
421 __asm__ __volatile__ ("mov.l %1,er0\n\t" \
422 "trapa #0\n\t" \
423 : "=r" (__res) \
424 : "g" (__NR_##name), \
425 "g" (_a), \
426 "g" (_b), \
427 "g" (_c), \
428 "g" (_d), \
429 "g" (_e) \
430 : "cc", "memory"); \
431 __syscall_return(type, __res); \
432}
433
434#define _syscall6(type, name, atype, a, btype, b, \
435 ctype, c, dtype, d, etype, e, ftype, f) \
436type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
437{ \
438 register long __res __asm__("er0"); \
439 register long _a __asm__("er1"); \
440 register long _b __asm__("er2"); \
441 register long _c __asm__("er3"); \
442 register long _d __asm__("er4"); \
443 register long _e __asm__("er5"); \
444 register long _f __asm__("er6"); \
445 _a = (long)a; \
446 _b = (long)b; \
447 _c = (long)c; \
448 _d = (long)d; \
449 _e = (long)e; \
450 _f = (long)f; \
451 __asm__ __volatile__ ("mov.l %1,er0\n\t" \
452 "trapa #0\n\t" \
453 : "=r" (__res) \
454 : "g" (__NR_##name), \
455 "g" (_a), \
456 "g" (_b), \
457 "g" (_c), \
458 "g" (_d), \
459 "g" (_e) \
460 "g" (_f) \
461 : "cc", "memory"); \
462 __syscall_return(type, __res); \
463}
464 298
465#define __ARCH_WANT_IPC_PARSE_VERSION 299#define __ARCH_WANT_IPC_PARSE_VERSION
466#define __ARCH_WANT_OLD_READDIR 300#define __ARCH_WANT_OLD_READDIR
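The large removal above drops the in-kernel _syscall0.._syscall6 macros from the exported header; userspace that needs a raw system call goes through the C library's syscall(2) wrapper instead. A minimal sketch of that replacement (ordinary userspace C, not kernel code):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
    long pid = syscall(SYS_getpid);     /* same effect as getpid() */
    printf("pid via syscall(): %ld\n", pid);
    return 0;
}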
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index 147e4ac1ebf..5ae93afc67e 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -7,5 +7,4 @@ header-y += ptrace-abi.h
7header-y += ucontext.h 7header-y += ucontext.h
8 8
9unifdef-y += mtrr.h 9unifdef-y += mtrr.h
10unifdef-y += setup.h
11unifdef-y += vm86.h 10unifdef-y += vm86.h
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index b01a7ec409c..b8fa9557c53 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -4,7 +4,7 @@
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <asm/types.h> 6#include <asm/types.h>
7 7#include <linux/stddef.h>
8#include <linux/types.h> 8#include <linux/types.h>
9 9
10struct alt_instr { 10struct alt_instr {
@@ -118,4 +118,15 @@ static inline void alternatives_smp_switch(int smp) {}
118#define LOCK_PREFIX "" 118#define LOCK_PREFIX ""
119#endif 119#endif
120 120
121struct paravirt_patch;
122#ifdef CONFIG_PARAVIRT
123void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
124#else
125static inline void
126apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
127{}
128#define __start_parainstructions NULL
129#define __stop_parainstructions NULL
130#endif
131
121#endif /* _I386_ALTERNATIVE_H */ 132#endif /* _I386_ALTERNATIVE_H */
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index b9529578fc3..41a44319905 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -37,18 +37,27 @@ extern void generic_apic_probe(void);
37/* 37/*
38 * Basic functions accessing APICs. 38 * Basic functions accessing APICs.
39 */ 39 */
40#ifdef CONFIG_PARAVIRT
41#include <asm/paravirt.h>
42#else
43#define apic_write native_apic_write
44#define apic_write_atomic native_apic_write_atomic
45#define apic_read native_apic_read
46#endif
40 47
41static __inline void apic_write(unsigned long reg, unsigned long v) 48static __inline fastcall void native_apic_write(unsigned long reg,
49 unsigned long v)
42{ 50{
43 *((volatile unsigned long *)(APIC_BASE+reg)) = v; 51 *((volatile unsigned long *)(APIC_BASE+reg)) = v;
44} 52}
45 53
46static __inline void apic_write_atomic(unsigned long reg, unsigned long v) 54static __inline fastcall void native_apic_write_atomic(unsigned long reg,
55 unsigned long v)
47{ 56{
48 xchg((volatile unsigned long *)(APIC_BASE+reg), v); 57 xchg((volatile unsigned long *)(APIC_BASE+reg), v);
49} 58}
50 59
51static __inline unsigned long apic_read(unsigned long reg) 60static __inline fastcall unsigned long native_apic_read(unsigned long reg)
52{ 61{
53 return *((volatile unsigned long *)(APIC_BASE+reg)); 62 return *((volatile unsigned long *)(APIC_BASE+reg));
54} 63}
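The apic.h change keeps the raw MMIO accessors as native_apic_* and makes the generic names either plain aliases or, with CONFIG_PARAVIRT, indirections through the paravirt function table. A compile-time sketch of that selection pattern (illustrative names; the struct stands in for paravirt_ops):

#include <stdio.h>

/* #define CONFIG_PARAVIRT_SKETCH 1 */

static void native_apic_write_sketch(unsigned long reg, unsigned long v)
{
    printf("native write: reg=0x%lx val=0x%lx\n", reg, v);
}

#ifdef CONFIG_PARAVIRT_SKETCH
struct pv_ops_sketch { void (*apic_write)(unsigned long, unsigned long); };
static struct pv_ops_sketch pv_ops = { native_apic_write_sketch };
#define apic_write_sketch(r, v) pv_ops.apic_write((r), (v))
#else
#define apic_write_sketch native_apic_write_sketch
#endif

int main(void)
{
    apic_write_sketch(0x300, 0x4000);
    return 0;
}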
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index a6c024e2506..c57441bb290 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -187,9 +187,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
187 /* Modern 486+ processor */ 187 /* Modern 486+ processor */
188 __i = i; 188 __i = i;
189 __asm__ __volatile__( 189 __asm__ __volatile__(
190 LOCK_PREFIX "xaddl %0, %1;" 190 LOCK_PREFIX "xaddl %0, %1"
191 :"=r"(i) 191 :"+r" (i), "+m" (v->counter)
192 :"m"(v->counter), "0"(i)); 192 : : "memory");
193 return i + __i; 193 return i + __i;
194 194
195#ifdef CONFIG_M386 195#ifdef CONFIG_M386
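The atomic_add_return() rewrite expresses both operands as read-write ("+r" for the increment, "+m" for the counter) instead of tying an input to output 0, and adds a memory clobber. A standalone sketch of the xadd pattern those constraints describe (i386/x86-64 with GCC only; not the kernel's atomic_t implementation):

#include <stdio.h>

static int atomic_add_return_sketch(int i, int *counter)
{
    int added = i;

    /* xadd leaves the counter's old value in %0 and stores old+i in %1 */
    __asm__ __volatile__("lock; xaddl %0, %1"
                         : "+r" (i), "+m" (*counter)
                         : : "memory");
    return i + added;                   /* old value + increment = new value */
}

int main(void)
{
    int v = 40;
    printf("%d\n", atomic_add_return_sketch(2, &v));   /* prints 42 */
    return 0;
}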
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index 96b228e6e79..8ce79a6fa89 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -12,4 +12,8 @@
12#define EXTENDED_VGA 0xfffe /* 80x50 mode */ 12#define EXTENDED_VGA 0xfffe /* 80x50 mode */
13#define ASK_VGA 0xfffd /* ask for it at bootup */ 13#define ASK_VGA 0xfffd /* ask for it at bootup */
14 14
 15#endif 15/* Physical address where kernel should be loaded. */
16#define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \
17 & ~(CONFIG_PHYSICAL_ALIGN - 1))
18
19#endif /* _LINUX_BOOT_H */
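LOAD_PHYSICAL_ADDR rounds the traditional 1MB load address up to the configured alignment with the usual power-of-two trick (base + align - 1) & ~(align - 1). A worked sketch of that arithmetic (the CONFIG_PHYSICAL_ALIGN values are just examples):

#include <stdio.h>

static unsigned long round_up(unsigned long base, unsigned long align)
{
    return (base + align - 1) & ~(align - 1);   /* align must be a power of two */
}

int main(void)
{
    printf("0x%lx\n", round_up(0x100000UL, 0x200000UL));  /* 2MB alignment -> 0x200000 */
    printf("0x%lx\n", round_up(0x100000UL, 0x100000UL));  /* 1MB alignment -> 0x100000 */
    return 0;
}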
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 592ffeeda45..38f1aebbbdb 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -21,6 +21,7 @@
21#include <asm/processor.h> 21#include <asm/processor.h>
22#include <asm/i387.h> 22#include <asm/i387.h>
23#include <asm/msr.h> 23#include <asm/msr.h>
24#include <asm/paravirt.h>
24 25
25static int __init no_halt(char *s) 26static int __init no_halt(char *s)
26{ 27{
@@ -91,6 +92,9 @@ static void __init check_fpu(void)
91 92
92static void __init check_hlt(void) 93static void __init check_hlt(void)
93{ 94{
95 if (paravirt_enabled())
96 return;
97
94 printk(KERN_INFO "Checking 'hlt' instruction... "); 98 printk(KERN_INFO "Checking 'hlt' instruction... ");
95 if (!boot_cpu_data.hlt_works_ok) { 99 if (!boot_cpu_data.hlt_works_ok) {
96 printk("disabled\n"); 100 printk("disabled\n");
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index b1bc7b1b64b..9d914e1e4aa 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -13,6 +13,9 @@ struct i386_cpu {
13extern int arch_register_cpu(int num); 13extern int arch_register_cpu(int num);
14#ifdef CONFIG_HOTPLUG_CPU 14#ifdef CONFIG_HOTPLUG_CPU
15extern void arch_unregister_cpu(int); 15extern void arch_unregister_cpu(int);
16extern int enable_cpu_hotplug;
17#else
18#define enable_cpu_hotplug 0
16#endif 19#endif
17 20
18DECLARE_PER_CPU(int, cpu_state); 21DECLARE_PER_CPU(int, cpu_state);
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index d314ebb3d59..3f92b94e0d7 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -31,7 +31,7 @@
31#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ 31#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
32#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ 32#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
33#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ 33#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
34#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ 34#define X86_FEATURE_DS (0*32+21) /* Debug Store */
35#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ 35#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
36#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ 36#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
 37#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore) */ 37#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore) */
@@ -73,6 +73,8 @@
73#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ 73#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
74#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ 74#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
75#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ 75#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
76#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
77#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
76 78
77/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 79/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
78#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ 80#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -134,6 +136,10 @@
134#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) 136#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
135#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) 137#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
136#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) 138#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
139#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
140#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
141#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
142#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
137 143
138#endif /* __ASM_I386_CPUFEATURE_H */ 144#endif /* __ASM_I386_CPUFEATURE_H */
139 145
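The new DS/PEBS/BTS defines follow the existing (word*32 + bit) encoding, and the cpu_has_* macros reduce to testing one bit of one 32-bit capability word. A sketch of how that encoding is decoded (the caps array here is a fake CPU, not boot_cpu_data):

#include <stdio.h>

#define X86_FEATURE_DS_SKETCH   (0*32 + 21)
#define X86_FEATURE_PEBS_SKETCH (3*32 + 12)

static int has_feature(const unsigned int *caps, int f)
{
    return (caps[f / 32] >> (f % 32)) & 1;
}

int main(void)
{
    unsigned int caps[4] = { 1u << 21, 0, 0, 1u << 12 };   /* pretend DS and PEBS are set */
    printf("DS=%d PEBS=%d\n",
           has_feature(caps, X86_FEATURE_DS_SKETCH),
           has_feature(caps, X86_FEATURE_PEBS_SKETCH));
    return 0;
}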
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index 3cbbecd7901..5252ee0f6d7 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -1,13 +1,14 @@
1#ifndef _I386_CURRENT_H 1#ifndef _I386_CURRENT_H
2#define _I386_CURRENT_H 2#define _I386_CURRENT_H
3 3
4#include <linux/thread_info.h> 4#include <asm/pda.h>
5#include <linux/compiler.h>
5 6
6struct task_struct; 7struct task_struct;
7 8
8static __always_inline struct task_struct * get_current(void) 9static __always_inline struct task_struct *get_current(void)
9{ 10{
10 return current_thread_info()->task; 11 return read_pda(pcurrent);
11} 12}
12 13
13#define current get_current() 14#define current get_current()
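get_current() now reads the pcurrent field of the per-CPU PDA instead of deriving thread_info from the stack pointer. A sketch of that one-load indirection (plain globals stand in for the per-CPU data area; names are illustrative, not the kernel's):

#include <stdio.h>

struct task_sketch { int pid; };
struct pda_sketch  { struct task_sketch *pcurrent; };

static struct pda_sketch this_cpu_pda;          /* one of these per CPU in reality */

static struct task_sketch *get_current_sketch(void)
{
    return this_cpu_pda.pcurrent;               /* read_pda(pcurrent) */
}

int main(void)
{
    struct task_sketch init_task = { .pid = 1 };
    this_cpu_pda.pcurrent = &init_task;
    printf("current pid = %d\n", get_current_sketch()->pid);
    return 0;
}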
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index b1c7650dc7b..32d6678d0bb 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -7,6 +7,7 @@
7 * Delay routines calling functions in arch/i386/lib/delay.c 7 * Delay routines calling functions in arch/i386/lib/delay.c
8 */ 8 */
9 9
10/* Undefined functions to get compile-time errors */
10extern void __bad_udelay(void); 11extern void __bad_udelay(void);
11extern void __bad_ndelay(void); 12extern void __bad_ndelay(void);
12 13
@@ -15,13 +16,23 @@ extern void __ndelay(unsigned long nsecs);
15extern void __const_udelay(unsigned long usecs); 16extern void __const_udelay(unsigned long usecs);
16extern void __delay(unsigned long loops); 17extern void __delay(unsigned long loops);
17 18
19#if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY)
20#define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul)
21
22#define ndelay(n) paravirt_ops.const_udelay((n) * 5ul)
23
24#else /* !PARAVIRT || USE_REAL_TIME_DELAY */
25
26/* 0x10c7 is 2**32 / 1000000 (rounded up) */
18#define udelay(n) (__builtin_constant_p(n) ? \ 27#define udelay(n) (__builtin_constant_p(n) ? \
19 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ 28 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
20 __udelay(n)) 29 __udelay(n))
21 30
31/* 0x5 is 2**32 / 1000000000 (rounded up) */
22#define ndelay(n) (__builtin_constant_p(n) ? \ 32#define ndelay(n) (__builtin_constant_p(n) ? \
23 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ 33 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
24 __ndelay(n)) 34 __ndelay(n))
35#endif
25 36
26void use_tsc_delay(void); 37void use_tsc_delay(void);
27 38
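The comments added above explain the magic constants: 0x10c7 is 2**32 / 1000000 rounded up, and 5 is 2**32 / 1000000000 rounded up, so multiplying the requested delay by them yields the 32.32 fixed-point fraction __const_udelay() expects. A quick check of that arithmetic:

#include <stdio.h>

int main(void)
{
    unsigned long long us_scale = (1ULL << 32) / 1000000ULL + 1;      /* round up */
    unsigned long long ns_scale = (1ULL << 32) / 1000000000ULL + 1;   /* round up */

    printf("udelay scale = 0x%llx\n", us_scale);   /* 0x10c7 */
    printf("ndelay scale = %llu\n",   ns_scale);   /* 5 */
    return 0;
}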
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 5874ef119ff..f398cc45644 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -4,8 +4,6 @@
4#include <asm/ldt.h> 4#include <asm/ldt.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
6 6
7#define CPU_16BIT_STACK_SIZE 1024
8
9#ifndef __ASSEMBLY__ 7#ifndef __ASSEMBLY__
10 8
11#include <linux/preempt.h> 9#include <linux/preempt.h>
@@ -16,8 +14,6 @@
16 14
17extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; 15extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
18 16
19DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
20
21struct Xgt_desc_struct { 17struct Xgt_desc_struct {
22 unsigned short size; 18 unsigned short size;
23 unsigned long address __attribute__((packed)); 19 unsigned long address __attribute__((packed));
@@ -33,11 +29,6 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
33 return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; 29 return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
34} 30}
35 31
36/*
37 * This is the ldt that every process will get unless we need
38 * something other than this.
39 */
40extern struct desc_struct default_ldt[];
41extern struct desc_struct idt_table[]; 32extern struct desc_struct idt_table[];
42extern void set_intr_gate(unsigned int irq, void * addr); 33extern void set_intr_gate(unsigned int irq, void * addr);
43 34
@@ -64,8 +55,10 @@ static inline void pack_gate(__u32 *a, __u32 *b,
64#define DESCTYPE_DPL3 0x60 /* DPL-3 */ 55#define DESCTYPE_DPL3 0x60 /* DPL-3 */
65#define DESCTYPE_S 0x10 /* !system */ 56#define DESCTYPE_S 0x10 /* !system */
66 57
58#ifdef CONFIG_PARAVIRT
59#include <asm/paravirt.h>
60#else
67#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) 61#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
68#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
69 62
70#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) 63#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
71#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) 64#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
@@ -88,6 +81,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
88#undef C 81#undef C
89} 82}
90 83
84#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
85#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
86#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
87
91static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b) 88static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
92{ 89{
93 __u32 *lp = (__u32 *)((char *)dt + entry*8); 90 __u32 *lp = (__u32 *)((char *)dt + entry*8);
@@ -95,9 +92,25 @@ static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entr
95 *(lp+1) = entry_b; 92 *(lp+1) = entry_b;
96} 93}
97 94
98#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) 95#define set_ldt native_set_ldt
99#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) 96#endif /* CONFIG_PARAVIRT */
100#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) 97
98static inline fastcall void native_set_ldt(const void *addr,
99 unsigned int entries)
100{
101 if (likely(entries == 0))
102 __asm__ __volatile__("lldt %w0"::"q" (0));
103 else {
104 unsigned cpu = smp_processor_id();
105 __u32 a, b;
106
107 pack_descriptor(&a, &b, (unsigned long)addr,
108 entries * sizeof(struct desc_struct) - 1,
109 DESCTYPE_LDT, 0);
110 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
111 __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
112 }
113}
101 114
102static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) 115static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
103{ 116{
@@ -115,14 +128,6 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const vo
115 write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); 128 write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
116} 129}
117 130
118static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
119{
120 __u32 a, b;
121 pack_descriptor(&a, &b, (unsigned long)addr,
122 entries * sizeof(struct desc_struct) - 1,
123 DESCTYPE_LDT, 0);
124 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
125}
126 131
127#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) 132#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
128 133
@@ -153,35 +158,22 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entri
153 158
154static inline void clear_LDT(void) 159static inline void clear_LDT(void)
155{ 160{
156 int cpu = get_cpu(); 161 set_ldt(NULL, 0);
157
158 set_ldt_desc(cpu, &default_ldt[0], 5);
159 load_LDT_desc();
160 put_cpu();
161} 162}
162 163
163/* 164/*
164 * load one particular LDT into the current CPU 165 * load one particular LDT into the current CPU
165 */ 166 */
166static inline void load_LDT_nolock(mm_context_t *pc, int cpu) 167static inline void load_LDT_nolock(mm_context_t *pc)
167{ 168{
168 void *segments = pc->ldt; 169 set_ldt(pc->ldt, pc->size);
169 int count = pc->size;
170
171 if (likely(!count)) {
172 segments = &default_ldt[0];
173 count = 5;
174 }
175
176 set_ldt_desc(cpu, segments, count);
177 load_LDT_desc();
178} 170}
179 171
180static inline void load_LDT(mm_context_t *pc) 172static inline void load_LDT(mm_context_t *pc)
181{ 173{
182 int cpu = get_cpu(); 174 preempt_disable();
183 load_LDT_nolock(pc, cpu); 175 load_LDT_nolock(pc);
184 put_cpu(); 176 preempt_enable();
185} 177}
186 178
187static inline unsigned long get_desc_base(unsigned long *desc) 179static inline unsigned long get_desc_base(unsigned long *desc)
@@ -193,6 +185,29 @@ static inline unsigned long get_desc_base(unsigned long *desc)
193 return base; 185 return base;
194} 186}
195 187
188#else /* __ASSEMBLY__ */
189
190/*
191 * GET_DESC_BASE reads the descriptor base of the specified segment.
192 *
193 * Args:
194 * idx - descriptor index
195 * gdt - GDT pointer
196 * base - 32bit register to which the base will be written
197 * lo_w - lo word of the "base" register
198 * lo_b - lo byte of the "base" register
199 * hi_b - hi byte of the low word of the "base" register
200 *
201 * Example:
202 * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
203 * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
204 */
205#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
206 movb idx*8+4(gdt), lo_b; \
207 movb idx*8+7(gdt), hi_b; \
208 shll $16, base; \
209 movw idx*8+2(gdt), lo_w;
210
196#endif /* !__ASSEMBLY__ */ 211#endif /* !__ASSEMBLY__ */
197 212
198#endif 213#endif
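The GET_DESC_BASE assembler macro added above gathers a descriptor's 32-bit base from bytes 2-3 (bits 0-15), byte 4 (bits 16-23) and byte 7 (bits 24-31) of the 8-byte GDT entry, the same layout get_desc_base() walks in C. A small sketch of that extraction (the packed descriptor here is hand-built test data):

#include <stdio.h>
#include <stdint.h>

static uint32_t desc_base_sketch(const uint8_t d[8])
{
    return (uint32_t)d[2] | ((uint32_t)d[3] << 8) |
           ((uint32_t)d[4] << 16) | ((uint32_t)d[7] << 24);
}

int main(void)
{
    /* descriptor whose base is 0x12345678, packed the way the CPU expects */
    uint8_t d[8] = { 0, 0, 0x78, 0x56, 0x34, 0, 0, 0x12 };
    printf("base = 0x%x\n", desc_base_sketch(d));
    return 0;
}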
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 81999a3ebe7..183eebeebbd 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -156,10 +156,10 @@ dma_get_cache_alignment(void)
156 return (1 << INTERNODE_CACHE_SHIFT); 156 return (1 << INTERNODE_CACHE_SHIFT);
157} 157}
158 158
159#define dma_is_consistent(d) (1) 159#define dma_is_consistent(d, h) (1)
160 160
161static inline void 161static inline void
162dma_cache_sync(void *vaddr, size_t size, 162dma_cache_sync(struct device *dev, void *vaddr, size_t size,
163 enum dma_data_direction direction) 163 enum dma_data_direction direction)
164{ 164{
165 flush_write_buffers(); 165 flush_write_buffers();
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index f7514fb6e8e..395077aba58 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -38,6 +38,11 @@ extern struct e820map e820;
38 38
39extern int e820_all_mapped(unsigned long start, unsigned long end, 39extern int e820_all_mapped(unsigned long start, unsigned long end,
40 unsigned type); 40 unsigned type);
41extern void find_max_pfn(void);
42extern void register_bootmem_low_pages(unsigned long max_low_pfn);
43extern void register_memory(void);
44extern void limit_regions(unsigned long long size);
45extern void print_memory_map(char *who);
41 46
42#endif/*!__ASSEMBLY__*/ 47#endif/*!__ASSEMBLY__*/
43 48
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 3a05436f31c..45d21a0c95b 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -91,7 +91,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
91 pr_reg[7] = regs->xds; \ 91 pr_reg[7] = regs->xds; \
92 pr_reg[8] = regs->xes; \ 92 pr_reg[8] = regs->xes; \
93 savesegment(fs,pr_reg[9]); \ 93 savesegment(fs,pr_reg[9]); \
94 savesegment(gs,pr_reg[10]); \ 94 pr_reg[10] = regs->xgs; \
95 pr_reg[11] = regs->orig_eax; \ 95 pr_reg[11] = regs->orig_eax; \
96 pr_reg[12] = regs->eip; \ 96 pr_reg[12] = regs->eip; \
97 pr_reg[13] = regs->xcs; \ 97 pr_reg[13] = regs->xcs; \
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 946d97cfea2..438ef0ec710 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
56 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) 56 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
57 return -EFAULT; 57 return -EFAULT;
58 58
59 inc_preempt_count(); 59 pagefault_disable();
60 60
61 if (op == FUTEX_OP_SET) 61 if (op == FUTEX_OP_SET)
62 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); 62 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
88 } 88 }
89 } 89 }
90 90
91 dec_preempt_count(); 91 pagefault_enable();
92 92
93 if (!ret) { 93 if (!ret) {
94 switch (cmp) { 94 switch (cmp) {
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index 8ffbb0f0745..fd2be593b06 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -122,6 +122,6 @@ struct genapic {
122 APICFUNC(phys_pkg_id) \ 122 APICFUNC(phys_pkg_id) \
123 } 123 }
124 124
125extern struct genapic *genapic; 125extern struct genapic *genapic, apic_default;
126 126
127#endif 127#endif
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index bc1d6edae1e..434936c732d 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -76,7 +76,9 @@ static inline void __save_init_fpu( struct task_struct *tsk )
76 76
77#define __unlazy_fpu( tsk ) do { \ 77#define __unlazy_fpu( tsk ) do { \
78 if (task_thread_info(tsk)->status & TS_USEDFPU) \ 78 if (task_thread_info(tsk)->status & TS_USEDFPU) \
79 save_init_fpu( tsk ); \ 79 save_init_fpu( tsk ); \
80 else \
81 tsk->fpu_counter = 0; \
80} while (0) 82} while (0)
81 83
82#define __clear_fpu( tsk ) \ 84#define __clear_fpu( tsk ) \
@@ -118,6 +120,7 @@ static inline void save_init_fpu( struct task_struct *tsk )
118extern unsigned short get_fpu_cwd( struct task_struct *tsk ); 120extern unsigned short get_fpu_cwd( struct task_struct *tsk );
119extern unsigned short get_fpu_swd( struct task_struct *tsk ); 121extern unsigned short get_fpu_swd( struct task_struct *tsk );
120extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); 122extern unsigned short get_fpu_mxcsr( struct task_struct *tsk );
123extern asmlinkage void math_state_restore(void);
121 124
122/* 125/*
123 * Signal frame handlers... 126 * Signal frame handlers...
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 68df0dc3ab8..86ff5e83be2 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -256,11 +256,11 @@ static inline void flush_write_buffers(void)
256 256
257#endif /* __KERNEL__ */ 257#endif /* __KERNEL__ */
258 258
259#ifdef SLOW_IO_BY_JUMPING 259#if defined(CONFIG_PARAVIRT)
260#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:" 260#include <asm/paravirt.h>
261#else 261#else
262
262#define __SLOW_DOWN_IO "outb %%al,$0x80;" 263#define __SLOW_DOWN_IO "outb %%al,$0x80;"
263#endif
264 264
265static inline void slow_down_io(void) { 265static inline void slow_down_io(void) {
266 __asm__ __volatile__( 266 __asm__ __volatile__(
@@ -271,6 +271,8 @@ static inline void slow_down_io(void) {
271 : : ); 271 : : );
272} 272}
273 273
274#endif
275
274#ifdef CONFIG_X86_NUMAQ 276#ifdef CONFIG_X86_NUMAQ
275extern void *xquad_portio; /* Where the IO area was mapped */ 277extern void *xquad_portio; /* Where the IO area was mapped */
276#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) 278#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 331726b4112..11761cdaae1 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -37,8 +37,13 @@ static __inline__ int irq_canonicalize(int irq)
37extern int irqbalance_disable(char *str); 37extern int irqbalance_disable(char *str);
38#endif 38#endif
39 39
40extern void quirk_intel_irqbalance(void);
41
40#ifdef CONFIG_HOTPLUG_CPU 42#ifdef CONFIG_HOTPLUG_CPU
41extern void fixup_irqs(cpumask_t map); 43extern void fixup_irqs(cpumask_t map);
42#endif 44#endif
43 45
46void init_IRQ(void);
47void __init native_init_IRQ(void);
48
44#endif /* _ASM_IRQ_H */ 49#endif /* _ASM_IRQ_H */
diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h
index 3dd9c0b7027..a1b3f7f594a 100644
--- a/include/asm-i386/irq_regs.h
+++ b/include/asm-i386/irq_regs.h
@@ -1 +1,27 @@
1#include <asm-generic/irq_regs.h> 1/*
2 * Per-cpu current frame pointer - the location of the last exception frame on
3 * the stack, stored in the PDA.
4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */
7#ifndef _ASM_I386_IRQ_REGS_H
8#define _ASM_I386_IRQ_REGS_H
9
10#include <asm/pda.h>
11
12static inline struct pt_regs *get_irq_regs(void)
13{
14 return read_pda(irq_regs);
15}
16
17static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
18{
19 struct pt_regs *old_regs;
20
21 old_regs = read_pda(irq_regs);
22 write_pda(irq_regs, new_regs);
23
24 return old_regs;
25}
26
27#endif /* _ASM_I386_IRQ_REGS_H */
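The PDA-backed set_irq_regs() follows a save-replace-restore idiom: interrupt entry swaps in the new frame pointer and keeps the old one to put back on exit, so nested interrupts unwind cleanly. A minimal sketch of that idiom (a plain global stands in for the per-CPU PDA field; names are illustrative):

#include <stdio.h>

struct frame_sketch { int vector; };
static struct frame_sketch *irq_regs_slot;

static struct frame_sketch *set_irq_regs_sketch(struct frame_sketch *new_regs)
{
    struct frame_sketch *old = irq_regs_slot;
    irq_regs_slot = new_regs;                       /* write_pda(irq_regs, new_regs) */
    return old;                                     /* caller restores this on exit */
}

int main(void)
{
    struct frame_sketch outer = { 32 }, nested = { 33 };
    struct frame_sketch *saved_outer  = set_irq_regs_sketch(&outer);
    struct frame_sketch *saved_nested = set_irq_regs_sketch(&nested);  /* nested IRQ */
    set_irq_regs_sketch(saved_nested);              /* leave nested handler */
    set_irq_regs_sketch(saved_outer);               /* leave outer handler */
    printf("slot restored: %d\n", irq_regs_slot == NULL);
    return 0;
}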
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index e1bdb97c07f..17b18cf4fe9 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -10,6 +10,9 @@
10#ifndef _ASM_IRQFLAGS_H 10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H 11#define _ASM_IRQFLAGS_H
12 12
13#ifdef CONFIG_PARAVIRT
14#include <asm/paravirt.h>
15#else
13#ifndef __ASSEMBLY__ 16#ifndef __ASSEMBLY__
14 17
15static inline unsigned long __raw_local_save_flags(void) 18static inline unsigned long __raw_local_save_flags(void)
@@ -25,9 +28,6 @@ static inline unsigned long __raw_local_save_flags(void)
25 return flags; 28 return flags;
26} 29}
27 30
28#define raw_local_save_flags(flags) \
29 do { (flags) = __raw_local_save_flags(); } while (0)
30
31static inline void raw_local_irq_restore(unsigned long flags) 31static inline void raw_local_irq_restore(unsigned long flags)
32{ 32{
33 __asm__ __volatile__( 33 __asm__ __volatile__(
@@ -66,18 +66,6 @@ static inline void halt(void)
66 __asm__ __volatile__("hlt": : :"memory"); 66 __asm__ __volatile__("hlt": : :"memory");
67} 67}
68 68
69static inline int raw_irqs_disabled_flags(unsigned long flags)
70{
71 return !(flags & (1 << 9));
72}
73
74static inline int raw_irqs_disabled(void)
75{
76 unsigned long flags = __raw_local_save_flags();
77
78 return raw_irqs_disabled_flags(flags);
79}
80
81/* 69/*
82 * For spinlocks, etc: 70 * For spinlocks, etc:
83 */ 71 */
@@ -90,9 +78,33 @@ static inline unsigned long __raw_local_irq_save(void)
90 return flags; 78 return flags;
91} 79}
92 80
81#else
82#define DISABLE_INTERRUPTS(clobbers) cli
83#define ENABLE_INTERRUPTS(clobbers) sti
84#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
85#define INTERRUPT_RETURN iret
86#define GET_CR0_INTO_EAX movl %cr0, %eax
87#endif /* __ASSEMBLY__ */
88#endif /* CONFIG_PARAVIRT */
89
90#ifndef __ASSEMBLY__
91#define raw_local_save_flags(flags) \
92 do { (flags) = __raw_local_save_flags(); } while (0)
93
93#define raw_local_irq_save(flags) \ 94#define raw_local_irq_save(flags) \
94 do { (flags) = __raw_local_irq_save(); } while (0) 95 do { (flags) = __raw_local_irq_save(); } while (0)
95 96
97static inline int raw_irqs_disabled_flags(unsigned long flags)
98{
99 return !(flags & (1 << 9));
100}
101
102static inline int raw_irqs_disabled(void)
103{
104 unsigned long flags = __raw_local_save_flags();
105
106 return raw_irqs_disabled_flags(flags);
107}
96#endif /* __ASSEMBLY__ */ 108#endif /* __ASSEMBLY__ */
97 109
98/* 110/*
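The relocated raw_irqs_disabled_flags() still just tests the IF flag, bit 9 of EFLAGS: interrupts are considered disabled when that bit is clear. A quick sketch of the test on sample flag values:

#include <stdio.h>

static int irqs_disabled_flags_sketch(unsigned long flags)
{
    return !(flags & (1UL << 9));       /* IF clear -> interrupts off */
}

int main(void)
{
    printf("%d\n", irqs_disabled_flags_sketch(0x202));  /* IF set   -> 0 */
    printf("%d\n", irqs_disabled_flags_sketch(0x002));  /* IF clear -> 1 */
    return 0;
}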
diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h
index fb42099e7bd..605e3ccb991 100644
--- a/include/asm-i386/mach-default/setup_arch.h
+++ b/include/asm-i386/mach-default/setup_arch.h
@@ -2,4 +2,6 @@
2 2
3/* no action for generic */ 3/* no action for generic */
4 4
5#ifndef ARCH_SETUP
5#define ARCH_SETUP 6#define ARCH_SETUP
7#endif
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h
index 697673b555c..a4b0aa3320e 100644
--- a/include/asm-i386/math_emu.h
+++ b/include/asm-i386/math_emu.h
@@ -21,6 +21,7 @@ struct info {
21 long ___eax; 21 long ___eax;
22 long ___ds; 22 long ___ds;
23 long ___es; 23 long ___es;
24 long ___fs;
24 long ___orig_eax; 25 long ___orig_eax;
25 long ___eip; 26 long ___eip;
26 long ___cs; 27 long ___cs;
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 62b7bf18409..68ff102d6f5 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -44,7 +44,7 @@ static inline void switch_mm(struct mm_struct *prev,
44 * load the LDT, if the LDT is different: 44 * load the LDT, if the LDT is different:
45 */ 45 */
46 if (unlikely(prev->context.ldt != next->context.ldt)) 46 if (unlikely(prev->context.ldt != next->context.ldt))
47 load_LDT_nolock(&next->context, cpu); 47 load_LDT_nolock(&next->context);
48 } 48 }
49#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
50 else { 50 else {
@@ -56,14 +56,14 @@ static inline void switch_mm(struct mm_struct *prev,
56 * tlb flush IPI delivery. We must reload %cr3. 56 * tlb flush IPI delivery. We must reload %cr3.
57 */ 57 */
58 load_cr3(next->pgd); 58 load_cr3(next->pgd);
59 load_LDT_nolock(&next->context, cpu); 59 load_LDT_nolock(&next->context);
60 } 60 }
61 } 61 }
62#endif 62#endif
63} 63}
64 64
65#define deactivate_mm(tsk, mm) \ 65#define deactivate_mm(tsk, mm) \
66 asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) 66 asm("movl %0,%%fs": :"r" (0));
67 67
68#define activate_mm(prev, next) \ 68#define activate_mm(prev, next) \
69 switch_mm((prev),(next),NULL) 69 switch_mm((prev),(next),NULL)
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 61b07332200..3503ad66945 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -120,13 +120,26 @@ static inline int pfn_valid(int pfn)
120 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 120 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
121#define alloc_bootmem_low_pages(x) \ 121#define alloc_bootmem_low_pages(x) \
122 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) 122 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
123#define alloc_bootmem_node(ignore, x) \ 123#define alloc_bootmem_node(pgdat, x) \
124 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 124({ \
125#define alloc_bootmem_pages_node(ignore, x) \ 125 struct pglist_data __attribute__ ((unused)) \
126 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 126 *__alloc_bootmem_node__pgdat = (pgdat); \
127#define alloc_bootmem_low_pages_node(ignore, x) \ 127 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
128 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) 128 __pa(MAX_DMA_ADDRESS)); \
129 129})
130#define alloc_bootmem_pages_node(pgdat, x) \
131({ \
132 struct pglist_data __attribute__ ((unused)) \
133 *__alloc_bootmem_node__pgdat = (pgdat); \
134 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
135 __pa(MAX_DMA_ADDRESS)) \
136})
137#define alloc_bootmem_low_pages_node(pgdat, x) \
138({ \
139 struct pglist_data __attribute__ ((unused)) \
140 *__alloc_bootmem_node__pgdat = (pgdat); \
141 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
142})
130#endif /* CONFIG_NEED_MULTIPLE_NODES */ 143#endif /* CONFIG_NEED_MULTIPLE_NODES */
131 144
132#endif /* _ASM_MMZONE_H_ */ 145#endif /* _ASM_MMZONE_H_ */
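The rewritten alloc_bootmem_*_node() macros wrap the body in a ({ ... }) statement expression so that the pgdat argument is still evaluated and type-checked even though the flat-memory build always allocates from node 0. A sketch of that macro pattern (GCC extensions; the allocator below is a stand-in, not __alloc_bootmem_node):

#include <stdio.h>

struct pglist_data { int node_id; };

static void *alloc_from_node0(unsigned long size)
{
    static char pool[4096];             /* stand-in bootmem pool */
    (void)size;
    return pool;
}

#define alloc_on_node_sketch(pgdat, x)                                  \
({                                                                      \
    struct pglist_data __attribute__((unused))                          \
            *__check_pgdat = (pgdat);   /* evaluated and type-checked */\
    alloc_from_node0(x);                                                \
})

int main(void)
{
    struct pglist_data node0 = { 0 };
    printf("%p\n", alloc_on_node_sketch(&node0, 128));
    return 0;
}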
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index 424661d25bd..02f8f541cbe 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -20,6 +20,8 @@ struct mod_arch_specific
20#define MODULE_PROC_FAMILY "586TSC " 20#define MODULE_PROC_FAMILY "586TSC "
21#elif defined CONFIG_M586MMX 21#elif defined CONFIG_M586MMX
22#define MODULE_PROC_FAMILY "586MMX " 22#define MODULE_PROC_FAMILY "586MMX "
23#elif defined CONFIG_MCORE2
24#define MODULE_PROC_FAMILY "CORE2 "
23#elif defined CONFIG_M686 25#elif defined CONFIG_M686
24#define MODULE_PROC_FAMILY "686 " 26#define MODULE_PROC_FAMILY "686 "
25#elif defined CONFIG_MPENTIUMII 27#elif defined CONFIG_MPENTIUMII
@@ -60,18 +62,12 @@ struct mod_arch_specific
60#error unknown processor family 62#error unknown processor family
61#endif 63#endif
62 64
63#ifdef CONFIG_REGPARM
64#define MODULE_REGPARM "REGPARM "
65#else
66#define MODULE_REGPARM ""
67#endif
68
69#ifdef CONFIG_4KSTACKS 65#ifdef CONFIG_4KSTACKS
70#define MODULE_STACKSIZE "4KSTACKS " 66#define MODULE_STACKSIZE "4KSTACKS "
71#else 67#else
72#define MODULE_STACKSIZE "" 68#define MODULE_STACKSIZE ""
73#endif 69#endif
74 70
75#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE 71#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
76 72
77#endif /* _ASM_I386_MODULE_H */ 73#endif /* _ASM_I386_MODULE_H */
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
index 76feedf85a8..13bafb16e7a 100644
--- a/include/asm-i386/mpspec_def.h
+++ b/include/asm-i386/mpspec_def.h
@@ -97,7 +97,6 @@ struct mpc_config_bus
97#define BUSTYPE_TC "TC" 97#define BUSTYPE_TC "TC"
98#define BUSTYPE_VME "VME" 98#define BUSTYPE_VME "VME"
99#define BUSTYPE_XPRESS "XPRESS" 99#define BUSTYPE_XPRESS "XPRESS"
100#define BUSTYPE_NEC98 "NEC98"
101 100
102struct mpc_config_ioapic 101struct mpc_config_ioapic
103{ 102{
@@ -182,7 +181,6 @@ enum mp_bustype {
182 MP_BUS_EISA, 181 MP_BUS_EISA,
183 MP_BUS_PCI, 182 MP_BUS_PCI,
184 MP_BUS_MCA, 183 MP_BUS_MCA,
185 MP_BUS_NEC98
186}; 184};
187#endif 185#endif
188 186
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 62b76cd9695..5679d499307 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,10 @@
1#ifndef __ASM_MSR_H 1#ifndef __ASM_MSR_H
2#define __ASM_MSR_H 2#define __ASM_MSR_H
3 3
4#ifdef CONFIG_PARAVIRT
5#include <asm/paravirt.h>
6#else
7
4/* 8/*
5 * Access to machine-specific registers (available on 586 and better only) 9 * Access to machine-specific registers (available on 586 and better only)
6 * Note: the rd* operations modify the parameters directly (without using 10 * Note: the rd* operations modify the parameters directly (without using
@@ -77,6 +81,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
77 __asm__ __volatile__("rdpmc" \ 81 __asm__ __volatile__("rdpmc" \
78 : "=a" (low), "=d" (high) \ 82 : "=a" (low), "=d" (high) \
79 : "c" (counter)) 83 : "c" (counter))
84#endif /* !CONFIG_PARAVIRT */
80 85
81/* symbolic names for some interesting MSRs */ 86/* symbolic names for some interesting MSRs */
82/* Intel defined MSRs. */ 87/* Intel defined MSRs. */
@@ -141,6 +146,10 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
141#define MSR_IA32_MC0_ADDR 0x402 146#define MSR_IA32_MC0_ADDR 0x402
142#define MSR_IA32_MC0_MISC 0x403 147#define MSR_IA32_MC0_MISC 0x403
143 148
149#define MSR_IA32_PEBS_ENABLE 0x3f1
150#define MSR_IA32_DS_AREA 0x600
151#define MSR_IA32_PERF_CAPABILITIES 0x345
152
144/* Pentium IV performance counter MSRs */ 153/* Pentium IV performance counter MSRs */
145#define MSR_P4_BPU_PERFCTR0 0x300 154#define MSR_P4_BPU_PERFCTR0 0x300
146#define MSR_P4_BPU_PERFCTR1 0x301 155#define MSR_P4_BPU_PERFCTR1 0x301
@@ -284,4 +293,13 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
284#define MSR_TMTA_LRTI_READOUT 0x80868018 293#define MSR_TMTA_LRTI_READOUT 0x80868018
285#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a 294#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
286 295
296/* Intel Core-based CPU performance counters */
297#define MSR_CORE_PERF_FIXED_CTR0 0x309
298#define MSR_CORE_PERF_FIXED_CTR1 0x30a
299#define MSR_CORE_PERF_FIXED_CTR2 0x30b
300#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
301#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
302#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
303#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
304
287#endif /* __ASM_MSR_H */ 305#endif /* __ASM_MSR_H */
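When CONFIG_PARAVIRT routes rdmsr/wrmsr through paravirt_ops, the value travels as a single u64 and the classic (low, high) pair is packed and unpacked around the call. A quick sketch of that 32/64-bit packing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t low = 0x89abcdefu, high = 0x01234567u;
    uint64_t msr = ((uint64_t)high << 32) | low;         /* wrmsr-style packing */

    printf("msr  = 0x%016llx\n", (unsigned long long)msr);
    printf("low  = 0x%08x\n", (uint32_t)msr);            /* rdmsr-style unpacking */
    printf("high = 0x%08x\n", (uint32_t)(msr >> 32));
    return 0;
}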
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 269d315719c..b04333ea6f3 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,6 +5,9 @@
5#define ASM_NMI_H 5#define ASM_NMI_H
6 6
7#include <linux/pm.h> 7#include <linux/pm.h>
8#include <asm/irq.h>
9
10#ifdef ARCH_HAS_NMI_WATCHDOG
8 11
9/** 12/**
10 * do_nmi_callback 13 * do_nmi_callback
@@ -42,4 +45,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
42 void __user *, size_t *, loff_t *); 45 void __user *, size_t *, loff_t *);
43extern int unknown_nmi_panic; 46extern int unknown_nmi_panic;
44 47
48void __trigger_all_cpu_backtrace(void);
49#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
50
51#endif
52
45#endif /* ASM_NMI_H */ 53#endif /* ASM_NMI_H */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index f5bf544c729..fd3f64ace24 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -52,6 +52,7 @@ typedef struct { unsigned long long pgprot; } pgprot_t;
52#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) 52#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
53#define __pmd(x) ((pmd_t) { (x) } ) 53#define __pmd(x) ((pmd_t) { (x) } )
54#define HPAGE_SHIFT 21 54#define HPAGE_SHIFT 21
55#include <asm-generic/pgtable-nopud.h>
55#else 56#else
56typedef struct { unsigned long pte_low; } pte_t; 57typedef struct { unsigned long pte_low; } pte_t;
57typedef struct { unsigned long pgd; } pgd_t; 58typedef struct { unsigned long pgd; } pgd_t;
@@ -59,6 +60,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
59#define boot_pte_t pte_t /* or would you rather have a typedef */ 60#define boot_pte_t pte_t /* or would you rather have a typedef */
60#define pte_val(x) ((x).pte_low) 61#define pte_val(x) ((x).pte_low)
61#define HPAGE_SHIFT 22 62#define HPAGE_SHIFT 22
63#include <asm-generic/pgtable-nopmd.h>
62#endif 64#endif
63#define PTE_MASK PAGE_MASK 65#define PTE_MASK PAGE_MASK
64 66
@@ -112,18 +114,18 @@ extern int page_is_ram(unsigned long pagenr);
112 114
113#ifdef __ASSEMBLY__ 115#ifdef __ASSEMBLY__
114#define __PAGE_OFFSET CONFIG_PAGE_OFFSET 116#define __PAGE_OFFSET CONFIG_PAGE_OFFSET
115#define __PHYSICAL_START CONFIG_PHYSICAL_START
116#else 117#else
117#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) 118#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
118#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
119#endif 119#endif
120#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
121 120
122 121
123#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) 122#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
124#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) 123#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
125#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) 124#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
126#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) 125#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
126/* __pa_symbol should be used for C visible symbols.
127 This seems to be the official gcc blessed way to do such arithmetic. */
128#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
127#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) 129#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
128#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 130#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
129#ifdef CONFIG_FLATMEM 131#ifdef CONFIG_FLATMEM
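__pa() and __va() stay a linear offset against PAGE_OFFSET; the new __pa_symbol() only adds RELOC_HIDE so GCC cannot "optimize" the symbol arithmetic. A sketch of the plain offset conversion (PAGE_OFFSET value is just the typical i386 default, and the addresses are illustrative):

#include <stdio.h>

#define PAGE_OFFSET_SKETCH 0xC0000000UL

#define pa_sketch(v) ((unsigned long)(v) - PAGE_OFFSET_SKETCH)
#define va_sketch(p) ((void *)((unsigned long)(p) + PAGE_OFFSET_SKETCH))

int main(void)
{
    unsigned long virt = 0xC0100000UL;              /* typical kernel text address */
    printf("phys = 0x%lx\n", pa_sketch(virt));      /* 0x100000 */
    printf("virt = %p\n",    va_sketch(0x100000UL));
    return 0;
}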
diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h
index 745dc5bd0fb..21b32466fcd 100644
--- a/include/asm-i386/param.h
+++ b/include/asm-i386/param.h
@@ -18,6 +18,5 @@
18#endif 18#endif
19 19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */ 20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21#define COMMAND_LINE_SIZE 256
22 21
23#endif 22#endif
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
new file mode 100644
index 00000000000..9f06265065f
--- /dev/null
+++ b/include/asm-i386/paravirt.h
@@ -0,0 +1,505 @@
1#ifndef __ASM_PARAVIRT_H
2#define __ASM_PARAVIRT_H
3/* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */
5#include <linux/linkage.h>
6#include <linux/stringify.h>
7#include <asm/page.h>
8
9#ifdef CONFIG_PARAVIRT
10/* These are the most performance critical ops, so we want to be able to patch
11 * callers */
12#define PARAVIRT_IRQ_DISABLE 0
13#define PARAVIRT_IRQ_ENABLE 1
14#define PARAVIRT_RESTORE_FLAGS 2
15#define PARAVIRT_SAVE_FLAGS 3
16#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
17#define PARAVIRT_INTERRUPT_RETURN 5
18#define PARAVIRT_STI_SYSEXIT 6
19
20/* Bitmask of what can be clobbered: usually at least eax. */
21#define CLBR_NONE 0x0
22#define CLBR_EAX 0x1
23#define CLBR_ECX 0x2
24#define CLBR_EDX 0x4
25#define CLBR_ANY 0x7
26
27#ifndef __ASSEMBLY__
28struct thread_struct;
29struct Xgt_desc_struct;
30struct tss_struct;
31struct mm_struct;
32struct paravirt_ops
33{
34 unsigned int kernel_rpl;
35 int paravirt_enabled;
36 const char *name;
37
38 /*
39 * Patch may replace one of the defined code sequences with arbitrary
40 * code, subject to the same register constraints. This generally
41 * means the code is not free to clobber any registers other than EAX.
42 * The patch function should return the number of bytes of code
43 * generated, as we nop pad the rest in generic code.
44 */
45 unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
46
47 void (*arch_setup)(void);
48 char *(*memory_setup)(void);
49 void (*init_IRQ)(void);
50
51 void (*banner)(void);
52
53 unsigned long (*get_wallclock)(void);
54 int (*set_wallclock)(unsigned long);
55 void (*time_init)(void);
56
57 /* All the function pointers here are declared as "fastcall"
58 so that we get a specific register-based calling
59 convention. This makes it easier to implement inline
60 assembler replacements. */
61
62 void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx,
63 unsigned int *ecx, unsigned int *edx);
64
65 unsigned long (fastcall *get_debugreg)(int regno);
66 void (fastcall *set_debugreg)(int regno, unsigned long value);
67
68 void (fastcall *clts)(void);
69
70 unsigned long (fastcall *read_cr0)(void);
71 void (fastcall *write_cr0)(unsigned long);
72
73 unsigned long (fastcall *read_cr2)(void);
74 void (fastcall *write_cr2)(unsigned long);
75
76 unsigned long (fastcall *read_cr3)(void);
77 void (fastcall *write_cr3)(unsigned long);
78
79 unsigned long (fastcall *read_cr4_safe)(void);
80 unsigned long (fastcall *read_cr4)(void);
81 void (fastcall *write_cr4)(unsigned long);
82
83 unsigned long (fastcall *save_fl)(void);
84 void (fastcall *restore_fl)(unsigned long);
85 void (fastcall *irq_disable)(void);
86 void (fastcall *irq_enable)(void);
87 void (fastcall *safe_halt)(void);
88 void (fastcall *halt)(void);
89 void (fastcall *wbinvd)(void);
90
91 /* err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
92 u64 (fastcall *read_msr)(unsigned int msr, int *err);
93 int (fastcall *write_msr)(unsigned int msr, u64 val);
94
95 u64 (fastcall *read_tsc)(void);
96 u64 (fastcall *read_pmc)(void);
97
98 void (fastcall *load_tr_desc)(void);
99 void (fastcall *load_gdt)(const struct Xgt_desc_struct *);
100 void (fastcall *load_idt)(const struct Xgt_desc_struct *);
101 void (fastcall *store_gdt)(struct Xgt_desc_struct *);
102 void (fastcall *store_idt)(struct Xgt_desc_struct *);
103 void (fastcall *set_ldt)(const void *desc, unsigned entries);
104 unsigned long (fastcall *store_tr)(void);
105 void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu);
106 void (fastcall *write_ldt_entry)(void *dt, int entrynum,
107 u32 low, u32 high);
108 void (fastcall *write_gdt_entry)(void *dt, int entrynum,
109 u32 low, u32 high);
110 void (fastcall *write_idt_entry)(void *dt, int entrynum,
111 u32 low, u32 high);
112 void (fastcall *load_esp0)(struct tss_struct *tss,
113 struct thread_struct *thread);
114
115 void (fastcall *set_iopl_mask)(unsigned mask);
116
117 void (fastcall *io_delay)(void);
118 void (*const_udelay)(unsigned long loops);
119
120#ifdef CONFIG_X86_LOCAL_APIC
121 void (fastcall *apic_write)(unsigned long reg, unsigned long v);
122 void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v);
123 unsigned long (fastcall *apic_read)(unsigned long reg);
124#endif
125
126 void (fastcall *flush_tlb_user)(void);
127 void (fastcall *flush_tlb_kernel)(void);
128 void (fastcall *flush_tlb_single)(u32 addr);
129
130 void (fastcall *set_pte)(pte_t *ptep, pte_t pteval);
131 void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
132 void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval);
133 void (fastcall *pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep);
134 void (fastcall *pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep);
135#ifdef CONFIG_X86_PAE
136 void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval);
137 void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
138 void (fastcall *set_pud)(pud_t *pudp, pud_t pudval);
139 void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
140 void (fastcall *pmd_clear)(pmd_t *pmdp);
141#endif
142
143 /* These two are jmp to, not actually called. */
144 void (fastcall *irq_enable_sysexit)(void);
145 void (fastcall *iret)(void);
146};
147
148/* Mark a paravirt probe function. */
149#define paravirt_probe(fn) \
150 static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
151 __attribute__((__section__(".paravirtprobe"))) = fn
152
153extern struct paravirt_ops paravirt_ops;
154
155#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
156
157static inline void load_esp0(struct tss_struct *tss,
158 struct thread_struct *thread)
159{
160 paravirt_ops.load_esp0(tss, thread);
161}
162
163#define ARCH_SETUP paravirt_ops.arch_setup();
164static inline unsigned long get_wallclock(void)
165{
166 return paravirt_ops.get_wallclock();
167}
168
169static inline int set_wallclock(unsigned long nowtime)
170{
171 return paravirt_ops.set_wallclock(nowtime);
172}
173
174static inline void do_time_init(void)
175{
176 return paravirt_ops.time_init();
177}
178
179/* The paravirtualized CPUID instruction. */
180static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
181 unsigned int *ecx, unsigned int *edx)
182{
183 paravirt_ops.cpuid(eax, ebx, ecx, edx);
184}
185
186/*
187 * These special macros can be used to get or set a debugging register
188 */
189#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
190#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
191
192#define clts() paravirt_ops.clts()
193
194#define read_cr0() paravirt_ops.read_cr0()
195#define write_cr0(x) paravirt_ops.write_cr0(x)
196
197#define read_cr2() paravirt_ops.read_cr2()
198#define write_cr2(x) paravirt_ops.write_cr2(x)
199
200#define read_cr3() paravirt_ops.read_cr3()
201#define write_cr3(x) paravirt_ops.write_cr3(x)
202
203#define read_cr4() paravirt_ops.read_cr4()
204#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
205#define write_cr4(x) paravirt_ops.write_cr4(x)
206
207static inline void raw_safe_halt(void)
208{
209 paravirt_ops.safe_halt();
210}
211
212static inline void halt(void)
213{
214 paravirt_ops.safe_halt();
215}
216#define wbinvd() paravirt_ops.wbinvd()
217
218#define get_kernel_rpl() (paravirt_ops.kernel_rpl)
219
220#define rdmsr(msr,val1,val2) do { \
221 int _err; \
222 u64 _l = paravirt_ops.read_msr(msr,&_err); \
223 val1 = (u32)_l; \
224 val2 = _l >> 32; \
225} while(0)
226
227#define wrmsr(msr,val1,val2) do { \
228 u64 _l = ((u64)(val2) << 32) | (val1); \
229 paravirt_ops.write_msr((msr), _l); \
230} while(0)
231
232#define rdmsrl(msr,val) do { \
233 int _err; \
234 val = paravirt_ops.read_msr((msr),&_err); \
235} while(0)
236
237#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
238#define wrmsr_safe(msr,a,b) ({ \
239 u64 _l = ((u64)(b) << 32) | (a); \
240 paravirt_ops.write_msr((msr),_l); \
241})
242
243/* rdmsr with exception handling */
244#define rdmsr_safe(msr,a,b) ({ \
245 int _err; \
246 u64 _l = paravirt_ops.read_msr(msr,&_err); \
247 (*a) = (u32)_l; \
248 (*b) = _l >> 32; \
249 _err; })
250
251#define rdtsc(low,high) do { \
252 u64 _l = paravirt_ops.read_tsc(); \
253 low = (u32)_l; \
254 high = _l >> 32; \
255} while(0)
256
257#define rdtscl(low) do { \
258 u64 _l = paravirt_ops.read_tsc(); \
259 low = (int)_l; \
260} while(0)
261
262#define rdtscll(val) (val = paravirt_ops.read_tsc())
263
264#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
265
266#define rdpmc(counter,low,high) do { \
267 u64 _l = paravirt_ops.read_pmc(); \
268 low = (u32)_l; \
269 high = _l >> 32; \
270} while(0)
271
272#define load_TR_desc() (paravirt_ops.load_tr_desc())
273#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
274#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
275#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
276#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
277#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
278#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
279#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
280#define write_ldt_entry(dt, entry, low, high) \
281 (paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
282#define write_gdt_entry(dt, entry, low, high) \
283 (paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
284#define write_idt_entry(dt, entry, low, high) \
285 (paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
286#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask))
287
288/* The paravirtualized I/O functions */
289static inline void slow_down_io(void) {
290 paravirt_ops.io_delay();
291#ifdef REALLY_SLOW_IO
292 paravirt_ops.io_delay();
293 paravirt_ops.io_delay();
294 paravirt_ops.io_delay();
295#endif
296}
297
298#ifdef CONFIG_X86_LOCAL_APIC
299/*
300 * Basic functions accessing APICs.
301 */
302static inline void apic_write(unsigned long reg, unsigned long v)
303{
304 paravirt_ops.apic_write(reg,v);
305}
306
307static inline void apic_write_atomic(unsigned long reg, unsigned long v)
308{
309 paravirt_ops.apic_write_atomic(reg,v);
310}
311
312static inline unsigned long apic_read(unsigned long reg)
313{
314 return paravirt_ops.apic_read(reg);
315}
316#endif
317
318
319#define __flush_tlb() paravirt_ops.flush_tlb_user()
320#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
321#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
322
323static inline void set_pte(pte_t *ptep, pte_t pteval)
324{
325 paravirt_ops.set_pte(ptep, pteval);
326}
327
328static inline void set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
329{
330 paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
331}
332
333static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
334{
335 paravirt_ops.set_pmd(pmdp, pmdval);
336}
337
338static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep)
339{
340 paravirt_ops.pte_update(mm, addr, ptep);
341}
342
343static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
344{
345 paravirt_ops.pte_update_defer(mm, addr, ptep);
346}
347
348#ifdef CONFIG_X86_PAE
349static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
350{
351 paravirt_ops.set_pte_atomic(ptep, pteval);
352}
353
354static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
355{
356 paravirt_ops.set_pte_present(mm, addr, ptep, pte);
357}
358
359static inline void set_pud(pud_t *pudp, pud_t pudval)
360{
361 paravirt_ops.set_pud(pudp, pudval);
362}
363
364static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
365{
366 paravirt_ops.pte_clear(mm, addr, ptep);
367}
368
369static inline void pmd_clear(pmd_t *pmdp)
370{
371 paravirt_ops.pmd_clear(pmdp);
372}
373#endif
374
375/* These all sit in the .parainstructions section to tell us what to patch. */
376struct paravirt_patch {
377 u8 *instr; /* original instructions */
378 u8 instrtype; /* type of this instruction */
379 u8 len; /* length of original instruction */
380 u16 clobbers; /* what registers you may clobber */
381};
382
383#define paravirt_alt(insn_string, typenum, clobber) \
384 "771:\n\t" insn_string "\n" "772:\n" \
385 ".pushsection .parainstructions,\"a\"\n" \
386 " .long 771b\n" \
387 " .byte " __stringify(typenum) "\n" \
388 " .byte 772b-771b\n" \
389 " .short " __stringify(clobber) "\n" \
390 ".popsection"
391
392static inline unsigned long __raw_local_save_flags(void)
393{
394 unsigned long f;
395
396 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
397 "call *%1;"
398 "popl %%edx; popl %%ecx",
399 PARAVIRT_SAVE_FLAGS, CLBR_NONE)
400 : "=a"(f): "m"(paravirt_ops.save_fl)
401 : "memory", "cc");
402 return f;
403}
404
405static inline void raw_local_irq_restore(unsigned long f)
406{
407 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
408 "call *%1;"
409 "popl %%edx; popl %%ecx",
410 PARAVIRT_RESTORE_FLAGS, CLBR_EAX)
411 : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f)
412 : "memory", "cc");
413}
414
415static inline void raw_local_irq_disable(void)
416{
417 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
418 "call *%0;"
419 "popl %%edx; popl %%ecx",
420 PARAVIRT_IRQ_DISABLE, CLBR_EAX)
421 : : "m" (paravirt_ops.irq_disable)
422 : "memory", "eax", "cc");
423}
424
425static inline void raw_local_irq_enable(void)
426{
427 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
428 "call *%0;"
429 "popl %%edx; popl %%ecx",
430 PARAVIRT_IRQ_ENABLE, CLBR_EAX)
431 : : "m" (paravirt_ops.irq_enable)
432 : "memory", "eax", "cc");
433}
434
435static inline unsigned long __raw_local_irq_save(void)
436{
437 unsigned long f;
438
439 __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
440 "call *%1; pushl %%eax;"
441 "call *%2; popl %%eax;"
442 "popl %%edx; popl %%ecx",
443 PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
444 CLBR_NONE)
445 : "=a"(f)
446 : "m" (paravirt_ops.save_fl),
447 "m" (paravirt_ops.irq_disable)
448 : "memory", "cc");
449 return f;
450}
451
452#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \
453 "call *paravirt_ops+%c[irq_disable];" \
454 "popl %%edx; popl %%ecx", \
455 PARAVIRT_IRQ_DISABLE, CLBR_EAX)
456
457#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \
458 "call *paravirt_ops+%c[irq_enable];" \
459 "popl %%edx; popl %%ecx", \
460 PARAVIRT_IRQ_ENABLE, CLBR_EAX)
461#define CLI_STI_CLOBBERS , "%eax"
462#define CLI_STI_INPUT_ARGS \
463 , \
464 [irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \
465 [irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
466
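The struct paravirt_patch records above, emitted by paravirt_alt() into the .parainstructions section, give a later patching pass everything it needs: where the call site is, which operation it performs, how many bytes it occupies, and which registers it may clobber. As a rough illustration (hypothetical names and table contents, not the kernel's patcher), walking such a table looks like:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct demo_patch {                 /* mirrors struct paravirt_patch above */
            uint8_t *instr;             /* original instructions */
            uint8_t  instrtype;         /* type of this instruction */
            uint8_t  len;               /* length of original instruction */
            uint16_t clobbers;          /* what registers may be clobbered */
    };

    static void demo_apply_paravirt(const struct demo_patch *tbl, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    printf("site %p: type %u, %u bytes, clobbers %#x\n",
                           (void *)tbl[i].instr, tbl[i].instrtype,
                           tbl[i].len, tbl[i].clobbers);
            /* A real patcher would rewrite each site in place and pad the
             * rest of the len bytes with NOPs. */
    }

    int main(void)
    {
            static uint8_t fake_site[7];
            struct demo_patch table[] = { { fake_site, 1, sizeof(fake_site), 0 } };
            demo_apply_paravirt(table, sizeof(table) / sizeof(table[0]));
            return 0;
    }
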
467#else /* __ASSEMBLY__ */
468
469#define PARA_PATCH(ptype, clobbers, ops) \
470771:; \
471 ops; \
472772:; \
473 .pushsection .parainstructions,"a"; \
474 .long 771b; \
475 .byte ptype; \
476 .byte 772b-771b; \
477 .short clobbers; \
478 .popsection
479
480#define INTERRUPT_RETURN \
481 PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY, \
482 jmp *%cs:paravirt_ops+PARAVIRT_iret)
483
484#define DISABLE_INTERRUPTS(clobbers) \
485 PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers, \
486 pushl %ecx; pushl %edx; \
487 call *paravirt_ops+PARAVIRT_irq_disable; \
488 popl %edx; popl %ecx) \
489
490#define ENABLE_INTERRUPTS(clobbers) \
491 PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers, \
492 pushl %ecx; pushl %edx; \
493 call *%cs:paravirt_ops+PARAVIRT_irq_enable; \
494 popl %edx; popl %ecx)
495
496#define ENABLE_INTERRUPTS_SYSEXIT \
497 PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY, \
498 jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
499
500#define GET_CR0_INTO_EAX \
501 call *paravirt_ops+PARAVIRT_read_cr0
502
503#endif /* __ASSEMBLY__ */
504#endif /* CONFIG_PARAVIRT */
505#endif /* __ASM_PARAVIRT_H */
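The C wrappers earlier in this header funnel every privileged operation through one indirect call into paravirt_ops, so a hypervisor port only has to fill in the table. A minimal sketch of that dispatch pattern, using a cut-down demo structure with only the members implied by the get_wallclock/set_wallclock wrappers (names and values are illustrative, not the real struct layout):

    #include <stdio.h>

    struct paravirt_ops_demo {          /* cut-down stand-in, not the real layout */
            int paravirt_enabled;
            unsigned long (*get_wallclock)(void);
            int (*set_wallclock)(unsigned long nowtime);
    };

    static unsigned long demo_get_wallclock(void)
    {
            return 1162425600UL;        /* fixed time, for illustration only */
    }

    static int demo_set_wallclock(unsigned long nowtime)
    {
            (void)nowtime;              /* a real backend would program its clock */
            return 0;
    }

    static struct paravirt_ops_demo demo_ops = {
            .paravirt_enabled = 1,
            .get_wallclock    = demo_get_wallclock,
            .set_wallclock    = demo_set_wallclock,
    };

    int main(void)
    {
            /* Same shape as the wrappers above: one indirect call through the table. */
            printf("wallclock=%lu\n", demo_ops.get_wallclock());
            return demo_ops.set_wallclock(0);
    }
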
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h
new file mode 100644
index 00000000000..2ba2736aa10
--- /dev/null
+++ b/include/asm-i386/pda.h
@@ -0,0 +1,100 @@
1/*
2 Per-processor Data Areas
3 Jeremy Fitzhardinge <jeremy@goop.org> 2006
4 Based on asm-x86_64/pda.h by Andi Kleen.
5 */
6#ifndef _I386_PDA_H
7#define _I386_PDA_H
8
9#include <linux/stddef.h>
10#include <linux/types.h>
11
12struct i386_pda
13{
14 struct i386_pda *_pda; /* pointer to self */
15
16 int cpu_number;
17 struct task_struct *pcurrent; /* current process */
18 struct pt_regs *irq_regs;
19};
20
21extern struct i386_pda *_cpu_pda[];
22
23#define cpu_pda(i) (_cpu_pda[i])
24
25#define pda_offset(field) offsetof(struct i386_pda, field)
26
27extern void __bad_pda_field(void);
28
29/* This variable is never instantiated. It is only used as a stand-in
30 for the real per-cpu PDA memory, so that gcc can understand what
31 memory operations the inline asms() below are performing. This
32 eliminates the need to make the asms volatile or have memory
33 clobbers, so gcc can readily analyse them. */
34extern struct i386_pda _proxy_pda;
35
36#define pda_to_op(op,field,val) \
37 do { \
38 typedef typeof(_proxy_pda.field) T__; \
39 if (0) { T__ tmp__; tmp__ = (val); } \
40 switch (sizeof(_proxy_pda.field)) { \
41 case 1: \
42 asm(op "b %1,%%gs:%c2" \
43 : "+m" (_proxy_pda.field) \
44 :"ri" ((T__)val), \
45 "i"(pda_offset(field))); \
46 break; \
47 case 2: \
48 asm(op "w %1,%%gs:%c2" \
49 : "+m" (_proxy_pda.field) \
50 :"ri" ((T__)val), \
51 "i"(pda_offset(field))); \
52 break; \
53 case 4: \
54 asm(op "l %1,%%gs:%c2" \
55 : "+m" (_proxy_pda.field) \
56 :"ri" ((T__)val), \
57 "i"(pda_offset(field))); \
58 break; \
59 default: __bad_pda_field(); \
60 } \
61 } while (0)
62
63#define pda_from_op(op,field) \
64 ({ \
65 typeof(_proxy_pda.field) ret__; \
66 switch (sizeof(_proxy_pda.field)) { \
67 case 1: \
68 asm(op "b %%gs:%c1,%0" \
69 : "=r" (ret__) \
70 : "i" (pda_offset(field)), \
71 "m" (_proxy_pda.field)); \
72 break; \
73 case 2: \
74 asm(op "w %%gs:%c1,%0" \
75 : "=r" (ret__) \
76 : "i" (pda_offset(field)), \
77 "m" (_proxy_pda.field)); \
78 break; \
79 case 4: \
80 asm(op "l %%gs:%c1,%0" \
81 : "=r" (ret__) \
82 : "i" (pda_offset(field)), \
83 "m" (_proxy_pda.field)); \
84 break; \
85 default: __bad_pda_field(); \
86 } \
87 ret__; })
88
89/* Return a pointer to a pda field */
90#define pda_addr(field) \
91 ((typeof(_proxy_pda.field) *)((unsigned char *)read_pda(_pda) + \
92 pda_offset(field)))
93
94#define read_pda(field) pda_from_op("mov",field)
95#define write_pda(field,val) pda_to_op("mov",field,val)
96#define add_pda(field,val) pda_to_op("add",field,val)
97#define sub_pda(field,val) pda_to_op("sub",field,val)
98#define or_pda(field,val) pda_to_op("or",field,val)
99
100#endif /* _I386_PDA_H */
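The pda_from_op()/pda_to_op() machinery above picks the mov width from sizeof(field) and addresses the field as %gs:offsetof(struct i386_pda, field). A plain-C analogue of what read_pda() computes (userspace sketch, no %gs segment, demo names); in the kernel this is what lets raw_smp_processor_id() become read_pda(cpu_number):

    #include <stddef.h>
    #include <stdio.h>

    struct i386_pda_demo {
            struct i386_pda_demo *_pda;         /* pointer to self */
            int cpu_number;
    };

    /* "base + offsetof(field)", typed as the field itself (GCC typeof) */
    #define demo_read_pda(base, field) \
            (*(typeof(((struct i386_pda_demo *)0)->field) *) \
                    ((char *)(base) + offsetof(struct i386_pda_demo, field)))

    int main(void)
    {
            struct i386_pda_demo pda = { ._pda = &pda, .cpu_number = 3 };
            printf("cpu=%d\n", demo_read_pda(&pda, cpu_number));
            return 0;
    }
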
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index 5764afa4b6a..510ae1d3486 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -1,6 +1,31 @@
1#ifndef __ARCH_I386_PERCPU__ 1#ifndef __ARCH_I386_PERCPU__
2#define __ARCH_I386_PERCPU__ 2#define __ARCH_I386_PERCPU__
3 3
4#ifndef __ASSEMBLY__
4#include <asm-generic/percpu.h> 5#include <asm-generic/percpu.h>
6#else
7
8/*
9 * PER_CPU finds an address of a per-cpu variable.
10 *
11 * Args:
12 * var - variable name
13 * cpu - 32bit register containing the current CPU number
14 *
15 * The resulting address is stored in the "cpu" argument.
16 *
17 * Example:
18 * PER_CPU(cpu_gdt_descr, %ebx)
19 */
20#ifdef CONFIG_SMP
21#define PER_CPU(var, cpu) \
22 movl __per_cpu_offset(,cpu,4), cpu; \
23 addl $per_cpu__/**/var, cpu;
24#else /* ! SMP */
25#define PER_CPU(var, cpu) \
26 movl $per_cpu__/**/var, cpu;
27#endif /* SMP */
28
29#endif /* !__ASSEMBLY__ */
5 30
6#endif /* __ARCH_I386_PERCPU__ */ 31#endif /* __ARCH_I386_PERCPU__ */
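The SMP variant of the PER_CPU assembler macro above is two instructions: load this CPU's offset from __per_cpu_offset, then add the variable's link-time address. The same arithmetic in plain C, with demo names (illustration only):

    #include <stdio.h>

    #define DEMO_NR_CPUS 2

    static unsigned long demo_per_cpu_offset[DEMO_NR_CPUS] = { 0x0, 0x1000 };

    static unsigned long demo_per_cpu_addr(unsigned long var_addr, int cpu)
    {
            /* movl __per_cpu_offset(,cpu,4), cpu;  addl $per_cpu__var, cpu */
            return demo_per_cpu_offset[cpu] + var_addr;
    }

    int main(void)
    {
            unsigned long base = 0xc0400000UL;  /* hypothetical $per_cpu__cpu_gdt_descr */
            printf("cpu 1 copy at %#lx\n", demo_per_cpu_addr(base, 1));
            return 0;
    }
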
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 8d8d3b9ecdb..38c3fcc0676 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -1,8 +1,6 @@
1#ifndef _I386_PGTABLE_2LEVEL_H 1#ifndef _I386_PGTABLE_2LEVEL_H
2#define _I386_PGTABLE_2LEVEL_H 2#define _I386_PGTABLE_2LEVEL_H
3 3
4#include <asm-generic/pgtable-nopmd.h>
5
6#define pte_ERROR(e) \ 4#define pte_ERROR(e) \
7 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) 5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
8#define pgd_ERROR(e) \ 6#define pgd_ERROR(e) \
@@ -13,17 +11,19 @@
13 * within a page table are directly modified. Thus, the following 11 * within a page table are directly modified. Thus, the following
14 * hook is made available. 12 * hook is made available.
15 */ 13 */
14#ifndef CONFIG_PARAVIRT
16#define set_pte(pteptr, pteval) (*(pteptr) = pteval) 15#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
17#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 16#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
17#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
18#endif
19
18#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) 20#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
19#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval) 21#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
20#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
21 22
22#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) 23#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
23#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) 24#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
24 25
25#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 26#define raw_ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0))
26#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0))
27 27
28#define pte_page(x) pfn_to_page(pte_pfn(x)) 28#define pte_page(x) pfn_to_page(pte_pfn(x))
29#define pte_none(x) (!(x).pte_low) 29#define pte_none(x) (!(x).pte_low)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index c2d701ea35b..7a2318f3830 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -1,8 +1,6 @@
1#ifndef _I386_PGTABLE_3LEVEL_H 1#ifndef _I386_PGTABLE_3LEVEL_H
2#define _I386_PGTABLE_3LEVEL_H 2#define _I386_PGTABLE_3LEVEL_H
3 3
4#include <asm-generic/pgtable-nopud.h>
5
6/* 4/*
7 * Intel Physical Address Extension (PAE) Mode - three-level page 5 * Intel Physical Address Extension (PAE) Mode - three-level page
8 * tables on PPro+ CPUs. 6 * tables on PPro+ CPUs.
@@ -44,6 +42,7 @@ static inline int pte_exec_kernel(pte_t pte)
44 return pte_x(pte); 42 return pte_x(pte);
45} 43}
46 44
45#ifndef CONFIG_PARAVIRT
47/* Rules for using set_pte: the pte being assigned *must* be 46/* Rules for using set_pte: the pte being assigned *must* be
48 * either not present or in a state where the hardware will 47 * either not present or in a state where the hardware will
49 * not attempt to update the pte. In places where this is 48 * not attempt to update the pte. In places where this is
@@ -81,25 +80,6 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte
81 (*(pudptr) = (pudval)) 80 (*(pudptr) = (pudval))
82 81
83/* 82/*
84 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
85 * the TLB via cr3 if the top-level pgd is changed...
86 * We do not let the generic code free and clear pgd entries due to
87 * this erratum.
88 */
89static inline void pud_clear (pud_t * pud) { }
90
91#define pud_page(pud) \
92((struct page *) __va(pud_val(pud) & PAGE_MASK))
93
94#define pud_page_vaddr(pud) \
95((unsigned long) __va(pud_val(pud) & PAGE_MASK))
96
97
98/* Find an entry in the second-level page table.. */
99#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
100 pmd_index(address))
101
102/*
103 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table 83 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
104 * entry, so clear the bottom half first and enforce ordering with a compiler 84 * entry, so clear the bottom half first and enforce ordering with a compiler
105 * barrier. 85 * barrier.
@@ -118,9 +98,28 @@ static inline void pmd_clear(pmd_t *pmd)
118 smp_wmb(); 98 smp_wmb();
119 *(tmp + 1) = 0; 99 *(tmp + 1) = 0;
120} 100}
101#endif
102
103/*
104 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
105 * the TLB via cr3 if the top-level pgd is changed...
106 * We do not let the generic code free and clear pgd entries due to
107 * this erratum.
108 */
109static inline void pud_clear (pud_t * pud) { }
110
111#define pud_page(pud) \
112((struct page *) __va(pud_val(pud) & PAGE_MASK))
113
114#define pud_page_vaddr(pud) \
115((unsigned long) __va(pud_val(pud) & PAGE_MASK))
116
117
118/* Find an entry in the second-level page table.. */
119#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
120 pmd_index(address))
121 121
122#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 122static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
123static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
124{ 123{
125 pte_t res; 124 pte_t res;
126 125
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 7d398f493dd..e6a4723f0eb 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -15,6 +15,7 @@
15#include <asm/processor.h> 15#include <asm/processor.h>
16#include <asm/fixmap.h> 16#include <asm/fixmap.h>
17#include <linux/threads.h> 17#include <linux/threads.h>
18#include <asm/paravirt.h>
18 19
19#ifndef _I386_BITOPS_H 20#ifndef _I386_BITOPS_H
20#include <asm/bitops.h> 21#include <asm/bitops.h>
@@ -34,14 +35,14 @@ struct vm_area_struct;
34#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 35#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
35extern unsigned long empty_zero_page[1024]; 36extern unsigned long empty_zero_page[1024];
36extern pgd_t swapper_pg_dir[1024]; 37extern pgd_t swapper_pg_dir[1024];
37extern kmem_cache_t *pgd_cache; 38extern struct kmem_cache *pgd_cache;
38extern kmem_cache_t *pmd_cache; 39extern struct kmem_cache *pmd_cache;
39extern spinlock_t pgd_lock; 40extern spinlock_t pgd_lock;
40extern struct page *pgd_list; 41extern struct page *pgd_list;
41 42
42void pmd_ctor(void *, kmem_cache_t *, unsigned long); 43void pmd_ctor(void *, struct kmem_cache *, unsigned long);
43void pgd_ctor(void *, kmem_cache_t *, unsigned long); 44void pgd_ctor(void *, struct kmem_cache *, unsigned long);
44void pgd_dtor(void *, kmem_cache_t *, unsigned long); 45void pgd_dtor(void *, struct kmem_cache *, unsigned long);
45void pgtable_cache_init(void); 46void pgtable_cache_init(void);
46void paging_init(void); 47void paging_init(void);
47 48
@@ -246,6 +247,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
246# include <asm/pgtable-2level.h> 247# include <asm/pgtable-2level.h>
247#endif 248#endif
248 249
250#ifndef CONFIG_PARAVIRT
249/* 251/*
250 * Rules for using pte_update - it must be called after any PTE update which 252 * Rules for using pte_update - it must be called after any PTE update which
251 * has not been done using the set_pte / clear_pte interfaces. It is used by 253 * has not been done using the set_pte / clear_pte interfaces. It is used by
@@ -261,7 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
261 */ 263 */
262#define pte_update(mm, addr, ptep) do { } while (0) 264#define pte_update(mm, addr, ptep) do { } while (0)
263#define pte_update_defer(mm, addr, ptep) do { } while (0) 265#define pte_update_defer(mm, addr, ptep) do { } while (0)
264 266#endif
265 267
266/* 268/*
267 * We only update the dirty/accessed state if we set 269 * We only update the dirty/accessed state if we set
@@ -275,7 +277,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
275do { \ 277do { \
276 if (dirty) { \ 278 if (dirty) { \
277 (ptep)->pte_low = (entry).pte_low; \ 279 (ptep)->pte_low = (entry).pte_low; \
278 pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ 280 pte_update_defer((vma)->vm_mm, (address), (ptep)); \
279 flush_tlb_page(vma, address); \ 281 flush_tlb_page(vma, address); \
280 } \ 282 } \
281} while (0) 283} while (0)
@@ -305,7 +307,7 @@ do { \
305 __dirty = pte_dirty(*(ptep)); \ 307 __dirty = pte_dirty(*(ptep)); \
306 if (__dirty) { \ 308 if (__dirty) { \
307 clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \ 309 clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \
308 pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ 310 pte_update_defer((vma)->vm_mm, (address), (ptep)); \
309 flush_tlb_page(vma, address); \ 311 flush_tlb_page(vma, address); \
310 } \ 312 } \
311 __dirty; \ 313 __dirty; \
@@ -318,12 +320,20 @@ do { \
318 __young = pte_young(*(ptep)); \ 320 __young = pte_young(*(ptep)); \
319 if (__young) { \ 321 if (__young) { \
320 clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \ 322 clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \
321 pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ 323 pte_update_defer((vma)->vm_mm, (address), (ptep)); \
322 flush_tlb_page(vma, address); \ 324 flush_tlb_page(vma, address); \
323 } \ 325 } \
324 __young; \ 326 __young; \
325}) 327})
326 328
329#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
330static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
331{
332 pte_t pte = raw_ptep_get_and_clear(ptep);
333 pte_update(mm, addr, ptep);
334 return pte;
335}
336
327#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 337#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
328static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) 338static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
329{ 339{
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index e0ddca94d50..a52d6544042 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -20,6 +20,7 @@
20#include <linux/threads.h> 20#include <linux/threads.h>
21#include <asm/percpu.h> 21#include <asm/percpu.h>
22#include <linux/cpumask.h> 22#include <linux/cpumask.h>
23#include <linux/init.h>
23 24
24/* flag for disabling the tsc */ 25/* flag for disabling the tsc */
25extern int tsc_disable; 26extern int tsc_disable;
@@ -72,6 +73,7 @@ struct cpuinfo_x86 {
72#endif 73#endif
73 unsigned char x86_max_cores; /* cpuid returned max cores value */ 74 unsigned char x86_max_cores; /* cpuid returned max cores value */
74 unsigned char apicid; 75 unsigned char apicid;
76 unsigned short x86_clflush_size;
75#ifdef CONFIG_SMP 77#ifdef CONFIG_SMP
76 unsigned char booted_cores; /* number of cores as seen by OS */ 78 unsigned char booted_cores; /* number of cores as seen by OS */
77 __u8 phys_proc_id; /* Physical processor id. */ 79 __u8 phys_proc_id; /* Physical processor id. */
@@ -111,6 +113,8 @@ extern struct cpuinfo_x86 cpu_data[];
111extern int cpu_llc_id[NR_CPUS]; 113extern int cpu_llc_id[NR_CPUS];
112extern char ignore_fpu_irq; 114extern char ignore_fpu_irq;
113 115
116void __init cpu_detect(struct cpuinfo_x86 *c);
117
114extern void identify_cpu(struct cpuinfo_x86 *); 118extern void identify_cpu(struct cpuinfo_x86 *);
115extern void print_cpu_info(struct cpuinfo_x86 *); 119extern void print_cpu_info(struct cpuinfo_x86 *);
116extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 120extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
@@ -143,8 +147,8 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
143#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ 147#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
144#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ 148#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
145 149
146static inline void __cpuid(unsigned int *eax, unsigned int *ebx, 150static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
147 unsigned int *ecx, unsigned int *edx) 151 unsigned int *ecx, unsigned int *edx)
148{ 152{
149 /* ecx is often an input as well as an output. */ 153 /* ecx is often an input as well as an output. */
150 __asm__("cpuid" 154 __asm__("cpuid"
@@ -155,59 +159,6 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
155 : "0" (*eax), "2" (*ecx)); 159 : "0" (*eax), "2" (*ecx));
156} 160}
157 161
158/*
159 * Generic CPUID function
160 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
161 * resulting in stale register contents being returned.
162 */
163static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
164{
165 *eax = op;
166 *ecx = 0;
167 __cpuid(eax, ebx, ecx, edx);
168}
169
170/* Some CPUID calls want 'count' to be placed in ecx */
171static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
172 int *edx)
173{
174 *eax = op;
175 *ecx = count;
176 __cpuid(eax, ebx, ecx, edx);
177}
178
179/*
180 * CPUID functions returning a single datum
181 */
182static inline unsigned int cpuid_eax(unsigned int op)
183{
184 unsigned int eax, ebx, ecx, edx;
185
186 cpuid(op, &eax, &ebx, &ecx, &edx);
187 return eax;
188}
189static inline unsigned int cpuid_ebx(unsigned int op)
190{
191 unsigned int eax, ebx, ecx, edx;
192
193 cpuid(op, &eax, &ebx, &ecx, &edx);
194 return ebx;
195}
196static inline unsigned int cpuid_ecx(unsigned int op)
197{
198 unsigned int eax, ebx, ecx, edx;
199
200 cpuid(op, &eax, &ebx, &ecx, &edx);
201 return ecx;
202}
203static inline unsigned int cpuid_edx(unsigned int op)
204{
205 unsigned int eax, ebx, ecx, edx;
206
207 cpuid(op, &eax, &ebx, &ecx, &edx);
208 return edx;
209}
210
211#define load_cr3(pgdir) write_cr3(__pa(pgdir)) 162#define load_cr3(pgdir) write_cr3(__pa(pgdir))
212 163
213/* 164/*
@@ -473,6 +424,7 @@ struct thread_struct {
473 .vm86_info = NULL, \ 424 .vm86_info = NULL, \
474 .sysenter_cs = __KERNEL_CS, \ 425 .sysenter_cs = __KERNEL_CS, \
475 .io_bitmap_ptr = NULL, \ 426 .io_bitmap_ptr = NULL, \
427 .gs = __KERNEL_PDA, \
476} 428}
477 429
478/* 430/*
@@ -489,18 +441,9 @@ struct thread_struct {
489 .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ 441 .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
490} 442}
491 443
492static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
493{
494 tss->esp0 = thread->esp0;
495 /* This can only happen when SEP is enabled, no need to test "SEP"arately */
496 if (unlikely(tss->ss1 != thread->sysenter_cs)) {
497 tss->ss1 = thread->sysenter_cs;
498 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
499 }
500}
501
502#define start_thread(regs, new_eip, new_esp) do { \ 444#define start_thread(regs, new_eip, new_esp) do { \
503 __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ 445 __asm__("movl %0,%%fs": :"r" (0)); \
446 regs->xgs = 0; \
504 set_fs(USER_DS); \ 447 set_fs(USER_DS); \
505 regs->xds = __USER_DS; \ 448 regs->xds = __USER_DS; \
506 regs->xes = __USER_DS; \ 449 regs->xes = __USER_DS; \
@@ -510,33 +453,6 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
510 regs->esp = new_esp; \ 453 regs->esp = new_esp; \
511} while (0) 454} while (0)
512 455
513/*
514 * These special macros can be used to get or set a debugging register
515 */
516#define get_debugreg(var, register) \
517 __asm__("movl %%db" #register ", %0" \
518 :"=r" (var))
519#define set_debugreg(value, register) \
520 __asm__("movl %0,%%db" #register \
521 : /* no output */ \
522 :"r" (value))
523
524/*
525 * Set IOPL bits in EFLAGS from given mask
526 */
527static inline void set_iopl_mask(unsigned mask)
528{
529 unsigned int reg;
530 __asm__ __volatile__ ("pushfl;"
531 "popl %0;"
532 "andl %1, %0;"
533 "orl %2, %0;"
534 "pushl %0;"
535 "popfl"
536 : "=&r" (reg)
537 : "i" (~X86_EFLAGS_IOPL), "r" (mask));
538}
539
540/* Forward declaration, a strange C thing */ 456/* Forward declaration, a strange C thing */
541struct task_struct; 457struct task_struct;
542struct mm_struct; 458struct mm_struct;
@@ -628,6 +544,105 @@ static inline void rep_nop(void)
628 544
629#define cpu_relax() rep_nop() 545#define cpu_relax() rep_nop()
630 546
547#ifdef CONFIG_PARAVIRT
548#include <asm/paravirt.h>
549#else
550#define paravirt_enabled() 0
551#define __cpuid native_cpuid
552
553static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
554{
555 tss->esp0 = thread->esp0;
556 /* This can only happen when SEP is enabled, no need to test "SEP"arately */
557 if (unlikely(tss->ss1 != thread->sysenter_cs)) {
558 tss->ss1 = thread->sysenter_cs;
559 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
560 }
561}
562
563/*
564 * These special macros can be used to get or set a debugging register
565 */
566#define get_debugreg(var, register) \
567 __asm__("movl %%db" #register ", %0" \
568 :"=r" (var))
569#define set_debugreg(value, register) \
570 __asm__("movl %0,%%db" #register \
571 : /* no output */ \
572 :"r" (value))
573
574#define set_iopl_mask native_set_iopl_mask
575#endif /* CONFIG_PARAVIRT */
576
577/*
578 * Set IOPL bits in EFLAGS from given mask
579 */
580static fastcall inline void native_set_iopl_mask(unsigned mask)
581{
582 unsigned int reg;
583 __asm__ __volatile__ ("pushfl;"
584 "popl %0;"
585 "andl %1, %0;"
586 "orl %2, %0;"
587 "pushl %0;"
588 "popfl"
589 : "=&r" (reg)
590 : "i" (~X86_EFLAGS_IOPL), "r" (mask));
591}
592
593/*
594 * Generic CPUID function
595 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
596 * resulting in stale register contents being returned.
597 */
598static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
599{
600 *eax = op;
601 *ecx = 0;
602 __cpuid(eax, ebx, ecx, edx);
603}
604
605/* Some CPUID calls want 'count' to be placed in ecx */
606static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
607 int *edx)
608{
609 *eax = op;
610 *ecx = count;
611 __cpuid(eax, ebx, ecx, edx);
612}
613
614/*
615 * CPUID functions returning a single datum
616 */
617static inline unsigned int cpuid_eax(unsigned int op)
618{
619 unsigned int eax, ebx, ecx, edx;
620
621 cpuid(op, &eax, &ebx, &ecx, &edx);
622 return eax;
623}
624static inline unsigned int cpuid_ebx(unsigned int op)
625{
626 unsigned int eax, ebx, ecx, edx;
627
628 cpuid(op, &eax, &ebx, &ecx, &edx);
629 return ebx;
630}
631static inline unsigned int cpuid_ecx(unsigned int op)
632{
633 unsigned int eax, ebx, ecx, edx;
634
635 cpuid(op, &eax, &ebx, &ecx, &edx);
636 return ecx;
637}
638static inline unsigned int cpuid_edx(unsigned int op)
639{
640 unsigned int eax, ebx, ecx, edx;
641
642 cpuid(op, &eax, &ebx, &ecx, &edx);
643 return edx;
644}
645
631/* generic versions from gas */ 646/* generic versions from gas */
632#define GENERIC_NOP1 ".byte 0x90\n" 647#define GENERIC_NOP1 ".byte 0x90\n"
633#define GENERIC_NOP2 ".byte 0x89,0xf6\n" 648#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
@@ -727,4 +742,7 @@ extern unsigned long boot_option_idle_override;
727extern void enable_sep_cpu(void); 742extern void enable_sep_cpu(void);
728extern int sysenter_setup(void); 743extern int sysenter_setup(void);
729 744
745extern int init_gdt(int cpu, struct task_struct *idle);
746extern void secondary_cpu_init(void);
747
730#endif /* __ASM_I386_PROCESSOR_H */ 748#endif /* __ASM_I386_PROCESSOR_H */
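The cpuid helpers moved above all reduce to one CPUID instruction with %ecx cleared first (the Cyrix MII issue noted in the comment). A standalone usage sketch for an x86 build, mirroring native_cpuid()/cpuid_eax() with demo names; leaf 0 returns the maximum standard leaf in eax and the vendor string in ebx, edx, ecx:

    #include <stdio.h>
    #include <string.h>

    static inline void demo_cpuid(unsigned int op, unsigned int *eax,
                                  unsigned int *ebx, unsigned int *ecx,
                                  unsigned int *edx)
    {
            *eax = op;
            *ecx = 0;   /* clear ecx first, as the comment above explains */
            __asm__("cpuid"
                    : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                    : "0" (*eax), "2" (*ecx));
    }

    int main(void)
    {
            unsigned int a, b, c, d;
            char vendor[13];

            demo_cpuid(0, &a, &b, &c, &d);
            memcpy(vendor + 0, &b, 4);
            memcpy(vendor + 4, &d, 4);
            memcpy(vendor + 8, &c, 4);
            vendor[12] = '\0';
            printf("max standard leaf %u, vendor \"%s\"\n", a, vendor);
            return 0;
    }
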
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index d505f501077..bdbc894339b 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -16,6 +16,8 @@ struct pt_regs {
16 long eax; 16 long eax;
17 int xds; 17 int xds;
18 int xes; 18 int xes;
19 /* int xfs; */
20 int xgs;
19 long orig_eax; 21 long orig_eax;
20 long eip; 22 long eip;
21 int xcs; 23 int xcs;
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index bc598d6388e..041906f3c6d 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -75,8 +75,8 @@ struct rw_semaphore {
75 75
76 76
77#define __RWSEM_INITIALIZER(name) \ 77#define __RWSEM_INITIALIZER(name) \
78{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ 78{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
79 __RWSEM_DEP_MAP_INIT(name) } 79 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
80 80
81#define DECLARE_RWSEM(name) \ 81#define DECLARE_RWSEM(name) \
82 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index b7ab59685ba..3c796af3377 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -39,7 +39,7 @@
39 * 25 - APM BIOS support 39 * 25 - APM BIOS support
40 * 40 *
41 * 26 - ESPFIX small SS 41 * 26 - ESPFIX small SS
42 * 27 - unused 42 * 27 - PDA [ per-cpu private data area ]
43 * 28 - unused 43 * 28 - unused
44 * 29 - unused 44 * 29 - unused
45 * 30 - unused 45 * 30 - unused
@@ -74,6 +74,9 @@
74#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) 74#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
75#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) 75#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
76 76
77#define GDT_ENTRY_PDA (GDT_ENTRY_KERNEL_BASE + 15)
78#define __KERNEL_PDA (GDT_ENTRY_PDA * 8)
79
77#define GDT_ENTRY_DOUBLEFAULT_TSS 31 80#define GDT_ENTRY_DOUBLEFAULT_TSS 31
78 81
79/* 82/*
@@ -128,5 +131,7 @@
128#define SEGMENT_LDT 0x4 131#define SEGMENT_LDT 0x4
129#define SEGMENT_GDT 0x0 132#define SEGMENT_GDT 0x0
130 133
134#ifndef CONFIG_PARAVIRT
131#define get_kernel_rpl() 0 135#define get_kernel_rpl() 0
132#endif 136#endif
137#endif
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 2734909eff8..67659dbaf12 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -6,6 +6,8 @@
6#ifndef _i386_SETUP_H 6#ifndef _i386_SETUP_H
7#define _i386_SETUP_H 7#define _i386_SETUP_H
8 8
9#define COMMAND_LINE_SIZE 256
10
9#ifdef __KERNEL__ 11#ifdef __KERNEL__
10#include <linux/pfn.h> 12#include <linux/pfn.h>
11 13
@@ -14,10 +16,8 @@
14 */ 16 */
15#define MAXMEM_PFN PFN_DOWN(MAXMEM) 17#define MAXMEM_PFN PFN_DOWN(MAXMEM)
16#define MAX_NONPAE_PFN (1 << 20) 18#define MAX_NONPAE_PFN (1 << 20)
17#endif
18 19
19#define PARAM_SIZE 4096 20#define PARAM_SIZE 4096
20#define COMMAND_LINE_SIZE 256
21 21
22#define OLD_CL_MAGIC_ADDR 0x90020 22#define OLD_CL_MAGIC_ADDR 0x90020
23#define OLD_CL_MAGIC 0xA33F 23#define OLD_CL_MAGIC 0xA33F
@@ -70,6 +70,7 @@ extern unsigned char boot_params[PARAM_SIZE];
70struct e820entry; 70struct e820entry;
71 71
72char * __init machine_specific_memory_setup(void); 72char * __init machine_specific_memory_setup(void);
73char *memory_setup(void);
73 74
74int __init copy_e820_map(struct e820entry * biosmap, int nr_map); 75int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
75int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); 76int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
@@ -78,4 +79,6 @@ void __init add_memory_region(unsigned long long start,
78 79
79#endif /* __ASSEMBLY__ */ 80#endif /* __ASSEMBLY__ */
80 81
82#endif /* __KERNEL__ */
83
81#endif /* _i386_SETUP_H */ 84#endif /* _i386_SETUP_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index bd59c1508e7..64fe624c02c 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -8,6 +8,7 @@
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/threads.h> 9#include <linux/threads.h>
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <asm/pda.h>
11#endif 12#endif
12 13
13#ifdef CONFIG_X86_LOCAL_APIC 14#ifdef CONFIG_X86_LOCAL_APIC
@@ -56,7 +57,7 @@ extern void cpu_uninit(void);
56 * from the initial startup. We map APIC_BASE very early in page_setup(), 57 * from the initial startup. We map APIC_BASE very early in page_setup(),
57 * so this is correct in the x86 case. 58 * so this is correct in the x86 case.
58 */ 59 */
59#define raw_smp_processor_id() (current_thread_info()->cpu) 60#define raw_smp_processor_id() (read_pda(cpu_number))
60 61
61extern cpumask_t cpu_callout_map; 62extern cpumask_t cpu_callout_map;
62extern cpumask_t cpu_callin_map; 63extern cpumask_t cpu_callin_map;
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index c18b71fae6b..d3bcebed60c 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,8 +7,14 @@
7#include <asm/processor.h> 7#include <asm/processor.h>
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9 9
10#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h>
12#else
10#define CLI_STRING "cli" 13#define CLI_STRING "cli"
11#define STI_STRING "sti" 14#define STI_STRING "sti"
15#define CLI_STI_CLOBBERS
16#define CLI_STI_INPUT_ARGS
17#endif /* CONFIG_PARAVIRT */
12 18
13/* 19/*
14 * Your basic SMP spinlocks, allowing only a single CPU anywhere 20 * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -53,25 +59,28 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
53{ 59{
54 asm volatile( 60 asm volatile(
55 "\n1:\t" 61 "\n1:\t"
56 LOCK_PREFIX " ; decb %0\n\t" 62 LOCK_PREFIX " ; decb %[slock]\n\t"
57 "jns 5f\n" 63 "jns 5f\n"
58 "2:\t" 64 "2:\t"
59 "testl $0x200, %1\n\t" 65 "testl $0x200, %[flags]\n\t"
60 "jz 4f\n\t" 66 "jz 4f\n\t"
61 STI_STRING "\n" 67 STI_STRING "\n"
62 "3:\t" 68 "3:\t"
63 "rep;nop\n\t" 69 "rep;nop\n\t"
64 "cmpb $0, %0\n\t" 70 "cmpb $0, %[slock]\n\t"
65 "jle 3b\n\t" 71 "jle 3b\n\t"
66 CLI_STRING "\n\t" 72 CLI_STRING "\n\t"
67 "jmp 1b\n" 73 "jmp 1b\n"
68 "4:\t" 74 "4:\t"
69 "rep;nop\n\t" 75 "rep;nop\n\t"
70 "cmpb $0, %0\n\t" 76 "cmpb $0, %[slock]\n\t"
71 "jg 1b\n\t" 77 "jg 1b\n\t"
72 "jmp 4b\n" 78 "jmp 4b\n"
73 "5:\n\t" 79 "5:\n\t"
74 : "+m" (lock->slock) : "r" (flags) : "memory"); 80 : [slock] "+m" (lock->slock)
81 : [flags] "r" (flags)
82 CLI_STI_INPUT_ARGS
83 : "memory" CLI_STI_CLOBBERS);
75} 84}
76#endif 85#endif
77 86
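The asm in __raw_spin_lock_flags() above does the following: try to take the lock with a locked decrement; if that fails and the caller's saved EFLAGS had IF (0x200) set, re-enable interrupts (STI_STRING, which may now be a paravirt call) while spinning, then disable them again (CLI_STRING) before retrying. The same control flow in plain C (illustrative sketch, demo names):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_EFLAGS_IF 0x200UL

    static void demo_sti(void) { /* STI_STRING: "sti" or the paravirt call */ }
    static void demo_cli(void) { /* CLI_STRING: "cli" or the paravirt call */ }

    static bool demo_try_lock(volatile signed char *slock)
    {
            /* LOCK_PREFIX; decb %[slock]; jns 5f -- old value > 0 means we won */
            return __atomic_fetch_sub(slock, 1, __ATOMIC_ACQUIRE) > 0;
    }

    static void demo_spin_lock_flags(volatile signed char *slock, unsigned long flags)
    {
            while (!demo_try_lock(slock)) {
                    if (flags & DEMO_EFLAGS_IF)
                            demo_sti();         /* caller had interrupts enabled */
                    while (*slock <= 0)
                            ;                   /* "rep;nop" wait loop */
                    if (flags & DEMO_EFLAGS_IF)
                            demo_cli();         /* disable again, then retry */
            }
    }

    int main(void)
    {
            volatile signed char lock = 1;      /* 1 == unlocked */
            demo_spin_lock_flags(&lock, DEMO_EFLAGS_IF);
            printf("locked, slock=%d\n", (int)lock);
            return 0;
    }
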
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index 08be1e5009d..8dbaafe611f 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -6,29 +6,14 @@
6#include <asm/desc.h> 6#include <asm/desc.h>
7#include <asm/i387.h> 7#include <asm/i387.h>
8 8
9static inline int 9static inline int arch_prepare_suspend(void) { return 0; }
10arch_prepare_suspend(void)
11{
12 /* If you want to make non-PSE machine work, turn off paging
13 in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so
14 it could work... */
15 if (!cpu_has_pse) {
16 printk(KERN_ERR "PSE is required for swsusp.\n");
17 return -EPERM;
18 }
19 return 0;
20}
21 10
22/* image of the saved processor state */ 11/* image of the saved processor state */
23struct saved_context { 12struct saved_context {
24 u16 es, fs, gs, ss; 13 u16 es, fs, gs, ss;
25 unsigned long cr0, cr2, cr3, cr4; 14 unsigned long cr0, cr2, cr3, cr4;
26 u16 gdt_pad; 15 struct Xgt_desc_struct gdt;
27 u16 gdt_limit; 16 struct Xgt_desc_struct idt;
28 unsigned long gdt_base;
29 u16 idt_pad;
30 u16 idt_limit;
31 unsigned long idt_base;
32 u16 ldt; 17 u16 ldt;
33 u16 tss; 18 u16 tss;
34 unsigned long tr; 19 unsigned long tr;
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a6dabbcd6e6..a6d20d9a1a3 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -88,6 +88,9 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
88#define savesegment(seg, value) \ 88#define savesegment(seg, value) \
89 asm volatile("mov %%" #seg ",%0":"=rm" (value)) 89 asm volatile("mov %%" #seg ",%0":"=rm" (value))
90 90
91#ifdef CONFIG_PARAVIRT
92#include <asm/paravirt.h>
93#else
91#define read_cr0() ({ \ 94#define read_cr0() ({ \
92 unsigned int __dummy; \ 95 unsigned int __dummy; \
93 __asm__ __volatile__( \ 96 __asm__ __volatile__( \
@@ -139,17 +142,18 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
139#define write_cr4(x) \ 142#define write_cr4(x) \
140 __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) 143 __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
141 144
142/* 145#define wbinvd() \
143 * Clear and set 'TS' bit respectively 146 __asm__ __volatile__ ("wbinvd": : :"memory")
144 */ 147
148/* Clear the 'TS' bit */
145#define clts() __asm__ __volatile__ ("clts") 149#define clts() __asm__ __volatile__ ("clts")
150#endif/* CONFIG_PARAVIRT */
151
152/* Set the 'TS' bit */
146#define stts() write_cr0(8 | read_cr0()) 153#define stts() write_cr0(8 | read_cr0())
147 154
148#endif /* __KERNEL__ */ 155#endif /* __KERNEL__ */
149 156
150#define wbinvd() \
151 __asm__ __volatile__ ("wbinvd": : :"memory")
152
153static inline unsigned long get_limit(unsigned long segment) 157static inline unsigned long get_limit(unsigned long segment)
154{ 158{
155 unsigned long __limit; 159 unsigned long __limit;
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 54d6d7aea93..46d32ad9208 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -95,15 +95,7 @@ static inline struct thread_info *current_thread_info(void)
95 95
96/* thread information allocation */ 96/* thread information allocation */
97#ifdef CONFIG_DEBUG_STACK_USAGE 97#ifdef CONFIG_DEBUG_STACK_USAGE
98#define alloc_thread_info(tsk) \ 98#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL)
99 ({ \
100 struct thread_info *ret; \
101 \
102 ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \
103 if (ret) \
104 memset(ret, 0, THREAD_SIZE); \
105 ret; \
106 })
107#else 99#else
108#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 100#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
109#endif 101#endif
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
new file mode 100644
index 00000000000..ea8065af825
--- /dev/null
+++ b/include/asm-i386/time.h
@@ -0,0 +1,41 @@
1#ifndef _ASMi386_TIME_H
2#define _ASMi386_TIME_H
3
4#include <linux/efi.h>
5#include "mach_time.h"
6
7static inline unsigned long native_get_wallclock(void)
8{
9 unsigned long retval;
10
11 if (efi_enabled)
12 retval = efi_get_time();
13 else
14 retval = mach_get_cmos_time();
15
16 return retval;
17}
18
19static inline int native_set_wallclock(unsigned long nowtime)
20{
21 int retval;
22
23 if (efi_enabled)
24 retval = efi_set_rtc_mmss(nowtime);
25 else
26 retval = mach_set_rtc_mmss(nowtime);
27
28 return retval;
29}
30
31#ifdef CONFIG_PARAVIRT
32#include <asm/paravirt.h>
33#else /* !CONFIG_PARAVIRT */
34
35#define get_wallclock() native_get_wallclock()
36#define set_wallclock(x) native_set_wallclock(x)
37#define do_time_init() time_init_hook()
38
39#endif /* CONFIG_PARAVIRT */
40
41#endif
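This header shows the pattern used throughout the series: the hardware implementation gets a native_ prefix, and the short name either comes from asm/paravirt.h under CONFIG_PARAVIRT or is defined straight back to the native version. A self-contained sketch of that compile-time switch, with hypothetical names:

    #include <stdio.h>

    static unsigned long native_demo_get_wallclock(void)
    {
            return 1162425600UL;        /* stand-in for mach_get_cmos_time()/efi_get_time() */
    }

    #ifdef CONFIG_PARAVIRT_DEMO
    /* Hypothetical: the ops-table path a paravirt build would take instead. */
    extern unsigned long (*demo_ops_get_wallclock)(void);
    #define demo_get_wallclock() demo_ops_get_wallclock()
    #else
    #define demo_get_wallclock() native_demo_get_wallclock()
    #endif

    int main(void)
    {
            printf("%lu\n", demo_get_wallclock());
            return 0;
    }
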
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index 360648b0f2b..4dd82840d53 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -4,7 +4,15 @@
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
6 6
7#define __flush_tlb() \ 7#ifdef CONFIG_PARAVIRT
8#include <asm/paravirt.h>
9#else
10#define __flush_tlb() __native_flush_tlb()
11#define __flush_tlb_global() __native_flush_tlb_global()
12#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
13#endif
14
15#define __native_flush_tlb() \
8 do { \ 16 do { \
9 unsigned int tmpreg; \ 17 unsigned int tmpreg; \
10 \ 18 \
@@ -19,7 +27,7 @@
19 * Global pages have to be flushed a bit differently. Not a real 27 * Global pages have to be flushed a bit differently. Not a real
20 * performance problem because this does not happen often. 28 * performance problem because this does not happen often.
21 */ 29 */
22#define __flush_tlb_global() \ 30#define __native_flush_tlb_global() \
23 do { \ 31 do { \
24 unsigned int tmpreg, cr4, cr4_orig; \ 32 unsigned int tmpreg, cr4, cr4_orig; \
25 \ 33 \
@@ -36,6 +44,9 @@
36 : "memory"); \ 44 : "memory"); \
37 } while (0) 45 } while (0)
38 46
47#define __native_flush_tlb_single(addr) \
48 __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
49
39# define __flush_tlb_all() \ 50# define __flush_tlb_all() \
40 do { \ 51 do { \
41 if (cpu_has_pge) \ 52 if (cpu_has_pge) \
@@ -46,9 +57,6 @@
46 57
47#define cpu_has_invlpg (boot_cpu_data.x86 > 3) 58#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
48 59
49#define __flush_tlb_single(addr) \
50 __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
51
52#ifdef CONFIG_X86_INVLPG 60#ifdef CONFIG_X86_INVLPG
53# define __flush_tlb_one(addr) __flush_tlb_single(addr) 61# define __flush_tlb_one(addr) __flush_tlb_single(addr)
54#else 62#else
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index beeeaf6b054..833fa1704ff 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,104 +329,6 @@
329#ifdef __KERNEL__ 329#ifdef __KERNEL__
330 330
331#define NR_syscalls 320 331#define NR_syscalls 320
332#include <linux/err.h>
333
334/*
335 * user-visible error numbers are in the range -1 - -MAX_ERRNO: see
336 * <asm-i386/errno.h>
337 */
338#define __syscall_return(type, res) \
339do { \
340 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
341 errno = -(res); \
342 res = -1; \
343 } \
344 return (type) (res); \
345} while (0)
346
347/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
348#define _syscall0(type,name) \
349type name(void) \
350{ \
351long __res; \
352__asm__ volatile ("int $0x80" \
353 : "=a" (__res) \
354 : "0" (__NR_##name)); \
355__syscall_return(type,__res); \
356}
357
358#define _syscall1(type,name,type1,arg1) \
359type name(type1 arg1) \
360{ \
361long __res; \
362__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
363 : "=a" (__res) \
364 : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
365__syscall_return(type,__res); \
366}
367
368#define _syscall2(type,name,type1,arg1,type2,arg2) \
369type name(type1 arg1,type2 arg2) \
370{ \
371long __res; \
372__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
373 : "=a" (__res) \
374 : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
375 : "memory"); \
376__syscall_return(type,__res); \
377}
378
379#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
380type name(type1 arg1,type2 arg2,type3 arg3) \
381{ \
382long __res; \
383__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
384 : "=a" (__res) \
385 : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
386 "d" ((long)(arg3)) : "memory"); \
387__syscall_return(type,__res); \
388}
389
390#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
391type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
392{ \
393long __res; \
394__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
395 : "=a" (__res) \
396 : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
397 "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
398__syscall_return(type,__res); \
399}
400
401#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
402 type5,arg5) \
403type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
404{ \
405long __res; \
406__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
407 "int $0x80 ; pop %%ebx" \
408 : "=a" (__res) \
409 : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
410 "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
411 : "memory"); \
412__syscall_return(type,__res); \
413}
414
415#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
416 type5,arg5,type6,arg6) \
417type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
418{ \
419long __res; \
420 struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
421__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
422 "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
423 "pop %%ebx ; pop %%ebp" \
424 : "=a" (__res) \
425 : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
426 "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
427 : "memory"); \
428__syscall_return(type,__res); \
429}
430 332
431#define __ARCH_WANT_IPC_PARSE_VERSION 333#define __ARCH_WANT_IPC_PARSE_VERSION
432#define __ARCH_WANT_OLD_READDIR 334#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index 5031d693b89..aa2c931e30d 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -71,6 +71,7 @@ static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
71 info->regs.xss = __KERNEL_DS; 71 info->regs.xss = __KERNEL_DS;
72 info->regs.xds = __USER_DS; 72 info->regs.xds = __USER_DS;
73 info->regs.xes = __USER_DS; 73 info->regs.xes = __USER_DS;
74 info->regs.xgs = __KERNEL_PDA;
74} 75}
75 76
76extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, 77extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
@@ -78,17 +79,13 @@ extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
78 void *arg), 79 void *arg),
79 void *arg); 80 void *arg);
80 81
81static inline int arch_unw_user_mode(const struct unwind_frame_info *info) 82static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info)
82{ 83{
83#if 0 /* This can only work when selector register and EFLAGS saves/restores 84 return user_mode_vm(&info->regs)
84 are properly annotated (and tracked in UNW_REGISTER_INFO). */ 85 || info->regs.eip < PAGE_OFFSET
85 return user_mode_vm(&info->regs);
86#else
87 return info->regs.eip < PAGE_OFFSET
88 || (info->regs.eip >= __fix_to_virt(FIX_VDSO) 86 || (info->regs.eip >= __fix_to_virt(FIX_VDSO)
89 && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) 87 && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
90 || info->regs.esp < PAGE_OFFSET; 88 || info->regs.esp < PAGE_OFFSET;
91#endif
92} 89}
93 90
94#else 91#else
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index 952fd695738..a5edf517b99 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -145,26 +145,13 @@ struct vm86plus_struct {
145 * at the end of the structure. Look at ptrace.h to see the "normal" 145 * at the end of the structure. Look at ptrace.h to see the "normal"
146 * setup. For user space layout see 'struct vm86_regs' above. 146 * setup. For user space layout see 'struct vm86_regs' above.
147 */ 147 */
148#include <asm/ptrace.h>
148 149
149struct kernel_vm86_regs { 150struct kernel_vm86_regs {
150/* 151/*
151 * normal regs, with special meaning for the segment descriptors.. 152 * normal regs, with special meaning for the segment descriptors..
152 */ 153 */
153 long ebx; 154 struct pt_regs pt;
154 long ecx;
155 long edx;
156 long esi;
157 long edi;
158 long ebp;
159 long eax;
160 long __null_ds;
161 long __null_es;
162 long orig_eax;
163 long eip;
164 unsigned short cs, __csh;
165 long eflags;
166 long esp;
167 unsigned short ss, __ssh;
168/* 155/*
169 * these are specific to v86 mode: 156 * these are specific to v86 mode:
170 */ 157 */
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index 15818a18bc5..4a1e48b9f40 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -10,7 +10,6 @@ header-y += intrinsics.h
10header-y += perfmon_default_smpl.h 10header-y += perfmon_default_smpl.h
11header-y += ptrace_offsets.h 11header-y += ptrace_offsets.h
12header-y += rse.h 12header-y += rse.h
13header-y += setup.h
14header-y += ucontext.h 13header-y += ucontext.h
15 14
16unifdef-y += perfmon.h 15unifdef-y += perfmon.h
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 99a8f8e1218..ebd5887f4b1 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -50,7 +50,8 @@ dma_set_mask (struct device *dev, u64 mask)
50extern int dma_get_cache_alignment(void); 50extern int dma_get_cache_alignment(void);
51 51
52static inline void 52static inline void
53dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir) 53dma_cache_sync (struct device *dev, void *vaddr, size_t size,
54 enum dma_data_direction dir)
54{ 55{
55 /* 56 /*
56 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to 57 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
@@ -59,6 +60,6 @@ dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
59 mb(); 60 mb();
60} 61}
61 62
62#define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */ 63#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
63 64
64#endif /* _ASM_IA64_DMA_MAPPING_H */ 65#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 07d77f3a8cb..8a98a265413 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -59,7 +59,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
59 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) 59 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
60 return -EFAULT; 60 return -EFAULT;
61 61
62 inc_preempt_count(); 62 pagefault_disable();
63 63
64 switch (op) { 64 switch (op) {
65 case FUTEX_OP_SET: 65 case FUTEX_OP_SET:
@@ -83,7 +83,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
83 ret = -ENOSYS; 83 ret = -ENOSYS;
84 } 84 }
85 85
86 dec_preempt_count(); 86 pagefault_enable();
87 87
88 if (!ret) { 88 if (!ret) {
89 switch (cmp) { 89 switch (cmp) {
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 9cb68e9b377..393e04c42a2 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -60,7 +60,7 @@ static inline void *pgtable_quicklist_alloc(void)
60static inline void pgtable_quicklist_free(void *pgtable_entry) 60static inline void pgtable_quicklist_free(void *pgtable_entry)
61{ 61{
62#ifdef CONFIG_NUMA 62#ifdef CONFIG_NUMA
63 unsigned long nid = page_to_nid(virt_to_page(pgtable_entry)); 63 int nid = page_to_nid(virt_to_page(pgtable_entry));
64 64
65 if (unlikely(nid != numa_node_id())) { 65 if (unlikely(nid != numa_node_id())) {
66 free_page((unsigned long)pgtable_entry); 66 free_page((unsigned long)pgtable_entry);
diff --git a/include/asm-m32r/setup.h b/include/asm-m32r/setup.h
index 52f4fa29abf..6a0b32202d4 100644
--- a/include/asm-m32r/setup.h
+++ b/include/asm-m32r/setup.h
@@ -1,6 +1,11 @@
1/* 1/*
2 * This is set up by the setup-routine at boot-time 2 * This is set up by the setup-routine at boot-time
3 */ 3 */
4
5#define COMMAND_LINE_SIZE 512
6
7#ifdef __KERNEL__
8
4#define PARAM ((unsigned char *)empty_zero_page) 9#define PARAM ((unsigned char *)empty_zero_page)
5 10
6#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000)) 11#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
@@ -18,8 +23,6 @@
18 23
19#define SCREEN_INFO (*(struct screen_info *) (PARAM+0x200)) 24#define SCREEN_INFO (*(struct screen_info *) (PARAM+0x200))
20 25
21#define COMMAND_LINE_SIZE (512)
22
23#define RAMDISK_IMAGE_START_MASK (0x07FF) 26#define RAMDISK_IMAGE_START_MASK (0x07FF)
24#define RAMDISK_PROMPT_FLAG (0x8000) 27#define RAMDISK_PROMPT_FLAG (0x8000)
25#define RAMDISK_LOAD_FLAG (0x4000) 28#define RAMDISK_LOAD_FLAG (0x4000)
@@ -27,3 +30,5 @@
27extern unsigned long memory_start; 30extern unsigned long memory_start;
28extern unsigned long memory_end; 31extern unsigned long memory_end;
29 32
33#endif /* __KERNEL__ */
34
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index 95aa34298d8..5b66bd3c6ed 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -296,117 +296,6 @@
296#ifdef __KERNEL__ 296#ifdef __KERNEL__
297 297
298#define NR_syscalls 285 298#define NR_syscalls 285
299#include <linux/err.h>
300
301/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
302 * <asm-m32r/errno.h>
303 */
304
305#include <asm/syscall.h> /* SYSCALL_* */
306
307#define __syscall_return(type, res) \
308do { \
309 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
310 /* Avoid using "res" which is declared to be in register r0; \
311 errno might expand to a function call and clobber it. */ \
312 int __err = -(res); \
313 errno = __err; \
314 res = -1; \
315 } \
316 return (type) (res); \
317} while (0)
318
319#define _syscall0(type,name) \
320type name(void) \
321{ \
322register long __scno __asm__ ("r7") = __NR_##name; \
323register long __res __asm__("r0"); \
324__asm__ __volatile__ (\
325 "trap #" SYSCALL_VECTOR "|| nop"\
326 : "=r" (__res) \
327 : "r" (__scno) \
328 : "memory"); \
329__syscall_return(type,__res); \
330}
331
332#define _syscall1(type,name,type1,arg1) \
333type name(type1 arg1) \
334{ \
335register long __scno __asm__ ("r7") = __NR_##name; \
336register long __res __asm__ ("r0") = (long)(arg1); \
337__asm__ __volatile__ (\
338 "trap #" SYSCALL_VECTOR "|| nop"\
339 : "=r" (__res) \
340 : "r" (__scno), "0" (__res) \
341 : "memory"); \
342__syscall_return(type,__res); \
343}
344
345#define _syscall2(type,name,type1,arg1,type2,arg2) \
346type name(type1 arg1,type2 arg2) \
347{ \
348register long __scno __asm__ ("r7") = __NR_##name; \
349register long __arg2 __asm__ ("r1") = (long)(arg2); \
350register long __res __asm__ ("r0") = (long)(arg1); \
351__asm__ __volatile__ (\
352 "trap #" SYSCALL_VECTOR "|| nop"\
353 : "=r" (__res) \
354 : "r" (__scno), "0" (__res), "r" (__arg2) \
355 : "memory"); \
356__syscall_return(type,__res); \
357}
358
359#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
360type name(type1 arg1,type2 arg2,type3 arg3) \
361{ \
362register long __scno __asm__ ("r7") = __NR_##name; \
363register long __arg3 __asm__ ("r2") = (long)(arg3); \
364register long __arg2 __asm__ ("r1") = (long)(arg2); \
365register long __res __asm__ ("r0") = (long)(arg1); \
366__asm__ __volatile__ (\
367 "trap #" SYSCALL_VECTOR "|| nop"\
368 : "=r" (__res) \
369 : "r" (__scno), "0" (__res), "r" (__arg2), \
370 "r" (__arg3) \
371 : "memory"); \
372__syscall_return(type,__res); \
373}
374
375#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
376type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
377{ \
378register long __scno __asm__ ("r7") = __NR_##name; \
379register long __arg4 __asm__ ("r3") = (long)(arg4); \
380register long __arg3 __asm__ ("r2") = (long)(arg3); \
381register long __arg2 __asm__ ("r1") = (long)(arg2); \
382register long __res __asm__ ("r0") = (long)(arg1); \
383__asm__ __volatile__ (\
384 "trap #" SYSCALL_VECTOR "|| nop"\
385 : "=r" (__res) \
386 : "r" (__scno), "0" (__res), "r" (__arg2), \
387 "r" (__arg3), "r" (__arg4) \
388 : "memory"); \
389__syscall_return(type,__res); \
390}
391
392#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
393 type5,arg5) \
394type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
395{ \
396register long __scno __asm__ ("r7") = __NR_##name; \
397register long __arg5 __asm__ ("r4") = (long)(arg5); \
398register long __arg4 __asm__ ("r3") = (long)(arg4); \
399register long __arg3 __asm__ ("r2") = (long)(arg3); \
400register long __arg2 __asm__ ("r1") = (long)(arg2); \
401register long __res __asm__ ("r0") = (long)(arg1); \
402__asm__ __volatile__ (\
403 "trap #" SYSCALL_VECTOR "|| nop"\
404 : "=r" (__res) \
405 : "r" (__scno), "0" (__res), "r" (__arg2), \
406 "r" (__arg3), "r" (__arg4), "r" (__arg5) \
407 : "memory"); \
408__syscall_return(type,__res); \
409}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_STAT64
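The block removed above is the legacy set of _syscallN() wrapper macros; the same removal repeats for every architecture's unistd.h further down. User space that still needs a raw system call is expected to go through the C library's syscall() interface instead. A minimal sketch of that replacement usage, assuming an ordinary hosted C environment with <sys/syscall.h> (none of this code is part of the patch):

	/* Sketch: what callers use now that the _syscall3()-style macros are gone. */
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <stdio.h>

	int main(void)
	{
		/* syscall() returns -1 and sets errno on failure, which is the
		 * bookkeeping the removed __syscall_return() macros did by hand. */
		long pid = syscall(SYS_getpid);
		printf("pid = %ld\n", pid);
		return 0;
	}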
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index d90d841d3df..00259ed6fc9 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -21,7 +21,7 @@ static inline int dma_get_cache_alignment(void)
 	return 1 << L1_CACHE_SHIFT;
 }
 
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
@@ -41,7 +41,7 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 	dma_free_coherent(dev, size, addr, handle);
 }
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	/* we use coherent allocation, so not much to do here. */
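The two hunks above change dma_is_consistent() and dma_cache_sync() to take the owning struct device * as their first argument; the same signature change appears in the mips, parisc, powerpc, sh and sh64 headers below. A hypothetical driver call site under the new prototypes (the function, dev and buf names are illustrative only, not from the patch):

	/* Sketch: flushing a streaming buffer with the updated API. */
	static void example_push_buffer(struct device *dev, void *buf, size_t len)
	{
		/* The device pointer now comes first; the rest is unchanged. */
		dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
	}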
diff --git a/include/asm-m68k/setup.h b/include/asm-m68k/setup.h
index 7facc9a46e7..2a8853cd655 100644
--- a/include/asm-m68k/setup.h
+++ b/include/asm-m68k/setup.h
@@ -41,8 +41,12 @@
 #define MACH_Q40	10
 #define MACH_SUN3X	11
 
+#define COMMAND_LINE_SIZE 256
+
 #ifdef __KERNEL__
 
+#define CL_SIZE COMMAND_LINE_SIZE
+
 #ifndef __ASSEMBLY__
 extern unsigned long m68k_machtype;
 #endif /* !__ASSEMBLY__ */
@@ -355,8 +359,6 @@ extern int m68k_is040or060;
  */
 
 #define NUM_MEMINFO	4
-#define CL_SIZE		256
-#define COMMAND_LINE_SIZE	CL_SIZE
 
 #ifndef __ASSEMBLY__
 struct mem_info {
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index ad4348058c6..fdbb60e6a0d 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -317,103 +317,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls		311
320#include <linux/err.h>
321
322/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
323 <asm-m68k/errno.h> */
324
325#define __syscall_return(type, res) \
326do { \
327 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
328 /* avoid using res which is declared to be in register d0; \
329 errno might expand to a function call and clobber it. */ \
330 int __err = -(res); \
331 errno = __err; \
332 res = -1; \
333 } \
334 return (type) (res); \
335} while (0)
336
337#define _syscall0(type,name) \
338type name(void) \
339{ \
340register long __res __asm__ ("%d0") = __NR_##name; \
341__asm__ __volatile__ ("trap #0" \
342 : "+d" (__res) ); \
343__syscall_return(type,__res); \
344}
345
346#define _syscall1(type,name,atype,a) \
347type name(atype a) \
348{ \
349register long __res __asm__ ("%d0") = __NR_##name; \
350register long __a __asm__ ("%d1") = (long)(a); \
351__asm__ __volatile__ ("trap #0" \
352 : "+d" (__res) \
353 : "d" (__a) ); \
354__syscall_return(type,__res); \
355}
356
357#define _syscall2(type,name,atype,a,btype,b) \
358type name(atype a,btype b) \
359{ \
360register long __res __asm__ ("%d0") = __NR_##name; \
361register long __a __asm__ ("%d1") = (long)(a); \
362register long __b __asm__ ("%d2") = (long)(b); \
363__asm__ __volatile__ ("trap #0" \
364 : "+d" (__res) \
365 : "d" (__a), "d" (__b) \
366 ); \
367__syscall_return(type,__res); \
368}
369
370#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
371type name(atype a,btype b,ctype c) \
372{ \
373register long __res __asm__ ("%d0") = __NR_##name; \
374register long __a __asm__ ("%d1") = (long)(a); \
375register long __b __asm__ ("%d2") = (long)(b); \
376register long __c __asm__ ("%d3") = (long)(c); \
377__asm__ __volatile__ ("trap #0" \
378 : "+d" (__res) \
379 : "d" (__a), "d" (__b), \
380 "d" (__c) \
381 ); \
382__syscall_return(type,__res); \
383}
384
385#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
386type name (atype a, btype b, ctype c, dtype d) \
387{ \
388register long __res __asm__ ("%d0") = __NR_##name; \
389register long __a __asm__ ("%d1") = (long)(a); \
390register long __b __asm__ ("%d2") = (long)(b); \
391register long __c __asm__ ("%d3") = (long)(c); \
392register long __d __asm__ ("%d4") = (long)(d); \
393__asm__ __volatile__ ("trap #0" \
394 : "+d" (__res) \
395 : "d" (__a), "d" (__b), \
396 "d" (__c), "d" (__d) \
397 ); \
398__syscall_return(type,__res); \
399}
400
401#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
402type name (atype a,btype b,ctype c,dtype d,etype e) \
403{ \
404register long __res __asm__ ("%d0") = __NR_##name; \
405register long __a __asm__ ("%d1") = (long)(a); \
406register long __b __asm__ ("%d2") = (long)(b); \
407register long __c __asm__ ("%d3") = (long)(c); \
408register long __d __asm__ ("%d4") = (long)(d); \
409register long __e __asm__ ("%d5") = (long)(e); \
410__asm__ __volatile__ ("trap #0" \
411 : "+d" (__res) \
412 : "d" (__a), "d" (__b), \
413 "d" (__c), "d" (__d), "d" (__e) \
414 ); \
415__syscall_return(type,__res); \
416}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-m68knommu/setup.h b/include/asm-m68knommu/setup.h
index d2b0fcce41b..fb86bb2a607 100644
--- a/include/asm-m68knommu/setup.h
+++ b/include/asm-m68knommu/setup.h
@@ -1,5 +1,10 @@
+#ifdef __KERNEL__
+
 #include <asm-m68k/setup.h>
 
 /* We have a bigger command line buffer. */
 #undef COMMAND_LINE_SIZE
+
+#endif /* __KERNEL__ */
+
 #define COMMAND_LINE_SIZE 512
diff --git a/include/asm-m68knommu/unistd.h b/include/asm-m68knommu/unistd.h
index ebaf0319711..82e03195f32 100644
--- a/include/asm-m68knommu/unistd.h
+++ b/include/asm-m68knommu/unistd.h
@@ -318,156 +318,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls		311
321#include <linux/err.h>
322
323/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
324 <asm-m68k/errno.h> */
325
326#define __syscall_return(type, res) \
327do { \
328 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
329 /* avoid using res which is declared to be in register d0; \
330 errno might expand to a function call and clobber it. */ \
331 int __err = -(res); \
332 errno = __err; \
333 res = -1; \
334 } \
335 return (type) (res); \
336} while (0)
337
338#define _syscall0(type, name) \
339type name(void) \
340{ \
341 long __res; \
342 __asm__ __volatile__ ("movel %1, %%d0\n\t" \
343 "trap #0\n\t" \
344 "movel %%d0, %0" \
345 : "=g" (__res) \
346 : "i" (__NR_##name) \
347 : "cc", "%d0"); \
348 if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
349 errno = -__res; \
350 __res = -1; \
351 } \
352 return (type)__res; \
353}
354
355#define _syscall1(type, name, atype, a) \
356type name(atype a) \
357{ \
358 long __res; \
359 __asm__ __volatile__ ("movel %2, %%d1\n\t" \
360 "movel %1, %%d0\n\t" \
361 "trap #0\n\t" \
362 "movel %%d0, %0" \
363 : "=g" (__res) \
364 : "i" (__NR_##name), \
365 "g" ((long)a) \
366 : "cc", "%d0", "%d1"); \
367 if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
368 errno = -__res; \
369 __res = -1; \
370 } \
371 return (type)__res; \
372}
373
374#define _syscall2(type, name, atype, a, btype, b) \
375type name(atype a, btype b) \
376{ \
377 long __res; \
378 __asm__ __volatile__ ("movel %3, %%d2\n\t" \
379 "movel %2, %%d1\n\t" \
380 "movel %1, %%d0\n\t" \
381 "trap #0\n\t" \
382 "movel %%d0, %0" \
383 : "=g" (__res) \
384 : "i" (__NR_##name), \
385 "a" ((long)a), \
386 "g" ((long)b) \
387 : "cc", "%d0", "%d1", "%d2"); \
388 if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
389 errno = -__res; \
390 __res = -1; \
391 } \
392 return (type)__res; \
393}
394
395#define _syscall3(type, name, atype, a, btype, b, ctype, c) \
396type name(atype a, btype b, ctype c) \
397{ \
398 long __res; \
399 __asm__ __volatile__ ("movel %4, %%d3\n\t" \
400 "movel %3, %%d2\n\t" \
401 "movel %2, %%d1\n\t" \
402 "movel %1, %%d0\n\t" \
403 "trap #0\n\t" \
404 "movel %%d0, %0" \
405 : "=g" (__res) \
406 : "i" (__NR_##name), \
407 "a" ((long)a), \
408 "a" ((long)b), \
409 "g" ((long)c) \
410 : "cc", "%d0", "%d1", "%d2", "%d3"); \
411 if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
412 errno = -__res; \
413 __res = -1; \
414 } \
415 return (type)__res; \
416}
417
418#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d) \
419type name(atype a, btype b, ctype c, dtype d) \
420{ \
421 long __res; \
422 __asm__ __volatile__ ("movel %5, %%d4\n\t" \
423 "movel %4, %%d3\n\t" \
424 "movel %3, %%d2\n\t" \
425 "movel %2, %%d1\n\t" \
426 "movel %1, %%d0\n\t" \
427 "trap #0\n\t" \
428 "movel %%d0, %0" \
429 : "=g" (__res) \
430 : "i" (__NR_##name), \
431 "a" ((long)a), \
432 "a" ((long)b), \
433 "a" ((long)c), \
434 "g" ((long)d) \
435 : "cc", "%d0", "%d1", "%d2", "%d3", \
436 "%d4"); \
437 if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
438 errno = -__res; \
439 __res = -1; \
440 } \
441 return (type)__res; \
442}
443
444#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e) \
445type name(atype a, btype b, ctype c, dtype d, etype e) \
446{ \
447 long __res; \
448 __asm__ __volatile__ ("movel %6, %%d5\n\t" \
449 "movel %5, %%d4\n\t" \
450 "movel %4, %%d3\n\t" \
451 "movel %3, %%d2\n\t" \
452 "movel %2, %%d1\n\t" \
453 "movel %1, %%d0\n\t" \
454 "trap #0\n\t" \
455 "movel %%d0, %0" \
456 : "=g" (__res) \
457 : "i" (__NR_##name), \
458 "a" ((long)a), \
459 "a" ((long)b), \
460 "a" ((long)c), \
461 "a" ((long)d), \
462 "g" ((long)e) \
463 : "cc", "%d0", "%d1", "%d2", "%d3", \
464 "%d4", "%d5"); \
465 if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
466 errno = -__res; \
467 __res = -1; \
468 } \
469 return (type)__res; \
470}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index 43288634c38..236d1a467cc 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -63,9 +63,9 @@ dma_get_cache_alignment(void)
 	return 128;
 }
 
-extern int dma_is_consistent(dma_addr_t dma_addr);
+extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 927a216bd53..47e5679c235 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -115,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
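This hunk swaps the open-coded inc_preempt_count()/dec_preempt_count() pair for pagefault_disable()/pagefault_enable() around the user-space access; the highmem.h and asm-ppc/highmem.h hunks below apply the same pattern to kmap_atomic()/kunmap_atomic(). A rough sketch of the bracketing these helpers provide (the __do_atomic_op() call is a placeholder, not code from the patch):

	/* Sketch: faults inside the bracket must fail fast instead of sleeping. */
	static int atomic_user_op(int __user *uaddr)
	{
		int ret;

		pagefault_disable();
		ret = __do_atomic_op(uaddr);	/* placeholder for the arch-specific access */
		pagefault_enable();

		return ret;
	}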
diff --git a/include/asm-mips/highmem.h b/include/asm-mips/highmem.h
index c976bfaaba8..f8c8182f7f2 100644
--- a/include/asm-mips/highmem.h
+++ b/include/asm-mips/highmem.h
@@ -21,6 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/uaccess.h>
 #include <asm/kmap_types.h>
 
 /* undef for production */
@@ -70,11 +71,16 @@ static inline void *kmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
+	pagefault_disable();
 	return page_address(page);
 }
 
-static inline void kunmap_atomic(void *kvaddr, enum km_type type) { }
-#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
+static inline void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+	pagefault_enable();
+}
+
+#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
 
 #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
 
diff --git a/include/asm-mips/setup.h b/include/asm-mips/setup.h
index 737fa4a6912..70009a90263 100644
--- a/include/asm-mips/setup.h
+++ b/include/asm-mips/setup.h
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
 #ifndef _MIPS_SETUP_H
 #define _MIPS_SETUP_H
 
 #define COMMAND_LINE_SIZE	256
 
 #endif /* __SETUP_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index ec56aa52f66..696cff39a1d 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -933,268 +933,6 @@
 
 #ifndef __ASSEMBLY__
 
936/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
937#define _syscall0(type,name) \
938type name(void) \
939{ \
940 register unsigned long __a3 asm("$7"); \
941 unsigned long __v0; \
942 \
943 __asm__ volatile ( \
944 ".set\tnoreorder\n\t" \
945 "li\t$2, %2\t\t\t# " #name "\n\t" \
946 "syscall\n\t" \
947 "move\t%0, $2\n\t" \
948 ".set\treorder" \
949 : "=&r" (__v0), "=r" (__a3) \
950 : "i" (__NR_##name) \
951 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
952 "memory"); \
953 \
954 if (__a3 == 0) \
955 return (type) __v0; \
956 errno = __v0; \
957 return (type) -1; \
958}
959
960/*
961 * DANGER: This macro isn't usable for the pipe(2) call
962 * which has a unusual return convention.
963 */
964#define _syscall1(type,name,atype,a) \
965type name(atype a) \
966{ \
967 register unsigned long __a0 asm("$4") = (unsigned long) a; \
968 register unsigned long __a3 asm("$7"); \
969 unsigned long __v0; \
970 \
971 __asm__ volatile ( \
972 ".set\tnoreorder\n\t" \
973 "li\t$2, %3\t\t\t# " #name "\n\t" \
974 "syscall\n\t" \
975 "move\t%0, $2\n\t" \
976 ".set\treorder" \
977 : "=&r" (__v0), "=r" (__a3) \
978 : "r" (__a0), "i" (__NR_##name) \
979 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
980 "memory"); \
981 \
982 if (__a3 == 0) \
983 return (type) __v0; \
984 errno = __v0; \
985 return (type) -1; \
986}
987
988#define _syscall2(type,name,atype,a,btype,b) \
989type name(atype a, btype b) \
990{ \
991 register unsigned long __a0 asm("$4") = (unsigned long) a; \
992 register unsigned long __a1 asm("$5") = (unsigned long) b; \
993 register unsigned long __a3 asm("$7"); \
994 unsigned long __v0; \
995 \
996 __asm__ volatile ( \
997 ".set\tnoreorder\n\t" \
998 "li\t$2, %4\t\t\t# " #name "\n\t" \
999 "syscall\n\t" \
1000 "move\t%0, $2\n\t" \
1001 ".set\treorder" \
1002 : "=&r" (__v0), "=r" (__a3) \
1003 : "r" (__a0), "r" (__a1), "i" (__NR_##name) \
1004 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1005 "memory"); \
1006 \
1007 if (__a3 == 0) \
1008 return (type) __v0; \
1009 errno = __v0; \
1010 return (type) -1; \
1011}
1012
1013#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
1014type name(atype a, btype b, ctype c) \
1015{ \
1016 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1017 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1018 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1019 register unsigned long __a3 asm("$7"); \
1020 unsigned long __v0; \
1021 \
1022 __asm__ volatile ( \
1023 ".set\tnoreorder\n\t" \
1024 "li\t$2, %5\t\t\t# " #name "\n\t" \
1025 "syscall\n\t" \
1026 "move\t%0, $2\n\t" \
1027 ".set\treorder" \
1028 : "=&r" (__v0), "=r" (__a3) \
1029 : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
1030 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1031 "memory"); \
1032 \
1033 if (__a3 == 0) \
1034 return (type) __v0; \
1035 errno = __v0; \
1036 return (type) -1; \
1037}
1038
1039#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
1040type name(atype a, btype b, ctype c, dtype d) \
1041{ \
1042 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1043 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1044 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1045 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1046 unsigned long __v0; \
1047 \
1048 __asm__ volatile ( \
1049 ".set\tnoreorder\n\t" \
1050 "li\t$2, %5\t\t\t# " #name "\n\t" \
1051 "syscall\n\t" \
1052 "move\t%0, $2\n\t" \
1053 ".set\treorder" \
1054 : "=&r" (__v0), "+r" (__a3) \
1055 : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
1056 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1057 "memory"); \
1058 \
1059 if (__a3 == 0) \
1060 return (type) __v0; \
1061 errno = __v0; \
1062 return (type) -1; \
1063}
1064
1065#if (_MIPS_SIM == _MIPS_SIM_ABI32)
1066
1067/*
1068 * Using those means your brain needs more than an oil change ;-)
1069 */
1070
1071#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
1072type name(atype a, btype b, ctype c, dtype d, etype e) \
1073{ \
1074 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1075 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1076 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1077 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1078 unsigned long __v0; \
1079 \
1080 __asm__ volatile ( \
1081 ".set\tnoreorder\n\t" \
1082 "lw\t$2, %6\n\t" \
1083 "subu\t$29, 32\n\t" \
1084 "sw\t$2, 16($29)\n\t" \
1085 "li\t$2, %5\t\t\t# " #name "\n\t" \
1086 "syscall\n\t" \
1087 "move\t%0, $2\n\t" \
1088 "addiu\t$29, 32\n\t" \
1089 ".set\treorder" \
1090 : "=&r" (__v0), "+r" (__a3) \
1091 : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
1092 "m" ((unsigned long)e) \
1093 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1094 "memory"); \
1095 \
1096 if (__a3 == 0) \
1097 return (type) __v0; \
1098 errno = __v0; \
1099 return (type) -1; \
1100}
1101
1102#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
1103type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
1104{ \
1105 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1106 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1107 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1108 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1109 unsigned long __v0; \
1110 \
1111 __asm__ volatile ( \
1112 ".set\tnoreorder\n\t" \
1113 "lw\t$2, %6\n\t" \
1114 "lw\t$8, %7\n\t" \
1115 "subu\t$29, 32\n\t" \
1116 "sw\t$2, 16($29)\n\t" \
1117 "sw\t$8, 20($29)\n\t" \
1118 "li\t$2, %5\t\t\t# " #name "\n\t" \
1119 "syscall\n\t" \
1120 "move\t%0, $2\n\t" \
1121 "addiu\t$29, 32\n\t" \
1122 ".set\treorder" \
1123 : "=&r" (__v0), "+r" (__a3) \
1124 : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
1125 "m" ((unsigned long)e), "m" ((unsigned long)f) \
1126 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1127 "memory"); \
1128 \
1129 if (__a3 == 0) \
1130 return (type) __v0; \
1131 errno = __v0; \
1132 return (type) -1; \
1133}
1134
1135#endif /* (_MIPS_SIM == _MIPS_SIM_ABI32) */
1136
1137#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
1138
1139#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
1140type name (atype a,btype b,ctype c,dtype d,etype e) \
1141{ \
1142 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1143 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1144 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1145 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1146 register unsigned long __a4 asm("$8") = (unsigned long) e; \
1147 unsigned long __v0; \
1148 \
1149 __asm__ volatile ( \
1150 ".set\tnoreorder\n\t" \
1151 "li\t$2, %6\t\t\t# " #name "\n\t" \
1152 "syscall\n\t" \
1153 "move\t%0, $2\n\t" \
1154 ".set\treorder" \
1155 : "=&r" (__v0), "+r" (__a3) \
1156 : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "i" (__NR_##name) \
1157 : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1158 "memory"); \
1159 \
1160 if (__a3 == 0) \
1161 return (type) __v0; \
1162 errno = __v0; \
1163 return (type) -1; \
1164}
1165
1166#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
1167type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
1168{ \
1169 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1170 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1171 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1172 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1173 register unsigned long __a4 asm("$8") = (unsigned long) e; \
1174 register unsigned long __a5 asm("$9") = (unsigned long) f; \
1175 unsigned long __v0; \
1176 \
1177 __asm__ volatile ( \
1178 ".set\tnoreorder\n\t" \
1179 "li\t$2, %7\t\t\t# " #name "\n\t" \
1180 "syscall\n\t" \
1181 "move\t%0, $2\n\t" \
1182 ".set\treorder" \
1183 : "=&r" (__v0), "+r" (__a3) \
1184 : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "r" (__a5), \
1185 "i" (__NR_##name) \
1186 : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1187 "memory"); \
1188 \
1189 if (__a3 == 0) \
1190 return (type) __v0; \
1191 errno = __v0; \
1192 return (type) -1; \
1193}
1194
1195#endif /* (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) */
1196
1197
 #define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index 1e387e1dad3..66f0b408c66 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -191,13 +191,13 @@ dma_get_cache_alignment(void)
 }
 
 static inline int
-dma_is_consistent(dma_addr_t dma_addr)
+dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	if(hppa_dma_ops->dma_sync_single_for_cpu)
diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
index d84bbb283fd..dbee6e60aa8 100644
--- a/include/asm-parisc/futex.h
+++ b/include/asm-parisc/futex.h
@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 7e38b5fddad..7c7de87bd8a 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -342,9 +342,9 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_is_consistent(d)	(0)
+#define dma_is_consistent(d, h)	(0)
 #else
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 #endif
 
 static inline int dma_get_cache_alignment(void)
@@ -378,7 +378,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index b5436642a10..d36426c01b6 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -124,12 +124,10 @@ typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
 # define ELF_DATA	ELFDATA2MSB
   typedef elf_greg_t64 elf_greg_t;
   typedef elf_gregset_t64 elf_gregset_t;
-# define elf_addr_t unsigned long
 #else
   /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
   typedef elf_greg_t32 elf_greg_t;
   typedef elf_gregset_t32 elf_gregset_t;
-# define elf_addr_t __u32
 #endif /* ELF_ARCH */
 
 /* Floating point registers */
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 936422e5489..3f3673fd3ff 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -43,7 +43,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index ae63db7b3e7..b0830db68f8 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -11,7 +11,7 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-extern kmem_cache_t *pgtable_cache[];
+extern struct kmem_cache *pgtable_cache[];
 
 #ifdef CONFIG_PPC_64K_PAGES
 #define PTE_CACHE_NUM	0
diff --git a/include/asm-powerpc/setup.h b/include/asm-powerpc/setup.h
index 3d9740aae01..817fac0a071 100644
--- a/include/asm-powerpc/setup.h
+++ b/include/asm-powerpc/setup.h
@@ -1,9 +1,6 @@
 #ifndef _ASM_POWERPC_SETUP_H
 #define _ASM_POWERPC_SETUP_H
 
-#ifdef __KERNEL__
-
 #define COMMAND_LINE_SIZE	512
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SETUP_H */
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 04b6c17cc59..0ae954e3d25 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -334,115 +334,6 @@
 
 #ifndef __ASSEMBLY__
 
337/* On powerpc a system call basically clobbers the same registers like a
338 * function call, with the exception of LR (which is needed for the
339 * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
340 * an error return status).
341 */
342
343#define __syscall_nr(nr, type, name, args...) \
344 unsigned long __sc_ret, __sc_err; \
345 { \
346 register unsigned long __sc_0 __asm__ ("r0"); \
347 register unsigned long __sc_3 __asm__ ("r3"); \
348 register unsigned long __sc_4 __asm__ ("r4"); \
349 register unsigned long __sc_5 __asm__ ("r5"); \
350 register unsigned long __sc_6 __asm__ ("r6"); \
351 register unsigned long __sc_7 __asm__ ("r7"); \
352 register unsigned long __sc_8 __asm__ ("r8"); \
353 \
354 __sc_loadargs_##nr(name, args); \
355 __asm__ __volatile__ \
356 ("sc \n\t" \
357 "mfcr %0 " \
358 : "=&r" (__sc_0), \
359 "=&r" (__sc_3), "=&r" (__sc_4), \
360 "=&r" (__sc_5), "=&r" (__sc_6), \
361 "=&r" (__sc_7), "=&r" (__sc_8) \
362 : __sc_asm_input_##nr \
363 : "cr0", "ctr", "memory", \
364 "r9", "r10","r11", "r12"); \
365 __sc_ret = __sc_3; \
366 __sc_err = __sc_0; \
367 } \
368 if (__sc_err & 0x10000000) \
369 { \
370 errno = __sc_ret; \
371 __sc_ret = -1; \
372 } \
373 return (type) __sc_ret
374
375#define __sc_loadargs_0(name, dummy...) \
376 __sc_0 = __NR_##name
377#define __sc_loadargs_1(name, arg1) \
378 __sc_loadargs_0(name); \
379 __sc_3 = (unsigned long) (arg1)
380#define __sc_loadargs_2(name, arg1, arg2) \
381 __sc_loadargs_1(name, arg1); \
382 __sc_4 = (unsigned long) (arg2)
383#define __sc_loadargs_3(name, arg1, arg2, arg3) \
384 __sc_loadargs_2(name, arg1, arg2); \
385 __sc_5 = (unsigned long) (arg3)
386#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \
387 __sc_loadargs_3(name, arg1, arg2, arg3); \
388 __sc_6 = (unsigned long) (arg4)
389#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \
390 __sc_loadargs_4(name, arg1, arg2, arg3, arg4); \
391 __sc_7 = (unsigned long) (arg5)
392#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
393 __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5); \
394 __sc_8 = (unsigned long) (arg6)
395
396#define __sc_asm_input_0 "0" (__sc_0)
397#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
398#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
399#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
400#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
401#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
402#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
403
404#define _syscall0(type,name) \
405type name(void) \
406{ \
407 __syscall_nr(0, type, name); \
408}
409
410#define _syscall1(type,name,type1,arg1) \
411type name(type1 arg1) \
412{ \
413 __syscall_nr(1, type, name, arg1); \
414}
415
416#define _syscall2(type,name,type1,arg1,type2,arg2) \
417type name(type1 arg1, type2 arg2) \
418{ \
419 __syscall_nr(2, type, name, arg1, arg2); \
420}
421
422#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
423type name(type1 arg1, type2 arg2, type3 arg3) \
424{ \
425 __syscall_nr(3, type, name, arg1, arg2, arg3); \
426}
427
428#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
429type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
430{ \
431 __syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \
432}
433
434#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
435type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
436{ \
437 __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
438}
439#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
440type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
441{ \
442 __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
443}
444
445
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/linkage.h>
diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 1d2c4ef81c2..f7b21ee302b 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -79,7 +79,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -101,8 +101,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -115,8 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	pte_clear(&init_mm, vaddr, kmap_pte+idx);
 	flush_tlb_page(NULL, vaddr);
 #endif
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 7664bacdd83..9574fe80a04 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -8,12 +8,13 @@
 #ifndef _ASM_S390_SETUP_H
 #define _ASM_S390_SETUP_H
 
+#define COMMAND_LINE_SIZE	896
+
 #ifdef __KERNEL__
 
 #include <asm/types.h>
 
 #define PARMAREA		0x10400
-#define COMMAND_LINE_SIZE	896
 #define MEMORY_CHUNKS		16	/* max 0x7fff */
 #define IPL_PARMBLOCK_ORIGIN	0x2000
 
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index 71d3c21b84f..fb6fef97d73 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -345,160 +345,6 @@
 
 #ifdef __KERNEL__
 
348#include <linux/err.h>
349
350#define __syscall_return(type, res) \
351do { \
352 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
353 errno = -(res); \
354 res = -1; \
355 } \
356 return (type) (res); \
357} while (0)
358
359#define _svc_clobber "1", "cc", "memory"
360
361#define _syscall0(type,name) \
362type name(void) { \
363 register long __svcres asm("2"); \
364 long __res; \
365 asm volatile( \
366 " .if %1 < 256\n" \
367 " svc %b1\n" \
368 " .else\n" \
369 " la %%r1,%1\n" \
370 " svc 0\n" \
371 " .endif" \
372 : "=d" (__svcres) \
373 : "i" (__NR_##name) \
374 : _svc_clobber); \
375 __res = __svcres; \
376 __syscall_return(type,__res); \
377}
378
379#define _syscall1(type,name,type1,arg1) \
380type name(type1 arg1) { \
381 register type1 __arg1 asm("2") = arg1; \
382 register long __svcres asm("2"); \
383 long __res; \
384 asm volatile( \
385 " .if %1 < 256\n" \
386 " svc %b1\n" \
387 " .else\n" \
388 " la %%r1,%1\n" \
389 " svc 0\n" \
390 " .endif" \
391 : "=d" (__svcres) \
392 : "i" (__NR_##name), \
393 "0" (__arg1) \
394 : _svc_clobber); \
395 __res = __svcres; \
396 __syscall_return(type,__res); \
397}
398
399#define _syscall2(type,name,type1,arg1,type2,arg2) \
400type name(type1 arg1, type2 arg2) { \
401 register type1 __arg1 asm("2") = arg1; \
402 register type2 __arg2 asm("3") = arg2; \
403 register long __svcres asm("2"); \
404 long __res; \
405 asm volatile( \
406 " .if %1 < 256\n" \
407 " svc %b1\n" \
408 " .else\n" \
409 " la %%r1,%1\n" \
410 " svc 0\n" \
411 " .endif" \
412 : "=d" (__svcres) \
413 : "i" (__NR_##name), \
414 "0" (__arg1), \
415 "d" (__arg2) \
416 : _svc_clobber ); \
417 __res = __svcres; \
418 __syscall_return(type,__res); \
419}
420
421#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
422type name(type1 arg1, type2 arg2, type3 arg3) { \
423 register type1 __arg1 asm("2") = arg1; \
424 register type2 __arg2 asm("3") = arg2; \
425 register type3 __arg3 asm("4") = arg3; \
426 register long __svcres asm("2"); \
427 long __res; \
428 asm volatile( \
429 " .if %1 < 256\n" \
430 " svc %b1\n" \
431 " .else\n" \
432 " la %%r1,%1\n" \
433 " svc 0\n" \
434 " .endif" \
435 : "=d" (__svcres) \
436 : "i" (__NR_##name), \
437 "0" (__arg1), \
438 "d" (__arg2), \
439 "d" (__arg3) \
440 : _svc_clobber); \
441 __res = __svcres; \
442 __syscall_return(type,__res); \
443}
444
445#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3, \
446 type4,name4) \
447type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
448 register type1 __arg1 asm("2") = arg1; \
449 register type2 __arg2 asm("3") = arg2; \
450 register type3 __arg3 asm("4") = arg3; \
451 register type4 __arg4 asm("5") = arg4; \
452 register long __svcres asm("2"); \
453 long __res; \
454 asm volatile( \
455 " .if %1 < 256\n" \
456 " svc %b1\n" \
457 " .else\n" \
458 " la %%r1,%1\n" \
459 " svc 0\n" \
460 " .endif" \
461 : "=d" (__svcres) \
462 : "i" (__NR_##name), \
463 "0" (__arg1), \
464 "d" (__arg2), \
465 "d" (__arg3), \
466 "d" (__arg4) \
467 : _svc_clobber); \
468 __res = __svcres; \
469 __syscall_return(type,__res); \
470}
471
472#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3, \
473 type4,name4,type5,name5) \
474type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
475 type5 arg5) { \
476 register type1 __arg1 asm("2") = arg1; \
477 register type2 __arg2 asm("3") = arg2; \
478 register type3 __arg3 asm("4") = arg3; \
479 register type4 __arg4 asm("5") = arg4; \
480 register type5 __arg5 asm("6") = arg5; \
481 register long __svcres asm("2"); \
482 long __res; \
483 asm volatile( \
484 " .if %1 < 256\n" \
485 " svc %b1\n" \
486 " .else\n" \
487 " la %%r1,%1\n" \
488 " svc 0\n" \
489 " .endif" \
490 : "=d" (__svcres) \
491 : "i" (__NR_##name), \
492 "0" (__arg1), \
493 "d" (__arg2), \
494 "d" (__arg3), \
495 "d" (__arg4), \
496 "d" (__arg5) \
497 : _svc_clobber); \
498 __res = __svcres; \
499 __syscall_return(type,__res); \
500}
501
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 56cd4b97723..37ab0c131a4 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -53,7 +53,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	consistent_sync(vaddr, size, (int)dir);
diff --git a/include/asm-sh/setup.h b/include/asm-sh/setup.h
index 34ca8a7f06b..1583c6b7bda 100644
--- a/include/asm-sh/setup.h
+++ b/include/asm-sh/setup.h
@@ -1,10 +1,12 @@
-#ifdef __KERNEL__
 #ifndef _SH_SETUP_H
 #define _SH_SETUP_H
 
 #define COMMAND_LINE_SIZE 256
 
+#ifdef __KERNEL__
+
 int setup_early_printk(char *);
 
-#endif /* _SH_SETUP_H */
 #endif /* __KERNEL__ */
+
+#endif /* _SH_SETUP_H */
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index 0cae1d24876..f982073dc6c 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -332,143 +332,6 @@
 
 #ifdef __KERNEL__
 
335#include <linux/err.h>
336
337/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
338 * see <asm-sh/errno.h> */
339
340#define __syscall_return(type, res) \
341do { \
342 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
343 /* Avoid using "res" which is declared to be in register r0; \
344 errno might expand to a function call and clobber it. */ \
345 int __err = -(res); \
346 errno = __err; \
347 res = -1; \
348 } \
349 return (type) (res); \
350} while (0)
351
352#if defined(__sh2__) || defined(__SH2E__) || defined(__SH2A__)
353#define SYSCALL_ARG0 "trapa #0x20"
354#define SYSCALL_ARG1 "trapa #0x21"
355#define SYSCALL_ARG2 "trapa #0x22"
356#define SYSCALL_ARG3 "trapa #0x23"
357#define SYSCALL_ARG4 "trapa #0x24"
358#define SYSCALL_ARG5 "trapa #0x25"
359#define SYSCALL_ARG6 "trapa #0x26"
360#else
361#define SYSCALL_ARG0 "trapa #0x10"
362#define SYSCALL_ARG1 "trapa #0x11"
363#define SYSCALL_ARG2 "trapa #0x12"
364#define SYSCALL_ARG3 "trapa #0x13"
365#define SYSCALL_ARG4 "trapa #0x14"
366#define SYSCALL_ARG5 "trapa #0x15"
367#define SYSCALL_ARG6 "trapa #0x16"
368#endif
369
370/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
371#define _syscall0(type,name) \
372type name(void) \
373{ \
374register long __sc0 __asm__ ("r3") = __NR_##name; \
375__asm__ __volatile__ (SYSCALL_ARG0 \
376 : "=z" (__sc0) \
377 : "0" (__sc0) \
378 : "memory" ); \
379__syscall_return(type,__sc0); \
380}
381
382#define _syscall1(type,name,type1,arg1) \
383type name(type1 arg1) \
384{ \
385register long __sc0 __asm__ ("r3") = __NR_##name; \
386register long __sc4 __asm__ ("r4") = (long) arg1; \
387__asm__ __volatile__ (SYSCALL_ARG1 \
388 : "=z" (__sc0) \
389 : "0" (__sc0), "r" (__sc4) \
390 : "memory"); \
391__syscall_return(type,__sc0); \
392}
393
394#define _syscall2(type,name,type1,arg1,type2,arg2) \
395type name(type1 arg1,type2 arg2) \
396{ \
397register long __sc0 __asm__ ("r3") = __NR_##name; \
398register long __sc4 __asm__ ("r4") = (long) arg1; \
399register long __sc5 __asm__ ("r5") = (long) arg2; \
400__asm__ __volatile__ (SYSCALL_ARG2 \
401 : "=z" (__sc0) \
402 : "0" (__sc0), "r" (__sc4), "r" (__sc5) \
403 : "memory"); \
404__syscall_return(type,__sc0); \
405}
406
407#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
408type name(type1 arg1,type2 arg2,type3 arg3) \
409{ \
410register long __sc0 __asm__ ("r3") = __NR_##name; \
411register long __sc4 __asm__ ("r4") = (long) arg1; \
412register long __sc5 __asm__ ("r5") = (long) arg2; \
413register long __sc6 __asm__ ("r6") = (long) arg3; \
414__asm__ __volatile__ (SYSCALL_ARG3 \
415 : "=z" (__sc0) \
416 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \
417 : "memory"); \
418__syscall_return(type,__sc0); \
419}
420
421#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
422type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
423{ \
424register long __sc0 __asm__ ("r3") = __NR_##name; \
425register long __sc4 __asm__ ("r4") = (long) arg1; \
426register long __sc5 __asm__ ("r5") = (long) arg2; \
427register long __sc6 __asm__ ("r6") = (long) arg3; \
428register long __sc7 __asm__ ("r7") = (long) arg4; \
429__asm__ __volatile__ (SYSCALL_ARG4 \
430 : "=z" (__sc0) \
431 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), \
432 "r" (__sc7) \
433 : "memory" ); \
434__syscall_return(type,__sc0); \
435}
436
437#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
438type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
439{ \
440register long __sc3 __asm__ ("r3") = __NR_##name; \
441register long __sc4 __asm__ ("r4") = (long) arg1; \
442register long __sc5 __asm__ ("r5") = (long) arg2; \
443register long __sc6 __asm__ ("r6") = (long) arg3; \
444register long __sc7 __asm__ ("r7") = (long) arg4; \
445register long __sc0 __asm__ ("r0") = (long) arg5; \
446__asm__ __volatile__ (SYSCALL_ARG5 \
447 : "=z" (__sc0) \
448 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \
449 "r" (__sc3) \
450 : "memory" ); \
451__syscall_return(type,__sc0); \
452}
453
454#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
455type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
456{ \
457register long __sc3 __asm__ ("r3") = __NR_##name; \
458register long __sc4 __asm__ ("r4") = (long) arg1; \
459register long __sc5 __asm__ ("r5") = (long) arg2; \
460register long __sc6 __asm__ ("r6") = (long) arg3; \
461register long __sc7 __asm__ ("r7") = (long) arg4; \
462register long __sc0 __asm__ ("r0") = (long) arg5; \
463register long __sc1 __asm__ ("r1") = (long) arg6; \
464__asm__ __volatile__ (SYSCALL_ARG6 \
465 : "=z" (__sc0) \
466 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \
467 "r" (__sc3), "r" (__sc1) \
468 : "memory" ); \
469__syscall_return(type,__sc0); \
470}
471
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index 68e27a8fca3..5efe906c59f 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -35,7 +35,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(NULL, size, vaddr, dma_handle);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	dma_cache_wback_inv((unsigned long)vaddr, size);
diff --git a/include/asm-sh64/setup.h b/include/asm-sh64/setup.h
index ebd42eb1b70..5b07b14c292 100644
--- a/include/asm-sh64/setup.h
+++ b/include/asm-sh64/setup.h
@@ -1,6 +1,10 @@
 #ifndef __ASM_SH64_SETUP_H
 #define __ASM_SH64_SETUP_H
 
+#define COMMAND_LINE_SIZE 256
+
+#ifdef __KERNEL__
+
 #define PARAM ((unsigned char *)empty_zero_page)
 #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
 #define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
@@ -12,5 +16,7 @@
 #define COMMAND_LINE ((char *) (PARAM+256))
 #define COMMAND_LINE_SIZE 256
 
+#endif /* __KERNEL__ */
+
 #endif /* __ASM_SH64_SETUP_H */
 
diff --git a/include/asm-sh64/unistd.h b/include/asm-sh64/unistd.h
index ee7828b27ad..1f38a7aacaa 100644
--- a/include/asm-sh64/unistd.h
+++ b/include/asm-sh64/unistd.h
@@ -347,148 +347,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 321
350#include <linux/err.h>
351
352/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
353 * see <asm-sh64/errno.h> */
354
355#define __syscall_return(type, res) \
356do { \
357 /* Note: when returning from kernel the return value is in r9 \
358 ** This prevents conflicts between return value and arg1 \
359 ** when dispatching signal handler, in other words makes \
360 ** life easier in the system call epilogue (see entry.S) \
361 */ \
362 register unsigned long __sr2 __asm__ ("r2") = res; \
363 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
364 errno = -(res); \
365 __sr2 = -1; \
366 } \
367 return (type) (__sr2); \
368} while (0)
369
370/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
371
372#define _syscall0(type,name) \
373type name(void) \
374{ \
375register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \
376__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "()" \
377 : "=r" (__sc0) \
378 : "r" (__sc0) ); \
379__syscall_return(type,__sc0); \
380}
381
382 /*
383 * The apparent spurious "dummy" assembler comment is *needed*,
384 * as without it, the compiler treats the arg<n> variables
385 * as no longer live just before the asm. The compiler can
386 * then optimize the storage into any registers it wishes.
387 * The additional dummy statement forces the compiler to put
388 * the arguments into the correct registers before the TRAPA.
389 */
390#define _syscall1(type,name,type1,arg1) \
391type name(type1 arg1) \
392{ \
393register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \
394register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
395__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2)" \
396 : "=r" (__sc0) \
397 : "r" (__sc0), "r" (__sc2)); \
398__asm__ __volatile__ ("!dummy %0 %1" \
399 : \
400 : "r" (__sc0), "r" (__sc2)); \
401__syscall_return(type,__sc0); \
402}
403
404#define _syscall2(type,name,type1,arg1,type2,arg2) \
405type name(type1 arg1,type2 arg2) \
406{ \
407register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \
408register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
409register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
410__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3)" \
411 : "=r" (__sc0) \
412 : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \
413__asm__ __volatile__ ("!dummy %0 %1 %2" \
414 : \
415 : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \
416__syscall_return(type,__sc0); \
417}
418
419#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
420type name(type1 arg1,type2 arg2,type3 arg3) \
421{ \
422register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \
423register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
424register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
425register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
426__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4)" \
427 : "=r" (__sc0) \
428 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \
429__asm__ __volatile__ ("!dummy %0 %1 %2 %3" \
430 : \
431 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \
432__syscall_return(type,__sc0); \
433}
434
435#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
436type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
437{ \
438register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \
439register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
440register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
441register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
442register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
443__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5)" \
444 : "=r" (__sc0) \
445 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
446__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4" \
447 : \
448 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
449__syscall_return(type,__sc0); \
450}
451
452#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
453type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
454{ \
455register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \
456register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
457register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
458register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
459register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
460register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \
461__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6)" \
462 : "=r" (__sc0) \
463 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
464 "r" (__sc6)); \
465__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5" \
466 : \
467 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
468 "r" (__sc6)); \
469__syscall_return(type,__sc0); \
470}
471
472#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
473type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
474{ \
475register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \
476register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
477register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
478register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
479register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
480register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \
481register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6; \
482__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)" \
483 : "=r" (__sc0) \
484 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
485 "r" (__sc6), "r" (__sc7)); \
486__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5 %6" \
487 : \
488 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
489 "r" (__sc6), "r" (__sc7)); \
490__syscall_return(type,__sc0); \
491}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index f7827fa4cd5..d5b2f8053b3 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -329,136 +329,6 @@
  * find a free slot in the 0-302 range.
  */
 
332#define _syscall0(type,name) \
333type name(void) \
334{ \
335long __res; \
336register long __g1 __asm__ ("g1") = __NR_##name; \
337__asm__ __volatile__ ("t 0x10\n\t" \
338 "bcc 1f\n\t" \
339 "mov %%o0, %0\n\t" \
340 "sub %%g0, %%o0, %0\n\t" \
341 "1:\n\t" \
342 : "=r" (__res)\
343 : "r" (__g1) \
344 : "o0", "cc"); \
345if (__res < -255 || __res >= 0) \
346 return (type) __res; \
347errno = -__res; \
348return -1; \
349}
350
351#define _syscall1(type,name,type1,arg1) \
352type name(type1 arg1) \
353{ \
354long __res; \
355register long __g1 __asm__ ("g1") = __NR_##name; \
356register long __o0 __asm__ ("o0") = (long)(arg1); \
357__asm__ __volatile__ ("t 0x10\n\t" \
358 "bcc 1f\n\t" \
359 "mov %%o0, %0\n\t" \
360 "sub %%g0, %%o0, %0\n\t" \
361 "1:\n\t" \
362 : "=r" (__res), "=&r" (__o0) \
363 : "1" (__o0), "r" (__g1) \
364 : "cc"); \
365if (__res < -255 || __res >= 0) \
366 return (type) __res; \
367errno = -__res; \
368return -1; \
369}
370
371#define _syscall2(type,name,type1,arg1,type2,arg2) \
372type name(type1 arg1,type2 arg2) \
373{ \
374long __res; \
375register long __g1 __asm__ ("g1") = __NR_##name; \
376register long __o0 __asm__ ("o0") = (long)(arg1); \
377register long __o1 __asm__ ("o1") = (long)(arg2); \
378__asm__ __volatile__ ("t 0x10\n\t" \
379 "bcc 1f\n\t" \
380 "mov %%o0, %0\n\t" \
381 "sub %%g0, %%o0, %0\n\t" \
382 "1:\n\t" \
383 : "=r" (__res), "=&r" (__o0) \
384 : "1" (__o0), "r" (__o1), "r" (__g1) \
385 : "cc"); \
386if (__res < -255 || __res >= 0) \
387 return (type) __res; \
388errno = -__res; \
389return -1; \
390}
391
392#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
393type name(type1 arg1,type2 arg2,type3 arg3) \
394{ \
395long __res; \
396register long __g1 __asm__ ("g1") = __NR_##name; \
397register long __o0 __asm__ ("o0") = (long)(arg1); \
398register long __o1 __asm__ ("o1") = (long)(arg2); \
399register long __o2 __asm__ ("o2") = (long)(arg3); \
400__asm__ __volatile__ ("t 0x10\n\t" \
401 "bcc 1f\n\t" \
402 "mov %%o0, %0\n\t" \
403 "sub %%g0, %%o0, %0\n\t" \
404 "1:\n\t" \
405 : "=r" (__res), "=&r" (__o0) \
406 : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
407 : "cc"); \
408if (__res < -255 || __res>=0) \
409 return (type) __res; \
410errno = -__res; \
411return -1; \
412}
413
414#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
415type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
416{ \
417long __res; \
418register long __g1 __asm__ ("g1") = __NR_##name; \
419register long __o0 __asm__ ("o0") = (long)(arg1); \
420register long __o1 __asm__ ("o1") = (long)(arg2); \
421register long __o2 __asm__ ("o2") = (long)(arg3); \
422register long __o3 __asm__ ("o3") = (long)(arg4); \
423__asm__ __volatile__ ("t 0x10\n\t" \
424 "bcc 1f\n\t" \
425 "mov %%o0, %0\n\t" \
426 "sub %%g0, %%o0, %0\n\t" \
427 "1:\n\t" \
428 : "=r" (__res), "=&r" (__o0) \
429 : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
430 : "cc"); \
431if (__res < -255 || __res>=0) \
432 return (type) __res; \
433errno = -__res; \
434return -1; \
435}
436
437#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
438 type5,arg5) \
439type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
440{ \
441long __res; \
442register long __g1 __asm__ ("g1") = __NR_##name; \
443register long __o0 __asm__ ("o0") = (long)(arg1); \
444register long __o1 __asm__ ("o1") = (long)(arg2); \
445register long __o2 __asm__ ("o2") = (long)(arg3); \
446register long __o3 __asm__ ("o3") = (long)(arg4); \
447register long __o4 __asm__ ("o4") = (long)(arg5); \
448__asm__ __volatile__ ("t 0x10\n\t" \
449 "bcc 1f\n\t" \
450 "mov %%o0, %0\n\t" \
451 "sub %%g0, %%o0, %0\n\t" \
452 "1:\n\t" \
453 : "=r" (__res), "=&r" (__o0) \
454 : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
455 : "cc"); \
456if (__res < -255 || __res>=0) \
457 return (type) __res; \
458errno = -__res; \
459return -1; \
460}
461
462#define __ARCH_WANT_IPC_PARSE_VERSION 332#define __ARCH_WANT_IPC_PARSE_VERSION
463#define __ARCH_WANT_OLD_READDIR 333#define __ARCH_WANT_OLD_READDIR
464#define __ARCH_WANT_STAT64 334#define __ARCH_WANT_STAT64
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 27c46fbeebd..2f858a2df94 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -181,7 +181,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t siz
181 181
182#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 182#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
183#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 183#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
184#define dma_is_consistent(d) (1) 184#define dma_is_consistent(d, h) (1)
185 185
186static inline int 186static inline int
187dma_get_cache_alignment(void) 187dma_get_cache_alignment(void)
@@ -210,7 +210,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
210} 210}
211 211
212static inline void 212static inline void
213dma_cache_sync(void *vaddr, size_t size, 213dma_cache_sync(struct device *dev, void *vaddr, size_t size,
214 enum dma_data_direction direction) 214 enum dma_data_direction direction)
215{ 215{
216 /* could define this in terms of the dma_cache ... operations, 216 /* could define this in terms of the dma_cache ... operations,
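Note: the dma-mapping hunks in this patch (sparc64 here, plus um, x86_64 and xtensa below) all make the same interface change: dma_cache_sync() gains a struct device * argument and dma_is_consistent() takes the dma handle as a second parameter. A minimal sketch of how a driver call site would be updated (dev, buf, len and dma_handle are hypothetical driver variables, not code from this patch):

	/* before this patch */
	dma_cache_sync(buf, len, DMA_TO_DEVICE);
	consistent = dma_is_consistent(dev);

	/* after this patch: the device (and handle) are passed explicitly */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
	consistent = dma_is_consistent(dev, dma_handle);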
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 7392fc4a954..876312fe82c 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -45,7 +45,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
45 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 45 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
46 oparg = 1 << oparg; 46 oparg = 1 << oparg;
47 47
48 inc_preempt_count(); 48 pagefault_disable();
49 49
50 switch (op) { 50 switch (op) {
51 case FUTEX_OP_SET: 51 case FUTEX_OP_SET:
@@ -67,7 +67,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
67 ret = -ENOSYS; 67 ret = -ENOSYS;
68 } 68 }
69 69
70 dec_preempt_count(); 70 pagefault_enable();
71 71
72 if (!ret) { 72 if (!ret) {
73 switch (cmp) { 73 switch (cmp) {
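The futex hunks here and in asm-x86_64/futex.h below swap the raw inc_preempt_count()/dec_preempt_count() pair for the pagefault_disable()/pagefault_enable() helpers around the atomic user-space operation. Reduced to a sketch (the __futex_op() helper name is invented purely for illustration):

	pagefault_disable();		/* faults on uaddr must fail fast, not sleep */
	ret = __futex_op(op, oparg, uaddr);
	pagefault_enable();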
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index 010f9cd0a67..5891ff7ba76 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -13,7 +13,7 @@
13#include <asm/page.h> 13#include <asm/page.h>
14 14
15/* Page table allocation/freeing. */ 15/* Page table allocation/freeing. */
16extern kmem_cache_t *pgtable_cache; 16extern struct kmem_cache *pgtable_cache;
17 17
18static inline pgd_t *pgd_alloc(struct mm_struct *mm) 18static inline pgd_t *pgd_alloc(struct mm_struct *mm)
19{ 19{
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 63669dad0d7..47047536f26 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -332,124 +332,6 @@
332 * find a free slot in the 0-302 range. 332 * find a free slot in the 0-302 range.
333 */ 333 */
334 334
335#define _syscall0(type,name) \
336type name(void) \
337{ \
338long __res; \
339register long __g1 __asm__ ("g1") = __NR_##name; \
340__asm__ __volatile__ ("t 0x6d\n\t" \
341 "sub %%g0, %%o0, %0\n\t" \
342 "movcc %%xcc, %%o0, %0\n\t" \
343 : "=r" (__res)\
344 : "r" (__g1) \
345 : "o0", "cc"); \
346if (__res >= 0) \
347 return (type) __res; \
348errno = -__res; \
349return -1; \
350}
351
352#define _syscall1(type,name,type1,arg1) \
353type name(type1 arg1) \
354{ \
355long __res; \
356register long __g1 __asm__ ("g1") = __NR_##name; \
357register long __o0 __asm__ ("o0") = (long)(arg1); \
358__asm__ __volatile__ ("t 0x6d\n\t" \
359 "sub %%g0, %%o0, %0\n\t" \
360 "movcc %%xcc, %%o0, %0\n\t" \
361 : "=r" (__res), "=&r" (__o0) \
362 : "1" (__o0), "r" (__g1) \
363 : "cc"); \
364if (__res >= 0) \
365 return (type) __res; \
366errno = -__res; \
367return -1; \
368}
369
370#define _syscall2(type,name,type1,arg1,type2,arg2) \
371type name(type1 arg1,type2 arg2) \
372{ \
373long __res; \
374register long __g1 __asm__ ("g1") = __NR_##name; \
375register long __o0 __asm__ ("o0") = (long)(arg1); \
376register long __o1 __asm__ ("o1") = (long)(arg2); \
377__asm__ __volatile__ ("t 0x6d\n\t" \
378 "sub %%g0, %%o0, %0\n\t" \
379 "movcc %%xcc, %%o0, %0\n\t" \
380 : "=r" (__res), "=&r" (__o0) \
381 : "1" (__o0), "r" (__o1), "r" (__g1) \
382 : "cc"); \
383if (__res >= 0) \
384 return (type) __res; \
385errno = -__res; \
386return -1; \
387}
388
389#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
390type name(type1 arg1,type2 arg2,type3 arg3) \
391{ \
392long __res; \
393register long __g1 __asm__ ("g1") = __NR_##name; \
394register long __o0 __asm__ ("o0") = (long)(arg1); \
395register long __o1 __asm__ ("o1") = (long)(arg2); \
396register long __o2 __asm__ ("o2") = (long)(arg3); \
397__asm__ __volatile__ ("t 0x6d\n\t" \
398 "sub %%g0, %%o0, %0\n\t" \
399 "movcc %%xcc, %%o0, %0\n\t" \
400 : "=r" (__res), "=&r" (__o0) \
401 : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
402 : "cc"); \
403if (__res>=0) \
404 return (type) __res; \
405errno = -__res; \
406return -1; \
407}
408
409#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
410type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
411{ \
412long __res; \
413register long __g1 __asm__ ("g1") = __NR_##name; \
414register long __o0 __asm__ ("o0") = (long)(arg1); \
415register long __o1 __asm__ ("o1") = (long)(arg2); \
416register long __o2 __asm__ ("o2") = (long)(arg3); \
417register long __o3 __asm__ ("o3") = (long)(arg4); \
418__asm__ __volatile__ ("t 0x6d\n\t" \
419 "sub %%g0, %%o0, %0\n\t" \
420 "movcc %%xcc, %%o0, %0\n\t" \
421 : "=r" (__res), "=&r" (__o0) \
422 : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
423 : "cc"); \
424if (__res>=0) \
425 return (type) __res; \
426errno = -__res; \
427return -1; \
428}
429
430#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
431 type5,arg5) \
432type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
433{ \
434long __res; \
435register long __g1 __asm__ ("g1") = __NR_##name; \
436register long __o0 __asm__ ("o0") = (long)(arg1); \
437register long __o1 __asm__ ("o1") = (long)(arg2); \
438register long __o2 __asm__ ("o2") = (long)(arg3); \
439register long __o3 __asm__ ("o3") = (long)(arg4); \
440register long __o4 __asm__ ("o4") = (long)(arg5); \
441__asm__ __volatile__ ("t 0x6d\n\t" \
442 "sub %%g0, %%o0, %0\n\t" \
443 "movcc %%xcc, %%o0, %0\n\t" \
444 : "=r" (__res), "=&r" (__o0) \
445 : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
446 : "cc"); \
447if (__res>=0) \
448 return (type) __res; \
449errno = -__res; \
450return -1; \
451}
452
453/* sysconf options, for SunOS compatibility */ 335/* sysconf options, for SunOS compatibility */
454#define _SC_ARG_MAX 1 336#define _SC_ARG_MAX 1
455#define _SC_CHILD_MAX 2 337#define _SC_CHILD_MAX 2
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index babd2989511..f0ee4fb5591 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -94,7 +94,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
94 94
95#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 95#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
96#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 96#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
97#define dma_is_consistent(d) (1) 97#define dma_is_consistent(d, h) (1)
98 98
99static inline int 99static inline int
100dma_get_cache_alignment(void) 100dma_get_cache_alignment(void)
@@ -112,7 +112,7 @@ dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
112} 112}
113 113
114static inline void 114static inline void
115dma_cache_sync(void *vaddr, size_t size, 115dma_cache_sync(struct device *dev, void *vaddr, size_t size,
116 enum dma_data_direction direction) 116 enum dma_data_direction direction)
117{ 117{
118 BUG(); 118 BUG();
diff --git a/include/asm-v850/irq.h b/include/asm-v850/irq.h
index 1bf096db8f4..88687c181f0 100644
--- a/include/asm-v850/irq.h
+++ b/include/asm-v850/irq.h
@@ -46,8 +46,6 @@ extern void
46init_irq_handlers (int base_irq, int num, int interval, 46init_irq_handlers (int base_irq, int num, int interval,
47 struct hw_interrupt_type *irq_type); 47 struct hw_interrupt_type *irq_type);
48 48
49typedef void (*irq_handler_t)(int irq, void *data, struct pt_regs *regs);
50

51/* Handle interrupt IRQ. REGS are the registers at the time of the 49/* Handle interrupt IRQ. REGS are the registers at the time of the
52 interrupt. */ 50 interrupt. */
53extern unsigned int handle_irq (int irq, struct pt_regs *regs); 51extern unsigned int handle_irq (int irq, struct pt_regs *regs);
diff --git a/include/asm-v850/unistd.h b/include/asm-v850/unistd.h
index 737401e7d3a..2241ed45ecf 100644
--- a/include/asm-v850/unistd.h
+++ b/include/asm-v850/unistd.h
@@ -204,168 +204,8 @@
204#define __NR_gettid 201 204#define __NR_gettid 201
205#define __NR_tkill 202 205#define __NR_tkill 202
206 206
207
208/* Syscall protocol:
209 Syscall number in r12, args in r6-r9, r13-r14
210 Return value in r10
211 Trap 0 for `short' syscalls, where all the args can fit in function
212 call argument registers, and trap 1 when there are additional args in
213 r13-r14. */
214
215#define SYSCALL_NUM "r12"
216#define SYSCALL_ARG0 "r6"
217#define SYSCALL_ARG1 "r7"
218#define SYSCALL_ARG2 "r8"
219#define SYSCALL_ARG3 "r9"
220#define SYSCALL_ARG4 "r13"
221#define SYSCALL_ARG5 "r14"
222#define SYSCALL_RET "r10"
223
224#define SYSCALL_SHORT_TRAP "0"
225#define SYSCALL_LONG_TRAP "1"
226
227/* Registers clobbered by any syscall. This _doesn't_ include the syscall
228 number (r12) or the `extended arg' registers (r13, r14), even though
229 they are actually clobbered too (this is because gcc's `asm' statement
230 doesn't allow a clobber to be used as an input or output). */
231#define SYSCALL_CLOBBERS "r1", "r5", "r11", "r15", "r16", \
232 "r17", "r18", "r19"
233
234/* Registers clobbered by a `short' syscall. This includes all clobbers
235 except the syscall number (r12). */
236#define SYSCALL_SHORT_CLOBBERS SYSCALL_CLOBBERS, "r13", "r14"
237
238#ifdef __KERNEL__ 207#ifdef __KERNEL__
239 208
240#include <asm/clinkage.h>
241#include <linux/err.h>
242
243#define __syscall_return(type, res) \
244 do { \
245 /* user-visible error numbers are in the range -1 - -MAX_ERRNO: \
246 see <asm-v850/errno.h> */ \
247 if (__builtin_expect ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO), 0)) { \
248 errno = -(res); \
249 res = -1; \
250 } \
251 return (type) (res); \
252 } while (0)
253
254
255#define _syscall0(type, name) \
256type name (void) \
257{ \
258 register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
259 register unsigned long __ret __asm__ (SYSCALL_RET); \
260 __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
261 : "=r" (__ret), "=r" (__syscall) \
262 : "1" (__syscall) \
263 : SYSCALL_SHORT_CLOBBERS); \
264 __syscall_return (type, __ret); \
265}
266
267#define _syscall1(type, name, atype, a) \
268type name (atype a) \
269{ \
270 register atype __a __asm__ (SYSCALL_ARG0) = a; \
271 register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
272 register unsigned long __ret __asm__ (SYSCALL_RET); \
273 __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
274 : "=r" (__ret), "=r" (__syscall) \
275 : "1" (__syscall), "r" (__a) \
276 : SYSCALL_SHORT_CLOBBERS); \
277 __syscall_return (type, __ret); \
278}
279
280#define _syscall2(type, name, atype, a, btype, b) \
281type name (atype a, btype b) \
282{ \
283 register atype __a __asm__ (SYSCALL_ARG0) = a; \
284 register btype __b __asm__ (SYSCALL_ARG1) = b; \
285 register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
286 register unsigned long __ret __asm__ (SYSCALL_RET); \
287 __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
288 : "=r" (__ret), "=r" (__syscall) \
289 : "1" (__syscall), "r" (__a), "r" (__b) \
290 : SYSCALL_SHORT_CLOBBERS); \
291 __syscall_return (type, __ret); \
292}
293
294#define _syscall3(type, name, atype, a, btype, b, ctype, c) \
295type name (atype a, btype b, ctype c) \
296{ \
297 register atype __a __asm__ (SYSCALL_ARG0) = a; \
298 register btype __b __asm__ (SYSCALL_ARG1) = b; \
299 register ctype __c __asm__ (SYSCALL_ARG2) = c; \
300 register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
301 register unsigned long __ret __asm__ (SYSCALL_RET); \
302 __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
303 : "=r" (__ret), "=r" (__syscall) \
304 : "1" (__syscall), "r" (__a), "r" (__b), "r" (__c) \
305 : SYSCALL_SHORT_CLOBBERS); \
306 __syscall_return (type, __ret); \
307}
308
309#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d) \
310type name (atype a, btype b, ctype c, dtype d) \
311{ \
312 register atype __a __asm__ (SYSCALL_ARG0) = a; \
313 register btype __b __asm__ (SYSCALL_ARG1) = b; \
314 register ctype __c __asm__ (SYSCALL_ARG2) = c; \
315 register dtype __d __asm__ (SYSCALL_ARG3) = d; \
316 register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
317 register unsigned long __ret __asm__ (SYSCALL_RET); \
318 __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
319 : "=r" (__ret), "=r" (__syscall) \
320 : "1" (__syscall), \
321 "r" (__a), "r" (__b), "r" (__c), "r" (__d) \
322 : SYSCALL_SHORT_CLOBBERS); \
323 __syscall_return (type, __ret); \
324}
325
326#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype,e)\
327type name (atype a, btype b, ctype c, dtype d, etype e) \
328{ \
329 register atype __a __asm__ (SYSCALL_ARG0) = a; \
330 register btype __b __asm__ (SYSCALL_ARG1) = b; \
331 register ctype __c __asm__ (SYSCALL_ARG2) = c; \
332 register dtype __d __asm__ (SYSCALL_ARG3) = d; \
333 register etype __e __asm__ (SYSCALL_ARG4) = e; \
334 register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
335 register unsigned long __ret __asm__ (SYSCALL_RET); \
336 __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP \
337 : "=r" (__ret), "=r" (__syscall), "=r" (__e) \
338 : "1" (__syscall), \
339 "r" (__a), "r" (__b), "r" (__c), "r" (__d), "2" (__e) \
340 : SYSCALL_CLOBBERS); \
341 __syscall_return (type, __ret); \
342}
343
344#define __SYSCALL6_TRAP(syscall, ret, a, b, c, d, e, f) \
345 __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP \
346 : "=r" (ret), "=r" (syscall), \
347 "=r" (e), "=r" (f) \
348 : "1" (syscall), \
349 "r" (a), "r" (b), "r" (c), "r" (d), \
350 "2" (e), "3" (f) \
351 : SYSCALL_CLOBBERS);
352
353#define _syscall6(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e, ftype, f) \
354type name (atype a, btype b, ctype c, dtype d, etype e, ftype f) \
355{ \
356 register atype __a __asm__ (SYSCALL_ARG0) = a; \
357 register btype __b __asm__ (SYSCALL_ARG1) = b; \
358 register ctype __c __asm__ (SYSCALL_ARG2) = c; \
359 register dtype __d __asm__ (SYSCALL_ARG3) = d; \
360 register etype __e __asm__ (SYSCALL_ARG4) = e; \
361 register etype __f __asm__ (SYSCALL_ARG5) = f; \
362 register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
363 register unsigned long __ret __asm__ (SYSCALL_RET); \
364 __SYSCALL6_TRAP(__syscall, __ret, __a, __b, __c, __d, __e, __f); \
365 __syscall_return (type, __ret); \
366}
367
368
369#define __ARCH_WANT_IPC_PARSE_VERSION 209#define __ARCH_WANT_IPC_PARSE_VERSION
370#define __ARCH_WANT_OLD_READDIR 210#define __ARCH_WANT_OLD_READDIR
371#define __ARCH_WANT_STAT64 211#define __ARCH_WANT_STAT64
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index 1ee9b07f3fe..ebd7117782a 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -6,13 +6,11 @@ ALTARCHDEF := defined __i386__
6 6
7header-y += boot.h 7header-y += boot.h
8header-y += bootsetup.h 8header-y += bootsetup.h
9header-y += cpufeature.h
10header-y += debugreg.h 9header-y += debugreg.h
11header-y += ldt.h 10header-y += ldt.h
12header-y += msr.h 11header-y += msr.h
13header-y += prctl.h 12header-y += prctl.h
14header-y += ptrace-abi.h 13header-y += ptrace-abi.h
15header-y += setup.h
16header-y += sigcontext32.h 14header-y += sigcontext32.h
17header-y += ucontext.h 15header-y += ucontext.h
18header-y += vsyscall32.h 16header-y += vsyscall32.h
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index a584826cc57..a6657b4f3e0 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/stddef.h>
7#include <asm/cpufeature.h> 8#include <asm/cpufeature.h>
8 9
9struct alt_instr { 10struct alt_instr {
@@ -133,4 +134,15 @@ static inline void alternatives_smp_switch(int smp) {}
133#define LOCK_PREFIX "" 134#define LOCK_PREFIX ""
134#endif 135#endif
135 136
137struct paravirt_patch;
138#ifdef CONFIG_PARAVIRT
139void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
140#else
141static inline void
142apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
143{}
144#define __start_parainstructions NULL
145#define __stop_parainstructions NULL
146#endif
147
136#endif /* _X86_64_ALTERNATIVE_H */ 148#endif /* _X86_64_ALTERNATIVE_H */
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 93849f7abc2..706ca4b6000 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -189,9 +189,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
189{ 189{
190 int __i = i; 190 int __i = i;
191 __asm__ __volatile__( 191 __asm__ __volatile__(
192 LOCK_PREFIX "xaddl %0, %1;" 192 LOCK_PREFIX "xaddl %0, %1"
193 :"=r"(i) 193 :"+r" (i), "+m" (v->counter)
194 :"m"(v->counter), "0"(i)); 194 : : "memory");
195 return i + __i; 195 return i + __i;
196} 196}
197 197
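The rewritten constraint list in atomic_add_return() is worth a brief note: xaddl both reads and writes its register and memory operands, so the new "+r" (i) / "+m" (v->counter) constraints describe that to the compiler directly, and the "memory" clobber keeps the atomic update ordered against surrounding accesses. The old "=r"/"m"/"0" form relied on a matching-operand trick and never declared that v->counter is written.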
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index 6b93f5a3a5c..7ee90064571 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -51,6 +51,8 @@ struct iommu_table {
51#define TCE_TABLE_SIZE_4M 6 51#define TCE_TABLE_SIZE_4M 6
52#define TCE_TABLE_SIZE_8M 7 52#define TCE_TABLE_SIZE_8M 7
53 53
54extern int use_calgary;
55
54#ifdef CONFIG_CALGARY_IOMMU 56#ifdef CONFIG_CALGARY_IOMMU
55extern int calgary_iommu_init(void); 57extern int calgary_iommu_init(void);
56extern void detect_calgary(void); 58extern void detect_calgary(void);
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index ee792faaca0..0b3c686139f 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -29,7 +29,7 @@
29#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ 29#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
30#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ 30#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
31#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ 31#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
32#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ 32#define X86_FEATURE_DS (0*32+21) /* Debug Store */
33#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ 33#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
34#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ 34#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
35#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ 35#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
@@ -68,6 +68,8 @@
68#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ 68#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */
69#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */ 69#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */
70#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */ 70#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
71#define X86_FEATURE_PEBS (3*32+10) /* Precise-Event Based Sampling */
72#define X86_FEATURE_BTS (3*32+11) /* Branch Trace Store */
71 73
72/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 74/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
73#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ 75#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -112,5 +114,8 @@
112#define cpu_has_cyrix_arr 0 114#define cpu_has_cyrix_arr 0
113#define cpu_has_centaur_mcr 0 115#define cpu_has_centaur_mcr 0
114#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) 116#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
117#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
118#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
119#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
115 120
116#endif /* __ASM_X8664_CPUFEATURE_H */ 121#endif /* __ASM_X8664_CPUFEATURE_H */
diff --git a/include/asm-x86_64/delay.h b/include/asm-x86_64/delay.h
index 65f64acc531..c2669f1f552 100644
--- a/include/asm-x86_64/delay.h
+++ b/include/asm-x86_64/delay.h
@@ -7,18 +7,21 @@
7 * Delay routines calling functions in arch/x86_64/lib/delay.c 7 * Delay routines calling functions in arch/x86_64/lib/delay.c
8 */ 8 */
9 9
10/* Undefined functions to get compile-time errors */
10extern void __bad_udelay(void); 11extern void __bad_udelay(void);
11extern void __bad_ndelay(void); 12extern void __bad_ndelay(void);
12 13
13extern void __udelay(unsigned long usecs); 14extern void __udelay(unsigned long usecs);
14extern void __ndelay(unsigned long usecs); 15extern void __ndelay(unsigned long nsecs);
15extern void __const_udelay(unsigned long usecs); 16extern void __const_udelay(unsigned long usecs);
16extern void __delay(unsigned long loops); 17extern void __delay(unsigned long loops);
17 18
19/* 0x10c7 is 2**32 / 1000000 (rounded up) */
18#define udelay(n) (__builtin_constant_p(n) ? \ 20#define udelay(n) (__builtin_constant_p(n) ? \
19 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \ 21 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
20 __udelay(n)) 22 __udelay(n))
21 23
24/* 0x5 is 2**32 / 1000000000 (rounded up) */
22#define ndelay(n) (__builtin_constant_p(n) ? \ 25#define ndelay(n) (__builtin_constant_p(n) ? \
23 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ 26 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
24 __ndelay(n)) 27 __ndelay(n))
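For the record, the arithmetic behind the new delay.h comments: 2^32 / 1,000,000 = 4294.967296, so the old multiplier 0x10c6 (= 4294) rounded down and could leave constant udelay() calls slightly short, while 0x10c7 (= 4295) rounds up and can only err on the long side. Likewise 2^32 / 1,000,000,000 ≈ 4.29, which is rounded up to the 5 used by ndelay().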
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index eb7723a4679..913d6ac0003 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -9,64 +9,13 @@
9 9
10#include <linux/string.h> 10#include <linux/string.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <asm/desc_defs.h>
12 13
13#include <asm/segment.h> 14#include <asm/segment.h>
14#include <asm/mmu.h> 15#include <asm/mmu.h>
15 16
16// 8 byte segment descriptor
17struct desc_struct {
18 u16 limit0;
19 u16 base0;
20 unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
21 unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
22} __attribute__((packed));
23
24struct n_desc_struct {
25 unsigned int a,b;
26};
27
28extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; 17extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
29 18
30enum {
31 GATE_INTERRUPT = 0xE,
32 GATE_TRAP = 0xF,
33 GATE_CALL = 0xC,
34};
35
36// 16byte gate
37struct gate_struct {
38 u16 offset_low;
39 u16 segment;
40 unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
41 u16 offset_middle;
42 u32 offset_high;
43 u32 zero1;
44} __attribute__((packed));
45
46#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
47#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
48#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
49
50enum {
51 DESC_TSS = 0x9,
52 DESC_LDT = 0x2,
53};
54
55// LDT or TSS descriptor in the GDT. 16 bytes.
56struct ldttss_desc {
57 u16 limit0;
58 u16 base0;
59 unsigned base1 : 8, type : 5, dpl : 2, p : 1;
60 unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
61 u32 base3;
62 u32 zero1;
63} __attribute__((packed));
64
65struct desc_ptr {
66 unsigned short size;
67 unsigned long address;
68} __attribute__((packed)) ;
69
70#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8)) 19#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
71#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) 20#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
72#define clear_LDT() asm volatile("lldt %w0"::"r" (0)) 21#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
diff --git a/include/asm-x86_64/desc_defs.h b/include/asm-x86_64/desc_defs.h
new file mode 100644
index 00000000000..08900407009
--- /dev/null
+++ b/include/asm-x86_64/desc_defs.h
@@ -0,0 +1,69 @@
1/* Written 2000 by Andi Kleen */
2#ifndef __ARCH_DESC_DEFS_H
3#define __ARCH_DESC_DEFS_H
4
5/*
6 * Segment descriptor structure definitions, usable from both x86_64 and i386
7 * archs.
8 */
9
10#ifndef __ASSEMBLY__
11
12#include <linux/types.h>
13
14// 8 byte segment descriptor
15struct desc_struct {
16 u16 limit0;
17 u16 base0;
18 unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
19 unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
20} __attribute__((packed));
21
22struct n_desc_struct {
23 unsigned int a,b;
24};
25
26enum {
27 GATE_INTERRUPT = 0xE,
28 GATE_TRAP = 0xF,
29 GATE_CALL = 0xC,
30};
31
32// 16byte gate
33struct gate_struct {
34 u16 offset_low;
35 u16 segment;
36 unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
37 u16 offset_middle;
38 u32 offset_high;
39 u32 zero1;
40} __attribute__((packed));
41
42#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
43#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
44#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
45
46enum {
47 DESC_TSS = 0x9,
48 DESC_LDT = 0x2,
49};
50
51// LDT or TSS descriptor in the GDT. 16 bytes.
52struct ldttss_desc {
53 u16 limit0;
54 u16 base0;
55 unsigned base1 : 8, type : 5, dpl : 2, p : 1;
56 unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
57 u32 base3;
58 u32 zero1;
59} __attribute__((packed));
60
61struct desc_ptr {
62 unsigned short size;
63 unsigned long address;
64} __attribute__((packed)) ;
65
66
67#endif /* !__ASSEMBLY__ */
68
69#endif
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 10174b110a5..be9ec689072 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -180,12 +180,13 @@ static inline int dma_get_cache_alignment(void)
180 return boot_cpu_data.x86_clflush_size; 180 return boot_cpu_data.x86_clflush_size;
181} 181}
182 182
183#define dma_is_consistent(h) 1 183#define dma_is_consistent(d, h) 1
184 184
185extern int dma_set_mask(struct device *dev, u64 mask); 185extern int dma_set_mask(struct device *dev, u64 mask);
186 186
187static inline void 187static inline void
188dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) 188dma_cache_sync(struct device *dev, void *vaddr, size_t size,
189 enum dma_data_direction dir)
189{ 190{
190 flush_write_buffers(); 191 flush_write_buffers();
191} 192}
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 9804bf07b09..5cdfb08013c 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
55 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) 55 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
56 return -EFAULT; 56 return -EFAULT;
57 57
58 inc_preempt_count(); 58 pagefault_disable();
59 59
60 switch (op) { 60 switch (op) {
61 case FUTEX_OP_SET: 61 case FUTEX_OP_SET:
@@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
78 ret = -ENOSYS; 78 ret = -ENOSYS;
79 } 79 }
80 80
81 dec_preempt_count(); 81 pagefault_enable();
82 82
83 if (!ret) { 83 if (!ret) {
84 switch (cmp) { 84 switch (cmp) {
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
index a0e9a4b9348..b80f4bb5f27 100644
--- a/include/asm-x86_64/genapic.h
+++ b/include/asm-x86_64/genapic.h
@@ -30,6 +30,6 @@ struct genapic {
30}; 30};
31 31
32 32
33extern struct genapic *genapic; 33extern struct genapic *genapic, *genapic_force, apic_flat;
34 34
35#endif 35#endif
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 37e194169fa..952783d35c7 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -169,8 +169,8 @@ static inline unsigned int cpuid_edx(unsigned int op)
169#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ 169#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
170#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ 170#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
171#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ 171#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
172#define MSR_FS_BASE 0xc0000100 /* 64bit GS base */ 172#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
173#define MSR_GS_BASE 0xc0000101 /* 64bit FS base */ 173#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
174#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */ 174#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
175/* EFER bits: */ 175/* EFER bits: */
176#define _EFER_SCE 0 /* SYSCALL/SYSRET */ 176#define _EFER_SCE 0 /* SYSCALL/SYSRET */
@@ -210,6 +210,10 @@ static inline unsigned int cpuid_edx(unsigned int op)
210#define MSR_IA32_LASTINTFROMIP 0x1dd 210#define MSR_IA32_LASTINTFROMIP 0x1dd
211#define MSR_IA32_LASTINTTOIP 0x1de 211#define MSR_IA32_LASTINTTOIP 0x1de
212 212
213#define MSR_IA32_PEBS_ENABLE 0x3f1
214#define MSR_IA32_DS_AREA 0x600
215#define MSR_IA32_PERF_CAPABILITIES 0x345
216
213#define MSR_MTRRfix64K_00000 0x250 217#define MSR_MTRRfix64K_00000 0x250
214#define MSR_MTRRfix16K_80000 0x258 218#define MSR_MTRRfix16K_80000 0x258
215#define MSR_MTRRfix16K_A0000 0x259 219#define MSR_MTRRfix16K_A0000 0x259
@@ -407,4 +411,13 @@ static inline unsigned int cpuid_edx(unsigned int op)
407#define MSR_P4_U2L_ESCR0 0x3b0 411#define MSR_P4_U2L_ESCR0 0x3b0
408#define MSR_P4_U2L_ESCR1 0x3b1 412#define MSR_P4_U2L_ESCR1 0x3b1
409 413
414/* Intel Core-based CPU performance counters */
415#define MSR_CORE_PERF_FIXED_CTR0 0x309
416#define MSR_CORE_PERF_FIXED_CTR1 0x30a
417#define MSR_CORE_PERF_FIXED_CTR2 0x30b
418#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
419#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
420#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
421#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
422
410#endif 423#endif
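The new MSR_IA32_DS_AREA/PEBS and MSR_CORE_PERF_* definitions are plain model-specific-register numbers; assuming the rdmsrl() helper this header already provides, reading one of them looks like the following sketch (illustrative only):

	u64 ovf;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, ovf);	/* which fixed/general counters overflowed */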
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index f367d4014b4..72375e7d32a 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -77,4 +77,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
77 77
78extern int unknown_nmi_panic; 78extern int unknown_nmi_panic;
79 79
80void __trigger_all_cpu_backtrace(void);
81#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
82
80#endif /* ASM_NMI_H */ 83#endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h
index eba9cb471df..6823fa4f1af 100644
--- a/include/asm-x86_64/pci-direct.h
+++ b/include/asm-x86_64/pci-direct.h
@@ -10,6 +10,7 @@ extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
10extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); 10extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); 11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); 12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
13extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
13 14
14extern int early_pci_allowed(void); 15extern int early_pci_allowed(void);
15 16
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 0555c1c4d8f..59901c690a0 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -221,20 +221,19 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
221#define __S110 PAGE_SHARED_EXEC 221#define __S110 PAGE_SHARED_EXEC
222#define __S111 PAGE_SHARED_EXEC 222#define __S111 PAGE_SHARED_EXEC
223 223
224static inline unsigned long pgd_bad(pgd_t pgd) 224static inline unsigned long pgd_bad(pgd_t pgd)
225{ 225{
226 unsigned long val = pgd_val(pgd); 226 return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
227 val &= ~PTE_MASK; 227}
228 val &= ~(_PAGE_USER | _PAGE_DIRTY);
229 return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
230}
231 228
232static inline unsigned long pud_bad(pud_t pud) 229static inline unsigned long pud_bad(pud_t pud)
233{ 230{
234 unsigned long val = pud_val(pud); 231 return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
235 val &= ~PTE_MASK; 232}
236 val &= ~(_PAGE_USER | _PAGE_DIRTY); 233
237 return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); 234static inline unsigned long pmd_bad(pmd_t pmd)
235{
236 return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
238} 237}
239 238
240#define pte_none(x) (!pte_val(x)) 239#define pte_none(x) (!pte_val(x))
@@ -347,7 +346,6 @@ static inline int pmd_large(pmd_t pte) {
347#define pmd_none(x) (!pmd_val(x)) 346#define pmd_none(x) (!pmd_val(x))
348#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) 347#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
349#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) 348#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
350#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
351#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) 349#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
352#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) 350#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
353 351
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index cef17e0f828..76552d72804 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
475 : :"a" (eax), "c" (ecx)); 475 : :"a" (eax), "c" (ecx));
476} 476}
477 477
478static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
479{
480 /* "mwait %eax,%ecx;" */
481 asm volatile(
482 "sti; .byte 0x0f,0x01,0xc9;"
483 : :"a" (eax), "c" (ecx));
484}
485
478extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); 486extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
479 487
480#define stack_current() \ 488#define stack_current() \
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index e72cfcdf534..6d324b83897 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -61,7 +61,6 @@ extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
61extern unsigned long numa_free_all_bootmem(void); 61extern unsigned long numa_free_all_bootmem(void);
62 62
63extern void reserve_bootmem_generic(unsigned long phys, unsigned len); 63extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
64extern void free_bootmem_generic(unsigned long phys, unsigned len);
65 64
66extern void load_gs_index(unsigned gs); 65extern void load_gs_index(unsigned gs);
67 66
@@ -88,6 +87,7 @@ extern void syscall32_cpu_init(void);
88extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); 87extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
89 88
90extern void early_quirks(void); 89extern void early_quirks(void);
90extern void quirk_intel_irqbalance(void);
91extern void check_efer(void); 91extern void check_efer(void);
92 92
93extern int unhandled_signal(struct task_struct *tsk, int sig); 93extern int unhandled_signal(struct task_struct *tsk, int sig);
diff --git a/include/asm-x86_64/rio.h b/include/asm-x86_64/rio.h
new file mode 100644
index 00000000000..c7350f6d201
--- /dev/null
+++ b/include/asm-x86_64/rio.h
@@ -0,0 +1,74 @@
1/*
2 * Derived from include/asm-i386/mach-summit/mach_mpparse.h
3 * and include/asm-i386/mach-default/bios_ebda.h
4 *
5 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
6 */
7
8#ifndef __ASM_RIO_H
9#define __ASM_RIO_H
10
11#define RIO_TABLE_VERSION 3
12
13struct rio_table_hdr {
14 u8 version; /* Version number of this data structure */
15 u8 num_scal_dev; /* # of Scalability devices */
16 u8 num_rio_dev; /* # of RIO I/O devices */
17} __attribute__((packed));
18
19struct scal_detail {
20 u8 node_id; /* Scalability Node ID */
21 u32 CBAR; /* Address of 1MB register space */
22 u8 port0node; /* Node ID port connected to: 0xFF=None */
23 u8 port0port; /* Port num port connected to: 0,1,2, or */
24 /* 0xFF=None */
25 u8 port1node; /* Node ID port connected to: 0xFF = None */
26 u8 port1port; /* Port num port connected to: 0,1,2, or */
27 /* 0xFF=None */
28 u8 port2node; /* Node ID port connected to: 0xFF = None */
29 u8 port2port; /* Port num port connected to: 0,1,2, or */
30 /* 0xFF=None */
31 u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
32} __attribute__((packed));
33
34struct rio_detail {
35 u8 node_id; /* RIO Node ID */
36 u32 BBAR; /* Address of 1MB register space */
37 u8 type; /* Type of device */
38 u8 owner_id; /* Node ID of Hurricane that owns this */
39 /* node */
40 u8 port0node; /* Node ID port connected to: 0xFF=None */
41 u8 port0port; /* Port num port connected to: 0,1,2, or */
42 /* 0xFF=None */
43 u8 port1node; /* Node ID port connected to: 0xFF=None */
44 u8 port1port; /* Port num port connected to: 0,1,2, or */
45 /* 0xFF=None */
46 u8 first_slot; /* Lowest slot number below this Calgary */
47 u8 status; /* Bit 0 = 1 : the XAPIC is used */
48 /* = 0 : the XAPIC is not used, ie: */
49 /* ints fwded to another XAPIC */
50 /* Bits1:7 Reserved */
51 u8 WP_index; /* instance index - lower ones have */
52 /* lower slot numbers/PCI bus numbers */
53 u8 chassis_num; /* 1 based Chassis number */
54} __attribute__((packed));
55
56enum {
57 HURR_SCALABILTY = 0, /* Hurricane Scalability info */
58 HURR_RIOIB = 2, /* Hurricane RIOIB info */
59 COMPAT_CALGARY = 4, /* Compatibility Calgary */
60 ALT_CALGARY = 5, /* Second Planar Calgary */
61};
62
63/*
64 * there is a real-mode segmented pointer pointing to the
65 * 4K EBDA area at 0x40E.
66 */
67static inline unsigned long get_bios_ebda(void)
68{
69 unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
70 address <<= 4;
71 return address;
72}
73
74#endif /* __ASM_RIO_H */
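get_bios_ebda() in the new rio.h follows the usual real-mode convention: the 16-bit value at physical 0x40E is a segment, and shifting it left by 4 yields the EBDA's physical base address. For example, a BIOS that stores 0x9FC0 there (a typical value, used here purely for illustration) gives 0x9FC0 << 4 = 0x9FC00, i.e. an EBDA sitting just below 640 KB.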
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d6b7c057edb..e17b9ec42e9 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -82,11 +82,6 @@ extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
82extern u8 x86_cpu_to_log_apicid[NR_CPUS]; 82extern u8 x86_cpu_to_log_apicid[NR_CPUS];
83extern u8 bios_cpu_apicid[]; 83extern u8 bios_cpu_apicid[];
84 84
85static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
86{
87 return cpus_addr(cpumask)[0];
88}
89
90static inline int cpu_present_to_apicid(int mps_cpu) 85static inline int cpu_present_to_apicid(int mps_cpu)
91{ 86{
92 if (mps_cpu < NR_CPUS) 87 if (mps_cpu < NR_CPUS)
@@ -118,13 +113,6 @@ static __inline int logical_smp_processor_id(void)
118#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] 113#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
119#else 114#else
120#define cpu_physical_id(cpu) boot_cpu_id 115#define cpu_physical_id(cpu) boot_cpu_id
121static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
122 void *info, int retry, int wait)
123{
124 /* Disable interrupts here? */
125 func(info);
126 return 0;
127}
128#endif /* !CONFIG_SMP */ 116#endif /* !CONFIG_SMP */
129#endif 117#endif
130 118
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 05ef097ba55..88bf981e73c 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -36,7 +36,34 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
36 "2:\t" : "=m" (lock->slock) : : "memory"); 36 "2:\t" : "=m" (lock->slock) : : "memory");
37} 37}
38 38
39#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 39/*
40 * Same as __raw_spin_lock, but reenable interrupts during spinning.
41 */
42#ifndef CONFIG_PROVE_LOCKING
43static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
44{
45 asm volatile(
46 "\n1:\t"
47 LOCK_PREFIX " ; decl %0\n\t"
48 "jns 5f\n"
49 "testl $0x200, %1\n\t" /* interrupts were disabled? */
50 "jz 4f\n\t"
51 "sti\n"
52 "3:\t"
53 "rep;nop\n\t"
54 "cmpl $0, %0\n\t"
55 "jle 3b\n\t"
56 "cli\n\t"
57 "jmp 1b\n"
58 "4:\t"
59 "rep;nop\n\t"
60 "cmpl $0, %0\n\t"
61 "jg 1b\n\t"
62 "jmp 4b\n"
63 "5:\n\t"
64 : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
65}
66#endif
40 67
41static inline int __raw_spin_trylock(raw_spinlock_t *lock) 68static inline int __raw_spin_trylock(raw_spinlock_t *lock)
42{ 69{
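The new x86_64 __raw_spin_lock_flags() brings over the i386 behaviour: if the caller's saved flags had IF set (bit 9, mask 0x200), interrupts are re-enabled while the CPU spins and disabled again just before retrying the locked decrement. Roughly, the assembly corresponds to this C-level control-flow sketch (try_dec() is an invented name standing for the LOCK-prefixed decl plus the jns test; not a drop-in replacement):

	for (;;) {
		if (try_dec(&lock->slock))	/* lock; decl — non-negative result: acquired */
			return;
		if (flags & 0x200) {		/* caller had interrupts enabled */
			local_irq_enable();	/* sti */
			while (lock->slock <= 0)
				cpu_relax();	/* rep; nop */
			local_irq_disable();	/* cli before the retry */
		} else {
			while (lock->slock <= 0)
				cpu_relax();
		}
	}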
diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h
index 5eb9799bef7..6f0b5459430 100644
--- a/include/asm-x86_64/stacktrace.h
+++ b/include/asm-x86_64/stacktrace.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_STACKTRACE_H 1#ifndef _ASM_STACKTRACE_H
2#define _ASM_STACKTRACE_H 1 2#define _ASM_STACKTRACE_H 1
3 3
4extern int kstack_depth_to_print;
5
4/* Generic stack tracer with callbacks */ 6/* Generic stack tracer with callbacks */
5 7
6struct stacktrace_ops { 8struct stacktrace_ops {
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 777288eb7e7..c5f596e71fa 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -622,25 +622,7 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
622 622
623#define __NR_syscall_max __NR_move_pages 623#define __NR_syscall_max __NR_move_pages
624 624
625#ifdef __KERNEL__
626#include <linux/err.h>
627#endif
628
629#ifndef __NO_STUBS 625#ifndef __NO_STUBS
630
631/* user-visible error numbers are in the range -1 - -MAX_ERRNO */
632
633#define __syscall_clobber "r11","rcx","memory"
634
635#define __syscall_return(type, res) \
636do { \
637 if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
638 errno = -(res); \
639 res = -1; \
640 } \
641 return (type) (res); \
642} while (0)
643
644#define __ARCH_WANT_OLD_READDIR 626#define __ARCH_WANT_OLD_READDIR
645#define __ARCH_WANT_OLD_STAT 627#define __ARCH_WANT_OLD_STAT
646#define __ARCH_WANT_SYS_ALARM 628#define __ARCH_WANT_SYS_ALARM
@@ -664,87 +646,6 @@ do { \
664#define __ARCH_WANT_SYS_TIME 646#define __ARCH_WANT_SYS_TIME
665#define __ARCH_WANT_COMPAT_SYS_TIME 647#define __ARCH_WANT_COMPAT_SYS_TIME
666 648
667#define __syscall "syscall"
668
669#define _syscall0(type,name) \
670type name(void) \
671{ \
672long __res; \
673__asm__ volatile (__syscall \
674 : "=a" (__res) \
675 : "0" (__NR_##name) : __syscall_clobber ); \
676__syscall_return(type,__res); \
677}
678
679#define _syscall1(type,name,type1,arg1) \
680type name(type1 arg1) \
681{ \
682long __res; \
683__asm__ volatile (__syscall \
684 : "=a" (__res) \
685 : "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \
686__syscall_return(type,__res); \
687}
688
689#define _syscall2(type,name,type1,arg1,type2,arg2) \
690type name(type1 arg1,type2 arg2) \
691{ \
692long __res; \
693__asm__ volatile (__syscall \
694 : "=a" (__res) \
695 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \
696__syscall_return(type,__res); \
697}
698
699#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
700type name(type1 arg1,type2 arg2,type3 arg3) \
701{ \
702long __res; \
703__asm__ volatile (__syscall \
704 : "=a" (__res) \
705 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
706 "d" ((long)(arg3)) : __syscall_clobber); \
707__syscall_return(type,__res); \
708}
709
710#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
711type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
712{ \
713long __res; \
714__asm__ volatile ("movq %5,%%r10 ;" __syscall \
715 : "=a" (__res) \
716 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
717 "d" ((long)(arg3)),"g" ((long)(arg4)) : __syscall_clobber,"r10" ); \
718__syscall_return(type,__res); \
719}
720
721#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
722 type5,arg5) \
723type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
724{ \
725long __res; \
726__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \
727 : "=a" (__res) \
728 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
729 "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \
730 __syscall_clobber,"r8","r10" ); \
731__syscall_return(type,__res); \
732}
733
734#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
735 type5,arg5,type6,arg6) \
736type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
737{ \
738long __res; \
739__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \
740 : "=a" (__res) \
741 : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
742 "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \
743 "g" ((long)(arg6)) : \
744 __syscall_clobber,"r8","r10","r9" ); \
745__syscall_return(type,__res); \
746}
747
748#ifdef __KERNEL__ 649#ifdef __KERNEL__
749#ifndef __ASSEMBLY__ 650#ifndef __ASSEMBLY__
750 651
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index 2e7ff10fd77..2f6349e4871 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -87,14 +87,10 @@ extern int arch_unwind_init_running(struct unwind_frame_info *,
87 87
88static inline int arch_unw_user_mode(const struct unwind_frame_info *info) 88static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
89{ 89{
90#if 0 /* This can only work when selector register saves/restores 90 return user_mode(&info->regs)
91 are properly annotated (and tracked in UNW_REGISTER_INFO). */ 91 || (long)info->regs.rip >= 0
92 return user_mode(&info->regs);
93#else
94 return (long)info->regs.rip >= 0
95 || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END) 92 || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
96 || (long)info->regs.rsp >= 0; 93 || (long)info->regs.rsp >= 0;
97#endif
98} 94}
99 95
100#else 96#else
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 01d1c17e284..05cb8dd200d 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -10,6 +10,7 @@ enum vsyscall_num {
10#define VSYSCALL_START (-10UL << 20) 10#define VSYSCALL_START (-10UL << 20)
11#define VSYSCALL_SIZE 1024 11#define VSYSCALL_SIZE 1024
12#define VSYSCALL_END (-2UL << 20) 12#define VSYSCALL_END (-2UL << 20)
13#define VSYSCALL_MAPPED_PAGES 1
13#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) 14#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
14 15
15#ifdef __KERNEL__ 16#ifdef __KERNEL__
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index c39c91dfcc6..82b03b3a2ee 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -170,10 +170,10 @@ dma_get_cache_alignment(void)
170 return L1_CACHE_BYTES; 170 return L1_CACHE_BYTES;
171} 171}
172 172
173#define dma_is_consistent(d) (1) 173#define dma_is_consistent(d, h) (1)
174 174
175static inline void 175static inline void
176dma_cache_sync(void *vaddr, size_t size, 176dma_cache_sync(struct device *dev, void *vaddr, size_t size,
177 enum dma_data_direction direction) 177 enum dma_data_direction direction)
178{ 178{
179 consistent_sync(vaddr, size, direction); 179 consistent_sync(vaddr, size, direction);
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h
index 411f810a55c..2e1a1b997e7 100644
--- a/include/asm-xtensa/unistd.h
+++ b/include/asm-xtensa/unistd.h
@@ -218,190 +218,6 @@
218 218
219#define SYSXTENSA_COUNT 5 /* count of syscall0 functions*/ 219#define SYSXTENSA_COUNT 5 /* count of syscall0 functions*/
220 220
221#ifdef __KERNEL__
222#include <linux/linkage.h>
223
224#define __syscall_return(type, res) return ((type)(res))
225
226/* Tensilica's xt-xcc compiler is much more aggressive at code
227 * optimization than gcc. Multiple __asm__ statements are
228 * insufficient for xt-xcc because subsequent optimization passes
229 * (beyond the front-end that knows of __asm__ statements and other
230 * such GNU Extensions to C) can modify the register selection for
231 * containment of C variables.
232 *
233 * xt-xcc cannot modify the contents of a single __asm__ statement, so
234 * we create single-asm versions of the syscall macros that are
235 * suitable and optimal for both xt-xcc and gcc.
236 *
237 * Linux takes system-call arguments in registers. The following
238 * design is optimized for user-land apps (e.g., glibc) which
239 * typically have a function wrapper around the "syscall" assembly
240 * instruction. It satisfies the Xtensa ABI while minizing argument
241 * shifting.
242 *
243 * The Xtensa ABI and software conventions require the system-call
244 * number in a2. If an argument exists in a2, we move it to the next
245 * available register. Note that for improved efficiency, we do NOT
246 * shift all parameters down one register to maintain the original
247 * order.
248 *
249 * At best case (zero arguments), we just write the syscall number to
250 * a2. At worst case (1 to 6 arguments), we move the argument in a2
251 * to the next available register, then write the syscall number to
252 * a2.
253 *
254 * For clarity, the following truth table enumerates all possibilities.
255 *
256 * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5
257 * --------- -------------- ----------------------------------
258 * 0 a2
259 * 1 a2 a3
260 * 2 a2 a4, a3
261 * 3 a2 a5, a3, a4
262 * 4 a2 a6, a3, a4, a5
263 * 5 a2 a7, a3, a4, a5, a6
264 * 6 a2 a8, a3, a4, a5, a6, a7
265 */
266
267#define _syscall0(type,name) \
268type name(void) \
269{ \
270long __res; \
271__asm__ __volatile__ ( \
272 " movi a2, %1 \n" \
273 " syscall \n" \
274 " mov %0, a2 \n" \
275 : "=a" (__res) \
276 : "i" (__NR_##name) \
277 : "a2" \
278 ); \
279__syscall_return(type,__res); \
280}
281
282#define _syscall1(type,name,type0,arg0) \
283type name(type0 arg0) \
284{ \
285long __res; \
286__asm__ __volatile__ ( \
287 " mov a3, %2 \n" \
288 " movi a2, %1 \n" \
289 " syscall \n" \
290 " mov %0, a2 \n" \
291 : "=a" (__res) \
292 : "i" (__NR_##name), "a" (arg0) \
293 : "a2", "a3" \
294 ); \
295__syscall_return(type,__res); \
296}
297
298#define _syscall2(type,name,type0,arg0,type1,arg1) \
299type name(type0 arg0,type1 arg1) \
300{ \
301long __res; \
302__asm__ __volatile__ ( \
303 " mov a4, %2 \n" \
304 " mov a3, %3 \n" \
305 " movi a2, %1 \n" \
306 " syscall \n" \
307 " mov %0, a2 \n" \
308 : "=a" (__res) \
309 : "i" (__NR_##name), "a" (arg0), "a" (arg1) \
310 : "a2", "a3", "a4" \
311 ); \
312__syscall_return(type,__res); \
313}
314
315#define _syscall3(type,name,type0,arg0,type1,arg1,type2,arg2) \
316type name(type0 arg0,type1 arg1,type2 arg2) \
317{ \
318long __res; \
319__asm__ __volatile__ ( \
320 " mov a5, %2 \n" \
321 " mov a4, %4 \n" \
322 " mov a3, %3 \n" \
323 " movi a2, %1 \n" \
324 " syscall \n" \
325 " mov %0, a2 \n" \
326 : "=a" (__res) \
327 : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2) \
328 : "a2", "a3", "a4", "a5" \
329 ); \
330__syscall_return(type,__res); \
331}
332
333#define _syscall4(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3) \
334type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3) \
335{ \
336long __res; \
337__asm__ __volatile__ ( \
338 " mov a6, %2 \n" \
339 " mov a5, %5 \n" \
340 " mov a4, %4 \n" \
341 " mov a3, %3 \n" \
342 " movi a2, %1 \n" \
343 " syscall \n" \
344 " mov %0, a2 \n" \
345 : "=a" (__res) \
346 : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), "a" (arg3) \
347 : "a2", "a3", "a4", "a5", "a6" \
348 ); \
349__syscall_return(type,__res); \
350}
351
352/* Note that we save and restore the a7 frame pointer.
353 * Including a7 in the clobber list doesn't do what you'd expect.
354 */
355#define _syscall5(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
356type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
357{ \
358long __res; \
359__asm__ __volatile__ ( \
360 " mov a9, a7 \n" \
361 " mov a7, %2 \n" \
362 " mov a6, %6 \n" \
363 " mov a5, %5 \n" \
364 " mov a4, %4 \n" \
365 " mov a3, %3 \n" \
366 " movi a2, %1 \n" \
367 " syscall \n" \
368 " mov a7, a9 \n" \
369 " mov %0, a2 \n" \
370 : "=a" (__res) \
371 : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
372 "a" (arg3), "a" (arg4) \
373 : "a2", "a3", "a4", "a5", "a6", "a9" \
374 ); \
375__syscall_return(type,__res); \
376}
377
378/* Note that we save and restore the a7 frame pointer.
379 * Including a7 in the clobber list doesn't do what you'd expect.
380 */
381#define _syscall6(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
382type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
383{ \
384long __res; \
385__asm__ __volatile__ ( \
386 " mov a9, a7 \n" \
387 " mov a8, %2 \n" \
388 " mov a7, %7 \n" \
389 " mov a6, %6 \n" \
390 " mov a5, %5 \n" \
391 " mov a4, %4 \n" \
392 " mov a3, %3 \n" \
393 " movi a2, %1 \n" \
394 " syscall \n" \
395 " mov a7, a9 \n" \
396 " mov %0, a2 \n" \
397 : "=a" (__res) \
398 : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
399 "a" (arg3), "a" (arg4), "a" (arg5) \
400 : "a2", "a3", "a4", "a5", "a6", "a8", "a9" \
401 ); \
402__syscall_return(type,__res); \
403}
404
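Taken together, the register table and macros above expand into ordinary C wrappers. A hedged usage sketch (the syscall name is illustrative and assumes __NR_getpid is defined earlier in this header):

	_syscall0(pid_t, getpid)

	/* Expands to roughly: pid_t getpid(void) loads __NR_getpid into a2,
	 * issues `syscall`, copies the kernel's return value out of a2, and
	 * hands it to __syscall_return() to produce the final result. */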
405/* 221/*
406 * "Conditional" syscalls 222 * "Conditional" syscalls
407 * 223 *
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
new file mode 100644
index 00000000000..0b8e6bc5530
--- /dev/null
+++ b/include/crypto/b128ops.h
@@ -0,0 +1,80 @@
1/* b128ops.h - common 128-bit block operations
2 *
3 * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
4 * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
5 *
6 * Based on Dr Brian Gladman's (GPL'd) work published at
7 * http://fp.gladman.plus.com/cryptography_technology/index.htm
8 * See the original copyright notice below.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 */
15/*
16 ---------------------------------------------------------------------------
17 Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
18
19 LICENSE TERMS
20
21 The free distribution and use of this software in both source and binary
22 form is allowed (with or without changes) provided that:
23
24 1. distributions of this source code include the above copyright
25 notice, this list of conditions and the following disclaimer;
26
27 2. distributions in binary form include the above copyright
28 notice, this list of conditions and the following disclaimer
29 in the documentation and/or other associated materials;
30
31 3. the copyright holder's name is not used to endorse products
32 built using this software without specific written permission.
33
34 ALTERNATIVELY, provided that this notice is retained in full, this product
35 may be distributed under the terms of the GNU General Public License (GPL),
36 in which case the provisions of the GPL apply INSTEAD OF those given above.
37
38 DISCLAIMER
39
40 This software is provided 'as is' with no explicit or implied warranties
41 in respect of its properties, including, but not limited to, correctness
42 and/or fitness for purpose.
43 ---------------------------------------------------------------------------
44 Issue Date: 13/06/2006
45*/
46
47#ifndef _CRYPTO_B128OPS_H
48#define _CRYPTO_B128OPS_H
49
50#include <linux/types.h>
51
52typedef struct {
53 u64 a, b;
54} u128;
55
56typedef struct {
57 __be64 a, b;
58} be128;
59
60typedef struct {
61 __le64 b, a;
62} le128;
63
64static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
65{
66 r->a = p->a ^ q->a;
67 r->b = p->b ^ q->b;
68}
69
70static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
71{
72 u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
73}
74
75static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
76{
77 u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
78}
79
80#endif /* _CRYPTO_B128OPS_H */
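A minimal usage sketch for the helpers above (the function and variable names are illustrative, not part of the header):

	/* XOR a 16-byte mask into a 16-byte block in place; be128 is just two
	 * __be64 words, so no byte swapping is involved here. */
	static void mask_block(be128 *block, const be128 *mask)
	{
		be128_xor(block, block, mask);
	}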
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
new file mode 100644
index 00000000000..4fd31520244
--- /dev/null
+++ b/include/crypto/gf128mul.h
@@ -0,0 +1,198 @@
1/* gf128mul.h - GF(2^128) multiplication functions
2 *
3 * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
4 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
5 *
6 * Based on Dr Brian Gladman's (GPL'd) work published at
7 * http://fp.gladman.plus.com/cryptography_technology/index.htm
8 * See the original copyright notice below.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 */
15/*
16 ---------------------------------------------------------------------------
17 Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
18
19 LICENSE TERMS
20
21 The free distribution and use of this software in both source and binary
22 form is allowed (with or without changes) provided that:
23
24 1. distributions of this source code include the above copyright
25 notice, this list of conditions and the following disclaimer;
26
27 2. distributions in binary form include the above copyright
28 notice, this list of conditions and the following disclaimer
29 in the documentation and/or other associated materials;
30
31 3. the copyright holder's name is not used to endorse products
32 built using this software without specific written permission.
33
34 ALTERNATIVELY, provided that this notice is retained in full, this product
35 may be distributed under the terms of the GNU General Public License (GPL),
36 in which case the provisions of the GPL apply INSTEAD OF those given above.
37
38 DISCLAIMER
39
40 This software is provided 'as is' with no explicit or implied warranties
41 in respect of its properties, including, but not limited to, correctness
42 and/or fitness for purpose.
43 ---------------------------------------------------------------------------
44 Issue Date: 31/01/2006
45
46 An implementation of field multiplication in Galois Field GF(128)
47*/
48
49#ifndef _CRYPTO_GF128MUL_H
50#define _CRYPTO_GF128MUL_H
51
52#include <crypto/b128ops.h>
53#include <linux/slab.h>
54
55/* Comment by Rik:
56 *
57 * For some background on GF(2^128) see for example: http://-
58 * csrc.nist.gov/CryptoToolkit/modes/proposedmodes/gcm/gcm-revised-spec.pdf
59 *
60 * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
61 * be mapped to computer memory in a variety of ways. Let's examine
62 * three common cases.
63 *
64 * Take a look at the 16 binary octets below in memory order. The msb's
65 * are left and the lsb's are right. char b[16] is an array and b[0] is
66 * the first octet.
67 *
68 * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
69 * b[0] b[1] b[2] b[3] b[13] b[14] b[15]
70 *
71 * Every bit is a coefficient of some power of X. We can store the bits
72 * in every byte in little-endian order and the bytes themselves also in
73 * little endian order. I will call this lle (little-little-endian).
74 * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks
75 * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }.
76 * This format was originally implemented in gf128mul and is used
77 * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length).
78 *
79 * Another convention says: store the bits in bigendian order and the
80 * bytes also. This is bbe (big-big-endian). Now the buffer above
81 * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111,
82 * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe
83 * is partly implemented.
84 *
85 * Both of the above formats are easy to implement on big-endian
86 * machines.
87 *
88 * EME (which is patent encumbered) uses the ble format (bits are stored
89 * in big endian order and the bytes in little endian). The above buffer
90 * represents X^7 in this case and the primitive polynomial is b[0] = 0x87.
91 *
92 * The common machine word-size is smaller than 128 bits, so to make
93 * an efficient implementation we must split into machine word sizes.
 94 * This file uses 32-bit words for the moment. Machine endianness comes into
95 * play. The lle format in relation to machine endianness is discussed
96 * below by the original author of gf128mul Dr Brian Gladman.
97 *
98 * Let's look at the bbe and ble format on a little endian machine.
99 *
100 * bbe on a little endian machine u32 x[4]:
101 *
102 * MS x[0] LS MS x[1] LS
103 * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
104 * 103..96 111.104 119.112 127.120 71...64 79...72 87...80 95...88
105 *
106 * MS x[2] LS MS x[3] LS
107 * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
108 * 39...32 47...40 55...48 63...56 07...00 15...08 23...16 31...24
109 *
110 * ble on a little endian machine
111 *
112 * MS x[0] LS MS x[1] LS
113 * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
114 * 31...24 23...16 15...08 07...00 63...56 55...48 47...40 39...32
115 *
116 * MS x[2] LS MS x[3] LS
117 * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
 118 * 95...88 87...80 79...72 71...64  127.120 119.112 111.104 103..96
119 *
120 * Multiplications in GF(2^128) are mostly bit-shifts, so you see why
121 * ble (and lbe also) are easier to implement on a little-endian
122 * machine than on a big-endian machine. The converse holds for bbe
123 * and lle.
124 *
125 * Note: to have good alignment, it seems to me that it is sufficient
126 * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize
 127 * machines this will automatically be aligned to the word size and on a 64-bit
 128 * machine as well.
129 */
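To make the two main conventions above concrete, here is the reduction polynomial X^7+X^2+X^1+1 written out as 16-byte blocks (a sketch for illustration, not part of this header; the values follow directly from the comment above):

	/* lle: bits and bytes little-endian; the polynomial occupies byte 0. */
	static const u8 poly_lle[16] = { 0xe1 };	/* remaining bytes are 0 */

	/* bbe: bits and bytes big-endian; the polynomial occupies byte 15. */
	static const u8 poly_bbe[16] = { [15] = 0x87 };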
130/* Multiply a GF128 field element by x. Field elements are held in arrays
131 of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower
132 indexed bits placed in the more numerically significant bit positions
133 within bytes.
134
135 On little endian machines the bit indexes translate into the bit
136 positions within four 32-bit words in the following way
137
138 MS x[0] LS MS x[1] LS
139 ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
140 24...31 16...23 08...15 00...07 56...63 48...55 40...47 32...39
141
142 MS x[2] LS MS x[3] LS
143 ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
144 88...95 80...87 72...79 64...71 120.127 112.119 104.111 96..103
145
146 On big endian machines the bit indexes translate into the bit
147 positions within four 32-bit words in the following way
148
149 MS x[0] LS MS x[1] LS
150 ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
151 00...07 08...15 16...23 24...31 32...39 40...47 48...55 56...63
152
153 MS x[2] LS MS x[3] LS
154 ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
155 64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
156*/
157
 158/* A slow generic version of gf_mul, implemented for lle and bbe.
 159 * It multiplies a and b and puts the result in a. */
160void gf128mul_lle(be128 *a, const be128 *b);
161
162void gf128mul_bbe(be128 *a, const be128 *b);
163
164
165/* 4k table optimization */
166
167struct gf128mul_4k {
168 be128 t[256];
169};
170
171struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
172struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
173void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t);
174void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
175
176static inline void gf128mul_free_4k(struct gf128mul_4k *t)
177{
178 kfree(t);
179}
180
181
182/* 64k table optimization, implemented for lle and bbe */
183
184struct gf128mul_64k {
185 struct gf128mul_4k *t[16];
186};
187
 188/* first initialize with the constant factor with which you
 189 * want to multiply and then call gf128mul_64k_lle with the other
 190 * factor in the first argument and the table in the second;
 191 * the product is written back into the first argument. */
192struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g);
193struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
194void gf128mul_free_64k(struct gf128mul_64k *t);
195void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t);
196void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
197
198#endif /* _CRYPTO_GF128MUL_H */
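For orientation, a rough sketch of how the 4k-table interface declared above is typically driven (the function name and error handling are illustrative; it assumes gf128mul_init_4k_lle() returns NULL on allocation failure):

	static int mul_by_h(be128 *x, const be128 *h)
	{
		struct gf128mul_4k *t;

		t = gf128mul_init_4k_lle(h);	/* precompute the table for the fixed factor h */
		if (!t)
			return -ENOMEM;

		gf128mul_4k_lle(x, t);		/* x = x * h in GF(2^128), lle convention */
		gf128mul_free_4k(t);
		return 0;
	}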
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index ff433126361..e618b25b5ad 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -221,6 +221,7 @@ unifdef-y += if_bridge.h
221unifdef-y += if_ec.h 221unifdef-y += if_ec.h
222unifdef-y += if_eql.h 222unifdef-y += if_eql.h
223unifdef-y += if_ether.h 223unifdef-y += if_ether.h
224unifdef-y += if_fddi.h
224unifdef-y += if_frad.h 225unifdef-y += if_frad.h
225unifdef-y += if_ltalk.h 226unifdef-y += if_ltalk.h
226unifdef-y += if_pppox.h 227unifdef-y += if_pppox.h
@@ -282,6 +283,7 @@ unifdef-y += nvram.h
282unifdef-y += parport.h 283unifdef-y += parport.h
283unifdef-y += patchkey.h 284unifdef-y += patchkey.h
284unifdef-y += pci.h 285unifdef-y += pci.h
286unifdef-y += personality.h
285unifdef-y += pktcdvd.h 287unifdef-y += pktcdvd.h
286unifdef-y += pmu.h 288unifdef-y += pmu.h
287unifdef-y += poll.h 289unifdef-y += poll.h
@@ -337,6 +339,7 @@ unifdef-y += videodev.h
337unifdef-y += wait.h 339unifdef-y += wait.h
338unifdef-y += wanrouter.h 340unifdef-y += wanrouter.h
339unifdef-y += watchdog.h 341unifdef-y += watchdog.h
342unifdef-y += wireless.h
340unifdef-y += xfrm.h 343unifdef-y += xfrm.h
341 344
342objhdr-y += version.h 345objhdr-y += version.h
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 9e350fd44d7..3372ec6bf53 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -111,7 +111,6 @@ struct kiocb {
111 size_t ki_nbytes; /* copy of iocb->aio_nbytes */ 111 size_t ki_nbytes; /* copy of iocb->aio_nbytes */
112 char __user *ki_buf; /* remaining iocb->aio_buf */ 112 char __user *ki_buf; /* remaining iocb->aio_buf */
113 size_t ki_left; /* remaining bytes */ 113 size_t ki_left; /* remaining bytes */
114 long ki_retried; /* just for testing */
115 struct iovec ki_inline_vec; /* inline vector */ 114 struct iovec ki_inline_vec; /* inline vector */
116 struct iovec *ki_iovec; 115 struct iovec *ki_iovec;
117 unsigned long ki_nr_segs; 116 unsigned long ki_nr_segs;
@@ -238,7 +237,6 @@ do { \
238} while (0) 237} while (0)
239 238
240#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait) 239#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
241#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1)
242 240
243#include <linux/aio_abi.h> 241#include <linux/aio_abi.h>
244 242
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b2ca666d999..0e07db6cc0d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -101,6 +101,10 @@
101#define AUDIT_MAC_CIPSOV4_DEL 1408 /* NetLabel: del CIPSOv4 DOI entry */ 101#define AUDIT_MAC_CIPSOV4_DEL 1408 /* NetLabel: del CIPSOv4 DOI entry */
102#define AUDIT_MAC_MAP_ADD 1409 /* NetLabel: add LSM domain mapping */ 102#define AUDIT_MAC_MAP_ADD 1409 /* NetLabel: add LSM domain mapping */
103#define AUDIT_MAC_MAP_DEL 1410 /* NetLabel: del LSM domain mapping */ 103#define AUDIT_MAC_MAP_DEL 1410 /* NetLabel: del LSM domain mapping */
104#define AUDIT_MAC_IPSEC_ADDSA 1411 /* Add a XFRM state */
105#define AUDIT_MAC_IPSEC_DELSA 1412 /* Delete a XFRM state */
106#define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Add a XFRM policy */
107#define AUDIT_MAC_IPSEC_DELSPD 1414 /* Delete a XFRM policy */
104 108
105#define AUDIT_FIRST_KERN_ANOM_MSG 1700 109#define AUDIT_FIRST_KERN_ANOM_MSG 1700
106#define AUDIT_LAST_KERN_ANOM_MSG 1799 110#define AUDIT_LAST_KERN_ANOM_MSG 1799
@@ -377,6 +381,7 @@ extern void auditsc_get_stamp(struct audit_context *ctx,
377 struct timespec *t, unsigned int *serial); 381 struct timespec *t, unsigned int *serial);
378extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); 382extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid);
379extern uid_t audit_get_loginuid(struct audit_context *ctx); 383extern uid_t audit_get_loginuid(struct audit_context *ctx);
384extern void audit_log_task_context(struct audit_buffer *ab);
380extern int __audit_ipc_obj(struct kern_ipc_perm *ipcp); 385extern int __audit_ipc_obj(struct kern_ipc_perm *ipcp);
381extern int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); 386extern int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode);
382extern int audit_bprm(struct linux_binprm *bprm); 387extern int audit_bprm(struct linux_binprm *bprm);
@@ -449,6 +454,7 @@ extern int audit_n_rules;
449#define audit_inode_update(i) do { ; } while (0) 454#define audit_inode_update(i) do { ; } while (0)
450#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) 455#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
451#define audit_get_loginuid(c) ({ -1; }) 456#define audit_get_loginuid(c) ({ -1; })
457#define audit_log_task_context(b) do { ; } while (0)
452#define audit_ipc_obj(i) ({ 0; }) 458#define audit_ipc_obj(i) ({ 0; })
453#define audit_ipc_set_perm(q,u,g,m) ({ 0; }) 459#define audit_ipc_set_perm(q,u,g,m) ({ 0; })
454#define audit_bprm(p) ({ 0; }) 460#define audit_bprm(p) ({ 0; })
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 31e9abb6d97..2275f274870 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -119,8 +119,7 @@ extern void *alloc_large_system_hash(const char *tablename,
119 unsigned int *_hash_mask, 119 unsigned int *_hash_mask,
120 unsigned long limit); 120 unsigned long limit);
121 121
122#define HASH_HIGHMEM 0x00000001 /* Consider highmem? */ 122#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
123#define HASH_EARLY 0x00000002 /* Allocating during early boot? */
124 123
125/* Only NUMA needs hash distribution. 124/* Only NUMA needs hash distribution.
126 * IA64 is known to have sufficient vmalloc space. 125 * IA64 is known to have sufficient vmalloc space.
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
new file mode 100644
index 00000000000..777dbf695d4
--- /dev/null
+++ b/include/linux/bottom_half.h
@@ -0,0 +1,10 @@
1#ifndef _LINUX_BH_H
2#define _LINUX_BH_H
3
4extern void local_bh_disable(void);
5extern void __local_bh_enable(void);
6extern void _local_bh_enable(void);
7extern void local_bh_enable(void);
8extern void local_bh_enable_ip(unsigned long ip);
9
10#endif /* _LINUX_BH_H */
diff --git a/include/linux/carta_random32.h b/include/linux/carta_random32.h
deleted file mode 100644
index f6f3bd9f20b..00000000000
--- a/include/linux/carta_random32.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Fast, simple, yet decent quality random number generator based on
3 * a paper by David G. Carta ("Two Fast Implementations of the
4 * `Minimal Standard' Random Number Generator," Communications of the
5 * ACM, January, 1990).
6 *
7 * Copyright (c) 2002-2006 Hewlett-Packard Development Company, L.P.
8 * Contributed by Stephane Eranian <eranian@hpl.hp.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
22 * 02111-1307 USA
23 */
24#ifndef _LINUX_CARTA_RANDOM32_H_
25#define _LINUX_CARTA_RANDOM32_H_
26
27u64 carta_random32(u64 seed);
28
29#endif /* _LINUX_CARTA_RANDOM32_H_ */
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 6e27f42e3a5..cb57c30081a 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -80,7 +80,7 @@ typedef __u32 DriverVer_type;
80#define HWORD __u16 80#define HWORD __u16
81#define DWORD __u32 81#define DWORD __u32
82 82
83#define CISS_MAX_LUN 16 83#define CISS_MAX_LUN 1024
84 84
85#define LEVEL2LUN 1 // index into Target(x) structure, due to byte swapping 85#define LEVEL2LUN 1 // index into Target(x) structure, due to byte swapping
86#define LEVEL3LUN 0 86#define LEVEL3LUN 0
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index ee5f53f2ca1..f309b00e986 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -2,6 +2,10 @@
2#define _LINUX_CDEV_H 2#define _LINUX_CDEV_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <linux/kobject.h>
6#include <linux/kdev_t.h>
7#include <linux/list.h>
8
5struct cdev { 9struct cdev {
6 struct kobject kobj; 10 struct kobject kobj;
7 struct module *owner; 11 struct module *owner;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f02d71bf689..bfb520212d7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -24,10 +24,11 @@
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25#include <linux/cpumask.h> 25#include <linux/cpumask.h>
26#include <asm/semaphore.h> 26#include <asm/semaphore.h>
27#include <linux/mutex.h>
27 28
28struct cpu { 29struct cpu {
29 int node_id; /* The node which contains the CPU */ 30 int node_id; /* The node which contains the CPU */
30 int no_control; /* Should the sysfs control file be created? */ 31 int hotpluggable; /* creates sysfs control file if hotpluggable */
31 struct sys_device sysdev; 32 struct sys_device sysdev;
32}; 33};
33 34
@@ -74,6 +75,17 @@ extern struct sysdev_class cpu_sysdev_class;
74 75
75#ifdef CONFIG_HOTPLUG_CPU 76#ifdef CONFIG_HOTPLUG_CPU
76/* Stop CPUs going up and down. */ 77/* Stop CPUs going up and down. */
78
79static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
80{
81 mutex_lock(cpu_hp_mutex);
82}
83
84static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
85{
86 mutex_unlock(cpu_hp_mutex);
87}
88
77extern void lock_cpu_hotplug(void); 89extern void lock_cpu_hotplug(void);
78extern void unlock_cpu_hotplug(void); 90extern void unlock_cpu_hotplug(void);
79#define hotcpu_notifier(fn, pri) { \ 91#define hotcpu_notifier(fn, pri) { \
@@ -85,17 +97,24 @@ extern void unlock_cpu_hotplug(void);
85#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) 97#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
86int cpu_down(unsigned int cpu); 98int cpu_down(unsigned int cpu);
87#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 99#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
88#else 100
101#else /* CONFIG_HOTPLUG_CPU */
102
103static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
104{ }
105static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
106{ }
107
89#define lock_cpu_hotplug() do { } while (0) 108#define lock_cpu_hotplug() do { } while (0)
90#define unlock_cpu_hotplug() do { } while (0) 109#define unlock_cpu_hotplug() do { } while (0)
91#define lock_cpu_hotplug_interruptible() 0 110#define lock_cpu_hotplug_interruptible() 0
92#define hotcpu_notifier(fn, pri) do { } while (0) 111#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
93#define register_hotcpu_notifier(nb) do { } while (0) 112#define register_hotcpu_notifier(nb) do { (void)(nb); } while (0)
94#define unregister_hotcpu_notifier(nb) do { } while (0) 113#define unregister_hotcpu_notifier(nb) do { (void)(nb); } while (0)
95 114
96/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ 115/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
97static inline int cpu_is_offline(int cpu) { return 0; } 116static inline int cpu_is_offline(int cpu) { return 0; }
98#endif 117#endif /* CONFIG_HOTPLUG_CPU */
99 118
100#ifdef CONFIG_SUSPEND_SMP 119#ifdef CONFIG_SUSPEND_SMP
101extern int disable_nonboot_cpus(void); 120extern int disable_nonboot_cpus(void);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 4d8adf66368..8821e1f75b4 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -23,6 +23,7 @@ extern void cpuset_fork(struct task_struct *p);
23extern void cpuset_exit(struct task_struct *p); 23extern void cpuset_exit(struct task_struct *p);
24extern cpumask_t cpuset_cpus_allowed(struct task_struct *p); 24extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
25extern nodemask_t cpuset_mems_allowed(struct task_struct *p); 25extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
26#define cpuset_current_mems_allowed (current->mems_allowed)
26void cpuset_init_current_mems_allowed(void); 27void cpuset_init_current_mems_allowed(void);
27void cpuset_update_task_memory_state(void); 28void cpuset_update_task_memory_state(void);
28#define cpuset_nodes_subset_current_mems_allowed(nodes) \ 29#define cpuset_nodes_subset_current_mems_allowed(nodes) \
@@ -45,7 +46,7 @@ extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
45extern int cpuset_memory_pressure_enabled; 46extern int cpuset_memory_pressure_enabled;
46extern void __cpuset_memory_pressure_bump(void); 47extern void __cpuset_memory_pressure_bump(void);
47 48
48extern struct file_operations proc_cpuset_operations; 49extern const struct file_operations proc_cpuset_operations;
49extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer); 50extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
50 51
51extern void cpuset_lock(void); 52extern void cpuset_lock(void);
@@ -83,6 +84,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
83 return node_possible_map; 84 return node_possible_map;
84} 85}
85 86
87#define cpuset_current_mems_allowed (node_online_map)
86static inline void cpuset_init_current_mems_allowed(void) {} 88static inline void cpuset_init_current_mems_allowed(void) {}
87static inline void cpuset_update_task_memory_state(void) {} 89static inline void cpuset_update_task_memory_state(void) {}
88#define cpuset_nodes_subset_current_mems_allowed(nodes) (1) 90#define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6485e9716b3..4aa9046601d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -241,12 +241,8 @@ int crypto_unregister_alg(struct crypto_alg *alg);
241 * Algorithm query interface. 241 * Algorithm query interface.
242 */ 242 */
243#ifdef CONFIG_CRYPTO 243#ifdef CONFIG_CRYPTO
244int crypto_alg_available(const char *name, u32 flags)
245 __deprecated_for_modules;
246int crypto_has_alg(const char *name, u32 type, u32 mask); 244int crypto_has_alg(const char *name, u32 type, u32 mask);
247#else 245#else
248static int crypto_alg_available(const char *name, u32 flags)
249 __deprecated_for_modules;
250static inline int crypto_alg_available(const char *name, u32 flags) 246static inline int crypto_alg_available(const char *name, u32 flags)
251{ 247{
252 return 0; 248 return 0;
@@ -707,16 +703,6 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
707 dst, src); 703 dst, src);
708} 704}
709 705
710void crypto_digest_init(struct crypto_tfm *tfm) __deprecated_for_modules;
711void crypto_digest_update(struct crypto_tfm *tfm,
712 struct scatterlist *sg, unsigned int nsg)
713 __deprecated_for_modules;
714void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
715 __deprecated_for_modules;
716void crypto_digest_digest(struct crypto_tfm *tfm,
717 struct scatterlist *sg, unsigned int nsg, u8 *out)
718 __deprecated_for_modules;
719
720static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) 706static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
721{ 707{
722 return (struct crypto_hash *)tfm; 708 return (struct crypto_hash *)tfm;
@@ -729,14 +715,6 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
729 return __crypto_hash_cast(tfm); 715 return __crypto_hash_cast(tfm);
730} 716}
731 717
732static int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key,
733 unsigned int keylen) __deprecated;
734static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
735 const u8 *key, unsigned int keylen)
736{
737 return tfm->crt_hash.setkey(crypto_hash_cast(tfm), key, keylen);
738}
739
740static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, 718static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
741 u32 type, u32 mask) 719 u32 type, u32 mask)
742{ 720{
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 952bee79a8f..a1c10b0c4cf 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -24,7 +24,7 @@ extern int debug_locks_off(void);
24 int __ret = 0; \ 24 int __ret = 0; \
25 \ 25 \
26 if (unlikely(c)) { \ 26 if (unlikely(c)) { \
27 if (debug_locks_off()) \ 27 if (debug_locks_silent || debug_locks_off()) \
28 WARN_ON(1); \ 28 WARN_ON(1); \
29 __ret = 1; \ 29 __ret = 1; \
30 } \ 30 } \
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 561e2a77805..55d1ca5e60f 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -30,7 +30,7 @@
30#ifdef CONFIG_TASK_DELAY_ACCT 30#ifdef CONFIG_TASK_DELAY_ACCT
31 31
32extern int delayacct_on; /* Delay accounting turned on/off */ 32extern int delayacct_on; /* Delay accounting turned on/off */
33extern kmem_cache_t *delayacct_cache; 33extern struct kmem_cache *delayacct_cache;
34extern void delayacct_init(void); 34extern void delayacct_init(void);
35extern void __delayacct_tsk_init(struct task_struct *); 35extern void __delayacct_tsk_init(struct task_struct *);
36extern void __delayacct_tsk_exit(struct task_struct *); 36extern void __delayacct_tsk_exit(struct task_struct *);
diff --git a/include/linux/device.h b/include/linux/device.h
index 583a341e016..49ab53ce92d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -371,6 +371,9 @@ struct device {
371 core doesn't touch it */ 371 core doesn't touch it */
372 struct dev_pm_info power; 372 struct dev_pm_info power;
373 373
374#ifdef CONFIG_NUMA
375 int numa_node; /* NUMA node this device is close to */
376#endif
374 u64 *dma_mask; /* dma mask (if dma'able device) */ 377 u64 *dma_mask; /* dma mask (if dma'able device) */
375 u64 coherent_dma_mask;/* Like dma_mask, but for 378 u64 coherent_dma_mask;/* Like dma_mask, but for
376 alloc_coherent mappings as 379 alloc_coherent mappings as
@@ -394,6 +397,25 @@ struct device {
394 void (*release)(struct device * dev); 397 void (*release)(struct device * dev);
395}; 398};
396 399
400#ifdef CONFIG_NUMA
401static inline int dev_to_node(struct device *dev)
402{
403 return dev->numa_node;
404}
405static inline void set_dev_node(struct device *dev, int node)
406{
407 dev->numa_node = node;
408}
409#else
410static inline int dev_to_node(struct device *dev)
411{
412 return -1;
413}
414static inline void set_dev_node(struct device *dev, int node)
415{
416}
417#endif
418
397static inline void * 419static inline void *
398dev_get_drvdata (struct device *dev) 420dev_get_drvdata (struct device *dev)
399{ 421{
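One common reason for the accessor added above is placing driver allocations on the device's NUMA node. A hedged sketch (kmalloc_node() is the standard node-aware allocator; the ring-buffer framing is illustrative):

	/* Allocate a descriptor ring close to the device; dev_to_node() returns
	 * -1 (no preference) when CONFIG_NUMA is not set, which kmalloc_node()
	 * treats as "any node". */
	static void *alloc_ring_near(struct device *dev, size_t size)
	{
		return kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
	}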
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 66d621dbcb6..df1c91855f0 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -300,8 +300,9 @@ extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
300extern int __init efi_uart_console_only (void); 300extern int __init efi_uart_console_only (void);
301extern void efi_initialize_iomem_resources(struct resource *code_resource, 301extern void efi_initialize_iomem_resources(struct resource *code_resource,
302 struct resource *data_resource); 302 struct resource *data_resource);
303extern unsigned long __init efi_get_time(void); 303extern unsigned long efi_get_time(void);
304extern int __init efi_set_rtc_mmss(unsigned long nowtime); 304extern int __init efi_set_rtc_mmss(unsigned long nowtime);
305extern int is_available_memory(efi_memory_desc_t * md);
305extern struct efi_memory_map memmap; 306extern struct efi_memory_map memmap;
306 307
307/** 308/**
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 743d5c8e6d3..60713e6ea29 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -6,6 +6,8 @@
6#include <linux/elf-em.h> 6#include <linux/elf-em.h>
7#include <asm/elf.h> 7#include <asm/elf.h>
8 8
9struct file;
10
9#ifndef elf_read_implies_exec 11#ifndef elf_read_implies_exec
10 /* Executables for which elf_read_implies_exec() returns TRUE will 12 /* Executables for which elf_read_implies_exec() returns TRUE will
11 have the READ_IMPLIES_EXEC personality flag set automatically. 13 have the READ_IMPLIES_EXEC personality flag set automatically.
@@ -358,6 +360,7 @@ extern Elf32_Dyn _DYNAMIC [];
358#define elfhdr elf32_hdr 360#define elfhdr elf32_hdr
359#define elf_phdr elf32_phdr 361#define elf_phdr elf32_phdr
360#define elf_note elf32_note 362#define elf_note elf32_note
363#define elf_addr_t Elf32_Off
361 364
362#else 365#else
363 366
@@ -365,6 +368,7 @@ extern Elf64_Dyn _DYNAMIC [];
365#define elfhdr elf64_hdr 368#define elfhdr elf64_hdr
366#define elf_phdr elf64_phdr 369#define elf_phdr elf64_phdr
367#define elf_note elf64_note 370#define elf_note elf64_note
371#define elf_addr_t Elf64_Off
368 372
369#endif 373#endif
370 374
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index ce0e6109aff..8c43b13a02f 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -109,74 +109,32 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
109 * been done yet. 109 * been done yet.
110 */ 110 */
111 111
112void ext3_journal_abort_handle(const char *caller, const char *err_fn, 112static inline void ext3_journal_release_buffer(handle_t *handle,
113 struct buffer_head *bh, handle_t *handle, int err); 113 struct buffer_head *bh)
114
115static inline int
116__ext3_journal_get_undo_access(const char *where, handle_t *handle,
117 struct buffer_head *bh)
118{ 114{
119 int err = journal_get_undo_access(handle, bh); 115 journal_release_buffer(handle, bh);
120 if (err)
121 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
122 return err;
123} 116}
124 117
125static inline int 118void ext3_journal_abort_handle(const char *caller, const char *err_fn,
126__ext3_journal_get_write_access(const char *where, handle_t *handle, 119 struct buffer_head *bh, handle_t *handle, int err);
127 struct buffer_head *bh)
128{
129 int err = journal_get_write_access(handle, bh);
130 if (err)
131 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
132 return err;
133}
134 120
135static inline void 121int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
136ext3_journal_release_buffer(handle_t *handle, struct buffer_head *bh) 122 struct buffer_head *bh);
137{
138 journal_release_buffer(handle, bh);
139}
140 123
141static inline int 124int __ext3_journal_get_write_access(const char *where, handle_t *handle,
142__ext3_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh) 125 struct buffer_head *bh);
143{
144 int err = journal_forget(handle, bh);
145 if (err)
146 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
147 return err;
148}
149 126
150static inline int 127int __ext3_journal_forget(const char *where, handle_t *handle,
151__ext3_journal_revoke(const char *where, handle_t *handle, 128 struct buffer_head *bh);
152 unsigned long blocknr, struct buffer_head *bh)
153{
154 int err = journal_revoke(handle, blocknr, bh);
155 if (err)
156 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
157 return err;
158}
159 129
160static inline int 130int __ext3_journal_revoke(const char *where, handle_t *handle,
161__ext3_journal_get_create_access(const char *where, 131 unsigned long blocknr, struct buffer_head *bh);
162 handle_t *handle, struct buffer_head *bh)
163{
164 int err = journal_get_create_access(handle, bh);
165 if (err)
166 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
167 return err;
168}
169 132
170static inline int 133int __ext3_journal_get_create_access(const char *where,
171__ext3_journal_dirty_metadata(const char *where, 134 handle_t *handle, struct buffer_head *bh);
172 handle_t *handle, struct buffer_head *bh)
173{
174 int err = journal_dirty_metadata(handle, bh);
175 if (err)
176 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
177 return err;
178}
179 135
136int __ext3_journal_dirty_metadata(const char *where,
137 handle_t *handle, struct buffer_head *bh);
180 138
181#define ext3_journal_get_undo_access(handle, bh) \ 139#define ext3_journal_get_undo_access(handle, bh) \
182 __ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh)) 140 __ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh))
diff --git a/include/linux/ext4_jbd2.h b/include/linux/ext4_jbd2.h
index 72dd631912e..d716e6392cf 100644
--- a/include/linux/ext4_jbd2.h
+++ b/include/linux/ext4_jbd2.h
@@ -114,74 +114,32 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
114 * been done yet. 114 * been done yet.
115 */ 115 */
116 116
117void ext4_journal_abort_handle(const char *caller, const char *err_fn, 117static inline void ext4_journal_release_buffer(handle_t *handle,
118 struct buffer_head *bh, handle_t *handle, int err); 118 struct buffer_head *bh)
119
120static inline int
121__ext4_journal_get_undo_access(const char *where, handle_t *handle,
122 struct buffer_head *bh)
123{ 119{
124 int err = jbd2_journal_get_undo_access(handle, bh); 120 jbd2_journal_release_buffer(handle, bh);
125 if (err)
126 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
127 return err;
128} 121}
129 122
130static inline int 123void ext4_journal_abort_handle(const char *caller, const char *err_fn,
131__ext4_journal_get_write_access(const char *where, handle_t *handle, 124 struct buffer_head *bh, handle_t *handle, int err);
132 struct buffer_head *bh)
133{
134 int err = jbd2_journal_get_write_access(handle, bh);
135 if (err)
136 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
137 return err;
138}
139 125
140static inline void 126int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
141ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh) 127 struct buffer_head *bh);
142{
143 jbd2_journal_release_buffer(handle, bh);
144}
145 128
146static inline int 129int __ext4_journal_get_write_access(const char *where, handle_t *handle,
147__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh) 130 struct buffer_head *bh);
148{
149 int err = jbd2_journal_forget(handle, bh);
150 if (err)
151 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
152 return err;
153}
154 131
155static inline int 132int __ext4_journal_forget(const char *where, handle_t *handle,
156__ext4_journal_revoke(const char *where, handle_t *handle, 133 struct buffer_head *bh);
157 ext4_fsblk_t blocknr, struct buffer_head *bh)
158{
159 int err = jbd2_journal_revoke(handle, blocknr, bh);
160 if (err)
161 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
162 return err;
163}
164 134
165static inline int 135int __ext4_journal_revoke(const char *where, handle_t *handle,
166__ext4_journal_get_create_access(const char *where, 136 ext4_fsblk_t blocknr, struct buffer_head *bh);
167 handle_t *handle, struct buffer_head *bh)
168{
169 int err = jbd2_journal_get_create_access(handle, bh);
170 if (err)
171 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
172 return err;
173}
174 137
175static inline int 138int __ext4_journal_get_create_access(const char *where,
176__ext4_journal_dirty_metadata(const char *where, 139 handle_t *handle, struct buffer_head *bh);
177 handle_t *handle, struct buffer_head *bh)
178{
179 int err = jbd2_journal_dirty_metadata(handle, bh);
180 if (err)
181 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
182 return err;
183}
184 140
141int __ext4_journal_dirty_metadata(const char *where,
142 handle_t *handle, struct buffer_head *bh);
185 143
186#define ext4_journal_get_undo_access(handle, bh) \ 144#define ext4_journal_get_undo_access(handle, bh) \
187 __ext4_journal_get_undo_access(__FUNCTION__, (handle), (bh)) 145 __ext4_journal_get_undo_access(__FUNCTION__, (handle), (bh))
diff --git a/include/linux/file.h b/include/linux/file.h
index 74183e6f7f4..6e77b9177f9 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -64,6 +64,8 @@ struct files_struct {
64 64
65#define files_fdtable(files) (rcu_dereference((files)->fdt)) 65#define files_fdtable(files) (rcu_dereference((files)->fdt))
66 66
67extern struct kmem_cache *filp_cachep;
68
67extern void FASTCALL(__fput(struct file *)); 69extern void FASTCALL(__fput(struct file *));
68extern void FASTCALL(fput(struct file *)); 70extern void FASTCALL(fput(struct file *));
69 71
@@ -114,4 +116,6 @@ struct files_struct *get_files_struct(struct task_struct *);
114void FASTCALL(put_files_struct(struct files_struct *fs)); 116void FASTCALL(put_files_struct(struct files_struct *fs));
115void reset_files_struct(struct task_struct *, struct files_struct *); 117void reset_files_struct(struct task_struct *, struct files_struct *);
116 118
119extern struct kmem_cache *files_cachep;
120
117#endif /* __LINUX_FILE_H */ 121#endif /* __LINUX_FILE_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
new file mode 100644
index 00000000000..6e05e3e7ce3
--- /dev/null
+++ b/include/linux/freezer.h
@@ -0,0 +1,87 @@
1/* Freezer declarations */
2
3#ifdef CONFIG_PM
4/*
5 * Check if a process has been frozen
6 */
7static inline int frozen(struct task_struct *p)
8{
9 return p->flags & PF_FROZEN;
10}
11
12/*
13 * Check if there is a request to freeze a process
14 */
15static inline int freezing(struct task_struct *p)
16{
17 return p->flags & PF_FREEZE;
18}
19
20/*
21 * Request that a process be frozen
22 * FIXME: SMP problem. We may not modify other process' flags!
23 */
24static inline void freeze(struct task_struct *p)
25{
26 p->flags |= PF_FREEZE;
27}
28
29/*
30 * Sometimes we may need to cancel the previous 'freeze' request
31 */
32static inline void do_not_freeze(struct task_struct *p)
33{
34 p->flags &= ~PF_FREEZE;
35}
36
37/*
38 * Wake up a frozen process
39 */
40static inline int thaw_process(struct task_struct *p)
41{
42 if (frozen(p)) {
43 p->flags &= ~PF_FROZEN;
44 wake_up_process(p);
45 return 1;
46 }
47 return 0;
48}
49
50/*
51 * freezing is complete, mark process as frozen
52 */
53static inline void frozen_process(struct task_struct *p)
54{
55 p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
56}
57
58extern void refrigerator(void);
59extern int freeze_processes(void);
60extern void thaw_processes(void);
61
62static inline int try_to_freeze(void)
63{
64 if (freezing(current)) {
65 refrigerator();
66 return 1;
67 } else
68 return 0;
69}
70
71extern void thaw_some_processes(int all);
72
73#else
74static inline int frozen(struct task_struct *p) { return 0; }
75static inline int freezing(struct task_struct *p) { return 0; }
76static inline void freeze(struct task_struct *p) { BUG(); }
77static inline int thaw_process(struct task_struct *p) { return 1; }
78static inline void frozen_process(struct task_struct *p) { BUG(); }
79
80static inline void refrigerator(void) {}
81static inline int freeze_processes(void) { BUG(); return 0; }
82static inline void thaw_processes(void) {}
83
84static inline int try_to_freeze(void) { return 0; }
85
86
87#endif
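A minimal sketch of how a kernel thread cooperates with the freezer helpers above (the loop body and timing are illustrative; kthread_should_stop() and schedule_timeout_interruptible() come from the usual kthread/scheduler interfaces):

	static int example_thread(void *unused)
	{
		while (!kthread_should_stop()) {
			try_to_freeze();	/* parks in refrigerator() if a freeze was requested */
			/* ... perform one unit of work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}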
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cac7b1ef954..70b99fbb560 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -543,19 +543,22 @@ struct inode {
543 struct list_head i_dentry; 543 struct list_head i_dentry;
544 unsigned long i_ino; 544 unsigned long i_ino;
545 atomic_t i_count; 545 atomic_t i_count;
546 umode_t i_mode;
547 unsigned int i_nlink; 546 unsigned int i_nlink;
548 uid_t i_uid; 547 uid_t i_uid;
549 gid_t i_gid; 548 gid_t i_gid;
550 dev_t i_rdev; 549 dev_t i_rdev;
550 unsigned long i_version;
551 loff_t i_size; 551 loff_t i_size;
552#ifdef __NEED_I_SIZE_ORDERED
553 seqcount_t i_size_seqcount;
554#endif
552 struct timespec i_atime; 555 struct timespec i_atime;
553 struct timespec i_mtime; 556 struct timespec i_mtime;
554 struct timespec i_ctime; 557 struct timespec i_ctime;
555 unsigned int i_blkbits; 558 unsigned int i_blkbits;
556 unsigned long i_version;
557 blkcnt_t i_blocks; 559 blkcnt_t i_blocks;
558 unsigned short i_bytes; 560 unsigned short i_bytes;
561 umode_t i_mode;
559 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 562 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
560 struct mutex i_mutex; 563 struct mutex i_mutex;
561 struct rw_semaphore i_alloc_sem; 564 struct rw_semaphore i_alloc_sem;
@@ -598,9 +601,6 @@ struct inode {
598 void *i_security; 601 void *i_security;
599#endif 602#endif
600 void *i_private; /* fs or device private pointer */ 603 void *i_private; /* fs or device private pointer */
601#ifdef __NEED_I_SIZE_ORDERED
602 seqcount_t i_size_seqcount;
603#endif
604}; 604};
605 605
606/* 606/*
@@ -636,7 +636,7 @@ extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
636 * cmpxchg8b without the need of the lock prefix). For SMP compiles 636 * cmpxchg8b without the need of the lock prefix). For SMP compiles
637 * and 64bit archs it makes no difference if preempt is enabled or not. 637 * and 64bit archs it makes no difference if preempt is enabled or not.
638 */ 638 */
639static inline loff_t i_size_read(struct inode *inode) 639static inline loff_t i_size_read(const struct inode *inode)
640{ 640{
641#if BITS_PER_LONG==32 && defined(CONFIG_SMP) 641#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
642 loff_t i_size; 642 loff_t i_size;
@@ -679,12 +679,12 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
679#endif 679#endif
680} 680}
681 681
682static inline unsigned iminor(struct inode *inode) 682static inline unsigned iminor(const struct inode *inode)
683{ 683{
684 return MINOR(inode->i_rdev); 684 return MINOR(inode->i_rdev);
685} 685}
686 686
687static inline unsigned imajor(struct inode *inode) 687static inline unsigned imajor(const struct inode *inode)
688{ 688{
689 return MAJOR(inode->i_rdev); 689 return MAJOR(inode->i_rdev);
690} 690}
@@ -1481,7 +1481,9 @@ extern char * getname(const char __user *);
1481extern void __init vfs_caches_init_early(void); 1481extern void __init vfs_caches_init_early(void);
1482extern void __init vfs_caches_init(unsigned long); 1482extern void __init vfs_caches_init(unsigned long);
1483 1483
1484#define __getname() kmem_cache_alloc(names_cachep, SLAB_KERNEL) 1484extern struct kmem_cache *names_cachep;
1485
1486#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
1485#define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) 1487#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
1486#ifndef CONFIG_AUDITSYSCALL 1488#ifndef CONFIG_AUDITSYSCALL
1487#define putname(name) __putname(name) 1489#define putname(name) __putname(name)
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index c623d12a486..11a36ceddf7 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -18,6 +18,8 @@ struct fs_struct {
18 .umask = 0022, \ 18 .umask = 0022, \
19} 19}
20 20
21extern struct kmem_cache *fs_cachep;
22
21extern void exit_fs(struct task_struct *); 23extern void exit_fs(struct task_struct *);
22extern void set_fs_altroot(void); 24extern void set_fs_altroot(void);
23extern void set_fs_root(struct fs_struct *, struct vfsmount *, struct dentry *); 25extern void set_fs_root(struct fs_struct *, struct vfsmount *, struct dentry *);
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9fc48a674b8..534744efe30 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -15,7 +15,7 @@
15#define FUSE_KERNEL_VERSION 7 15#define FUSE_KERNEL_VERSION 7
16 16
17/** Minor version number of this interface */ 17/** Minor version number of this interface */
18#define FUSE_KERNEL_MINOR_VERSION 7 18#define FUSE_KERNEL_MINOR_VERSION 8
19 19
20/** The node ID of the root inode */ 20/** The node ID of the root inode */
21#define FUSE_ROOT_ID 1 21#define FUSE_ROOT_ID 1
@@ -92,6 +92,11 @@ struct fuse_file_lock {
92#define FUSE_ASYNC_READ (1 << 0) 92#define FUSE_ASYNC_READ (1 << 0)
93#define FUSE_POSIX_LOCKS (1 << 1) 93#define FUSE_POSIX_LOCKS (1 << 1)
94 94
95/**
96 * Release flags
97 */
98#define FUSE_RELEASE_FLUSH (1 << 0)
99
95enum fuse_opcode { 100enum fuse_opcode {
96 FUSE_LOOKUP = 1, 101 FUSE_LOOKUP = 1,
97 FUSE_FORGET = 2, /* no reply */ 102 FUSE_FORGET = 2, /* no reply */
@@ -127,6 +132,8 @@ enum fuse_opcode {
127 FUSE_ACCESS = 34, 132 FUSE_ACCESS = 34,
128 FUSE_CREATE = 35, 133 FUSE_CREATE = 35,
129 FUSE_INTERRUPT = 36, 134 FUSE_INTERRUPT = 36,
135 FUSE_BMAP = 37,
136 FUSE_DESTROY = 38,
130}; 137};
131 138
132/* The read buffer is required to be at least 8k, but may be much larger */ 139/* The read buffer is required to be at least 8k, but may be much larger */
@@ -205,12 +212,13 @@ struct fuse_open_out {
205struct fuse_release_in { 212struct fuse_release_in {
206 __u64 fh; 213 __u64 fh;
207 __u32 flags; 214 __u32 flags;
208 __u32 padding; 215 __u32 release_flags;
216 __u64 lock_owner;
209}; 217};
210 218
211struct fuse_flush_in { 219struct fuse_flush_in {
212 __u64 fh; 220 __u64 fh;
213 __u32 flush_flags; 221 __u32 unused;
214 __u32 padding; 222 __u32 padding;
215 __u64 lock_owner; 223 __u64 lock_owner;
216}; 224};
@@ -296,6 +304,16 @@ struct fuse_interrupt_in {
296 __u64 unique; 304 __u64 unique;
297}; 305};
298 306
307struct fuse_bmap_in {
308 __u64 block;
309 __u32 blocksize;
310 __u32 padding;
311};
312
313struct fuse_bmap_out {
314 __u64 block;
315};
316
299struct fuse_in_header { 317struct fuse_in_header {
300 __u32 len; 318 __u32 len;
301 __u32 opcode; 319 __u32 opcode;
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 9049dc65ae5..f7a93770e1b 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -17,6 +17,9 @@ struct genlmsghdr {
17#define GENL_HDRLEN NLMSG_ALIGN(sizeof(struct genlmsghdr)) 17#define GENL_HDRLEN NLMSG_ALIGN(sizeof(struct genlmsghdr))
18 18
19#define GENL_ADMIN_PERM 0x01 19#define GENL_ADMIN_PERM 0x01
20#define GENL_CMD_CAP_DO 0x02
21#define GENL_CMD_CAP_DUMP 0x04
22#define GENL_CMD_CAP_HASPOL 0x08
20 23
21/* 24/*
22 * List of reserved static generic netlink identifiers: 25 * List of reserved static generic netlink identifiers:
@@ -58,9 +61,6 @@ enum {
58 CTRL_ATTR_OP_UNSPEC, 61 CTRL_ATTR_OP_UNSPEC,
59 CTRL_ATTR_OP_ID, 62 CTRL_ATTR_OP_ID,
60 CTRL_ATTR_OP_FLAGS, 63 CTRL_ATTR_OP_FLAGS,
61 CTRL_ATTR_OP_POLICY,
62 CTRL_ATTR_OP_DOIT,
63 CTRL_ATTR_OP_DUMPIT,
64 __CTRL_ATTR_OP_MAX, 64 __CTRL_ATTR_OP_MAX,
65}; 65};
66 66
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bf2b6bc3f6f..00c314aedab 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -116,6 +116,9 @@ static inline enum zone_type gfp_zone(gfp_t flags)
116#ifndef HAVE_ARCH_FREE_PAGE 116#ifndef HAVE_ARCH_FREE_PAGE
117static inline void arch_free_page(struct page *page, int order) { } 117static inline void arch_free_page(struct page *page, int order) { }
118#endif 118#endif
119#ifndef HAVE_ARCH_ALLOC_PAGE
120static inline void arch_alloc_page(struct page *page, int order) { }
121#endif
119 122
120extern struct page * 123extern struct page *
121FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *)); 124FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index a7ae7c177ca..8b7e4c1e32a 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -54,8 +54,13 @@ struct gfs2_inum {
54 __be64 no_addr; 54 __be64 no_addr;
55}; 55};
56 56
57static inline int gfs2_inum_equal(const struct gfs2_inum *ino1, 57struct gfs2_inum_host {
58 const struct gfs2_inum *ino2) 58 __u64 no_formal_ino;
59 __u64 no_addr;
60};
61
62static inline int gfs2_inum_equal(const struct gfs2_inum_host *ino1,
63 const struct gfs2_inum_host *ino2)
59{ 64{
60 return ino1->no_formal_ino == ino2->no_formal_ino && 65 return ino1->no_formal_ino == ino2->no_formal_ino &&
61 ino1->no_addr == ino2->no_addr; 66 ino1->no_addr == ino2->no_addr;
@@ -89,6 +94,12 @@ struct gfs2_meta_header {
89 __be32 __pad1; /* Was incarnation number in gfs1 */ 94 __be32 __pad1; /* Was incarnation number in gfs1 */
90}; 95};
91 96
97struct gfs2_meta_header_host {
98 __u32 mh_magic;
99 __u32 mh_type;
100 __u32 mh_format;
101};
102
92/* 103/*
93 * super-block structure 104 * super-block structure
94 * 105 *
@@ -128,6 +139,23 @@ struct gfs2_sb {
128 /* In gfs1, quota and license dinodes followed */ 139 /* In gfs1, quota and license dinodes followed */
129}; 140};
130 141
142struct gfs2_sb_host {
143 struct gfs2_meta_header_host sb_header;
144
145 __u32 sb_fs_format;
146 __u32 sb_multihost_format;
147
148 __u32 sb_bsize;
149 __u32 sb_bsize_shift;
150
151 struct gfs2_inum_host sb_master_dir; /* Was jindex dinode in gfs1 */
152 struct gfs2_inum_host sb_root_dir;
153
154 char sb_lockproto[GFS2_LOCKNAME_LEN];
155 char sb_locktable[GFS2_LOCKNAME_LEN];
156 /* In gfs1, quota and license dinodes followed */
157};
158
131/* 159/*
132 * resource index structure 160 * resource index structure
133 */ 161 */
@@ -145,6 +173,14 @@ struct gfs2_rindex {
145 __u8 ri_reserved[64]; 173 __u8 ri_reserved[64];
146}; 174};
147 175
176struct gfs2_rindex_host {
177 __u64 ri_addr; /* grp block disk address */
178 __u64 ri_data0; /* first data location */
179 __u32 ri_length; /* length of rgrp header in fs blocks */
180 __u32 ri_data; /* num of data blocks in rgrp */
181 __u32 ri_bitbytes; /* number of bytes in data bitmaps */
182};
183
148/* 184/*
149 * resource group header structure 185 * resource group header structure
150 */ 186 */
@@ -176,6 +212,13 @@ struct gfs2_rgrp {
176 __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */ 212 __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
177}; 213};
178 214
215struct gfs2_rgrp_host {
216 __u32 rg_flags;
217 __u32 rg_free;
218 __u32 rg_dinodes;
219 __u64 rg_igeneration;
220};
221
179/* 222/*
180 * quota structure 223 * quota structure
181 */ 224 */
@@ -187,6 +230,12 @@ struct gfs2_quota {
187 __u8 qu_reserved[64]; 230 __u8 qu_reserved[64];
188}; 231};
189 232
233struct gfs2_quota_host {
234 __u64 qu_limit;
235 __u64 qu_warn;
236 __u64 qu_value;
237};
238
190/* 239/*
191 * dinode structure 240 * dinode structure
192 */ 241 */
@@ -270,6 +319,27 @@ struct gfs2_dinode {
270 __u8 di_reserved[56]; 319 __u8 di_reserved[56];
271}; 320};
272 321
322struct gfs2_dinode_host {
323 __u64 di_size; /* number of bytes in file */
324 __u64 di_blocks; /* number of blocks in file */
325
326 /* This section varies from gfs1. Padding added to align with
327 * remainder of dinode
328 */
329 __u64 di_goal_meta; /* rgrp to alloc from next */
330 __u64 di_goal_data; /* data block goal */
331 __u64 di_generation; /* generation number for NFS */
332
333 __u32 di_flags; /* GFS2_DIF_... */
334 __u16 di_height; /* height of metadata */
335
336 /* These only apply to directories */
337 __u16 di_depth; /* Number of bits in the table */
338 __u32 di_entries; /* The number of entries in the directory */
339
340 __u64 di_eattr; /* extended attribute block number */
341};
342
273/* 343/*
274 * directory structure - many of these per directory file 344 * directory structure - many of these per directory file
275 */ 345 */
@@ -344,6 +414,16 @@ struct gfs2_log_header {
344 __be32 lh_hash; 414 __be32 lh_hash;
345}; 415};
346 416
417struct gfs2_log_header_host {
418 struct gfs2_meta_header_host lh_header;
419
420 __u64 lh_sequence; /* Sequence number of this transaction */
421 __u32 lh_flags; /* GFS2_LOG_HEAD_... */
422 __u32 lh_tail; /* Block number of log tail */
423 __u32 lh_blkno;
424 __u32 lh_hash;
425};
426
347/* 427/*
348 * Log type descriptor 428 * Log type descriptor
349 */ 429 */
@@ -384,6 +464,11 @@ struct gfs2_inum_range {
384 __be64 ir_length; 464 __be64 ir_length;
385}; 465};
386 466
467struct gfs2_inum_range_host {
468 __u64 ir_start;
469 __u64 ir_length;
470};
471
387/* 472/*
388 * Statfs change 473 * Statfs change
389 * Describes an change to the pool of free and allocated 474 * Describes an change to the pool of free and allocated
@@ -396,6 +481,12 @@ struct gfs2_statfs_change {
396 __be64 sc_dinodes; 481 __be64 sc_dinodes;
397}; 482};
398 483
484struct gfs2_statfs_change_host {
485 __u64 sc_total;
486 __u64 sc_free;
487 __u64 sc_dinodes;
488};
489
399/* 490/*
400 * Quota change 491 * Quota change
401 * Describes an allocation change for a particular 492 * Describes an allocation change for a particular
@@ -410,33 +501,38 @@ struct gfs2_quota_change {
410 __be32 qc_id; 501 __be32 qc_id;
411}; 502};
412 503
504struct gfs2_quota_change_host {
505 __u64 qc_change;
506 __u32 qc_flags; /* GFS2_QCF_... */
507 __u32 qc_id;
508};
509
413#ifdef __KERNEL__ 510#ifdef __KERNEL__
414/* Translation functions */ 511/* Translation functions */
415 512
416extern void gfs2_inum_in(struct gfs2_inum *no, const void *buf); 513extern void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf);
417extern void gfs2_inum_out(const struct gfs2_inum *no, void *buf); 514extern void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf);
418extern void gfs2_sb_in(struct gfs2_sb *sb, const void *buf); 515extern void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf);
419extern void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf); 516extern void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf);
420extern void gfs2_rindex_out(const struct gfs2_rindex *ri, void *buf); 517extern void gfs2_rindex_out(const struct gfs2_rindex_host *ri, void *buf);
421extern void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf); 518extern void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf);
422extern void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf); 519extern void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf);
423extern void gfs2_quota_in(struct gfs2_quota *qu, const void *buf); 520extern void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf);
424extern void gfs2_quota_out(const struct gfs2_quota *qu, void *buf); 521struct gfs2_inode;
425extern void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf); 522extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
426extern void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf);
427extern void gfs2_ea_header_in(struct gfs2_ea_header *ea, const void *buf); 523extern void gfs2_ea_header_in(struct gfs2_ea_header *ea, const void *buf);
428extern void gfs2_ea_header_out(const struct gfs2_ea_header *ea, void *buf); 524extern void gfs2_ea_header_out(const struct gfs2_ea_header *ea, void *buf);
429extern void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf); 525extern void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf);
430extern void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf); 526extern void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf);
431extern void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf); 527extern void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf);
432extern void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf); 528extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf);
433extern void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf); 529extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf);
434extern void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf); 530extern void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf);
435 531
436/* Printing functions */ 532/* Printing functions */
437 533
438extern void gfs2_rindex_print(const struct gfs2_rindex *ri); 534extern void gfs2_rindex_print(const struct gfs2_rindex_host *ri);
439extern void gfs2_dinode_print(const struct gfs2_dinode *di); 535extern void gfs2_dinode_print(const struct gfs2_inode *ip);
440 536
441#endif /* __KERNEL__ */ 537#endif /* __KERNEL__ */
442 538
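The *_host structures above are host-endian mirrors of the on-disk, big-endian structures, and the gfs2_*_in()/gfs2_*_out() helpers translate between the two. A minimal sketch of what one such helper typically looks like, assuming the usual be64_to_cpu() unpacking pattern (the body is illustrative, not taken from this patch):

/* Sketch only: unpack the on-disk gfs2_inum_range into its host twin.
 * Assumes <linux/gfs2_ondisk.h> and <asm/byteorder.h> are available. */
void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
	const struct gfs2_inum_range *str = buf;

	ir->ir_start  = be64_to_cpu(str->ir_start);
	ir->ir_length = be64_to_cpu(str->ir_length);
}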
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index fd7d12daa94..3d8768b619e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/fs.h> 4#include <linux/fs.h>
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/uaccess.h>
6 7
7#include <asm/cacheflush.h> 8#include <asm/cacheflush.h>
8 9
@@ -41,9 +42,10 @@ static inline void *kmap(struct page *page)
41 42
42#define kunmap(page) do { (void) (page); } while (0) 43#define kunmap(page) do { (void) (page); } while (0)
43 44
44#define kmap_atomic(page, idx) page_address(page) 45#define kmap_atomic(page, idx) \
45#define kunmap_atomic(addr, idx) do { } while (0) 46 ({ pagefault_disable(); page_address(page); })
46#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) 47#define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0)
48#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
47#define kmap_atomic_to_page(ptr) virt_to_page(ptr) 49#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
48#endif 50#endif
49 51
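With this change the !HIGHMEM stubs still disable and re-enable pagefaults, so callers get the same atomicity rules whether or not HIGHMEM is configured. A hedged sketch of the pairing these macros protect (copy_from_page and the KM_USER0 slot choice are illustrative, not from the patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: no sleeping is allowed between kmap_atomic() and kunmap_atomic(). */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
	char *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(dst, vaddr, len);
	kunmap_atomic(vaddr, KM_USER0);
}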
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ace64e57e17..a60995afe33 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,7 @@ extern int sysctl_hugetlb_shm_group;
35 35
36pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr); 36pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
37pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); 37pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
38int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
38struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, 39struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
39 int write); 40 int write);
40struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, 41struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 1fb02e17f6f..52f53e2e70c 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -490,7 +490,7 @@ struct i2o_dma {
490 */ 490 */
491struct i2o_pool { 491struct i2o_pool {
492 char *name; 492 char *name;
493 kmem_cache_t *slab; 493 struct kmem_cache *slab;
494 mempool_t *mempool; 494 mempool_t *mempool;
495}; 495};
496 496
@@ -986,7 +986,8 @@ extern void i2o_driver_unregister(struct i2o_driver *);
986 986
987/** 987/**
988 * i2o_driver_notify_controller_add - Send notification of added controller 988 * i2o_driver_notify_controller_add - Send notification of added controller
989 * to a single I2O driver 989 * @drv: I2O driver
990 * @c: I2O controller
990 * 991 *
991 * Send notification of added controller to a single registered driver. 992 * Send notification of added controller to a single registered driver.
992 */ 993 */
@@ -998,8 +999,9 @@ static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv,
998}; 999};
999 1000
1000/** 1001/**
1001 * i2o_driver_notify_controller_remove - Send notification of removed 1002 * i2o_driver_notify_controller_remove - Send notification of removed controller
1002 * controller to a single I2O driver 1003 * @drv: I2O driver
1004 * @c: I2O controller
1003 * 1005 *
1004 * Send notification of removed controller to a single registered driver. 1006 * Send notification of removed controller to a single registered driver.
1005 */ 1007 */
@@ -1011,8 +1013,9 @@ static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv,
1011}; 1013};
1012 1014
1013/** 1015/**
1014 * i2o_driver_notify_device_add - Send notification of added device to a 1016 * i2o_driver_notify_device_add - Send notification of added device
1015 * single I2O driver 1017 * @drv: I2O driver
1018 * @i2o_dev: the added i2o_device
1016 * 1019 *
1017 * Send notification of added device to a single registered driver. 1020 * Send notification of added device to a single registered driver.
1018 */ 1021 */
@@ -1025,7 +1028,8 @@ static inline void i2o_driver_notify_device_add(struct i2o_driver *drv,
1025 1028
1026/** 1029/**
1027 * i2o_driver_notify_device_remove - Send notification of removed device 1030 * i2o_driver_notify_device_remove - Send notification of removed device
1028 * to a single I2O driver 1031 * @drv: I2O driver
 1032 * @i2o_dev: the removed i2o_device
1029 * 1033 *
1030 * Send notification of removed device to a single registered driver. 1034 * Send notification of removed device to a single registered driver.
1031 */ 1035 */
@@ -1148,7 +1152,7 @@ static inline void i2o_msg_post(struct i2o_controller *c,
1148/** 1152/**
1149 * i2o_msg_post_wait - Post and wait a message and wait until return 1153 * i2o_msg_post_wait - Post and wait a message and wait until return
1150 * @c: controller 1154 * @c: controller
1151 * @m: message to post 1155 * @msg: message to post
1152 * @timeout: time in seconds to wait 1156 * @timeout: time in seconds to wait
1153 * 1157 *
1154 * This API allows an OSM to post a message and then be told whether or 1158 * This API allows an OSM to post a message and then be told whether or
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 33c5daacc74..733790d4f7d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -73,7 +73,7 @@
73extern struct nsproxy init_nsproxy; 73extern struct nsproxy init_nsproxy;
74#define INIT_NSPROXY(nsproxy) { \ 74#define INIT_NSPROXY(nsproxy) { \
75 .count = ATOMIC_INIT(1), \ 75 .count = ATOMIC_INIT(1), \
76 .nslock = SPIN_LOCK_UNLOCKED, \ 76 .nslock = __SPIN_LOCK_UNLOCKED(nsproxy.nslock), \
77 .uts_ns = &init_uts_ns, \ 77 .uts_ns = &init_uts_ns, \
78 .namespace = NULL, \ 78 .namespace = NULL, \
79 INIT_IPC_NS(ipc_ns) \ 79 INIT_IPC_NS(ipc_ns) \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5b83e7b5962..de7593f4e89 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,6 +11,7 @@
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/irqflags.h> 13#include <linux/irqflags.h>
14#include <linux/bottom_half.h>
14#include <asm/atomic.h> 15#include <asm/atomic.h>
15#include <asm/ptrace.h> 16#include <asm/ptrace.h>
16#include <asm/system.h> 17#include <asm/system.h>
@@ -217,12 +218,6 @@ static inline void __deprecated save_and_cli(unsigned long *x)
217#define save_and_cli(x) save_and_cli(&x) 218#define save_and_cli(x) save_and_cli(&x)
218#endif /* CONFIG_SMP */ 219#endif /* CONFIG_SMP */
219 220
220extern void local_bh_disable(void);
221extern void __local_bh_enable(void);
222extern void _local_bh_enable(void);
223extern void local_bh_enable(void);
224extern void local_bh_enable_ip(unsigned long ip);
225
226/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high 221/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
227 frequency threaded job scheduling. For almost all the purposes 222 frequency threaded job scheduling. For almost all the purposes
228 tasklets are more than enough. F.e. all serial device BHs et 223 tasklets are more than enough. F.e. all serial device BHs et
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 796ca009fd4..7a9db390c56 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -208,6 +208,15 @@ struct kernel_ipmi_msg
208 code as the first byte of the incoming data, unlike a response. */ 208 code as the first byte of the incoming data, unlike a response. */
209 209
210 210
211/*
212 * Modes for ipmi_set_maint_mode() and the userland IOCTL. The AUTO
213 * setting is the default and means it will be set on certain
214 * commands. Hard setting it on and off will override automatic
215 * operation.
216 */
217#define IPMI_MAINTENANCE_MODE_AUTO 0
218#define IPMI_MAINTENANCE_MODE_OFF 1
219#define IPMI_MAINTENANCE_MODE_ON 2
211 220
212#ifdef __KERNEL__ 221#ifdef __KERNEL__
213 222
@@ -374,6 +383,35 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
374 unsigned int chans); 383 unsigned int chans);
375 384
376/* 385/*
386 * Go into a mode where the driver will not autonomously attempt to do
387 * things with the interface. It will still respond to attentions and
388 * interrupts, and it will expect that commands will complete. It
 389 * will not automatically check for flags, events, or things of that
390 * nature.
391 *
392 * This is primarily used for firmware upgrades. The idea is that
393 * when you go into firmware upgrade mode, you do this operation
394 * and the driver will not attempt to do anything but what you tell
395 * it or what the BMC asks for.
396 *
397 * Note that if you send a command that resets the BMC, the driver
398 * will still expect a response from that command. So the BMC should
399 * reset itself *after* the response is sent. Resetting before the
400 * response is just silly.
401 *
402 * If in auto maintenance mode, the driver will automatically go into
403 * maintenance mode for 30 seconds if it sees a cold reset, a warm
404 * reset, or a firmware NetFN. This means that code that uses only
405 * firmware NetFN commands to do upgrades will work automatically
406 * without change, assuming it sends a message every 30 seconds or
407 * less.
408 *
409 * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
410 */
411int ipmi_get_maintenance_mode(ipmi_user_t user);
412int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
413
414/*
377 * Allow run-to-completion mode to be set for the interface of 415 * Allow run-to-completion mode to be set for the interface of
378 * a specific user. 416 * a specific user.
379 */ 417 */
@@ -656,4 +694,11 @@ struct ipmi_timing_parms
656#define IPMICTL_GET_TIMING_PARMS_CMD _IOR(IPMI_IOC_MAGIC, 23, \ 694#define IPMICTL_GET_TIMING_PARMS_CMD _IOR(IPMI_IOC_MAGIC, 23, \
657 struct ipmi_timing_parms) 695 struct ipmi_timing_parms)
658 696
697/*
698 * Set the maintenance mode. See ipmi_set_maintenance_mode() above
699 * for a description of what this does.
700 */
701#define IPMICTL_GET_MAINTENANCE_MODE_CMD _IOR(IPMI_IOC_MAGIC, 30, int)
702#define IPMICTL_SET_MAINTENANCE_MODE_CMD _IOW(IPMI_IOC_MAGIC, 31, int)
703
659#endif /* __LINUX_IPMI_H */ 704#endif /* __LINUX_IPMI_H */
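A hedged userland sketch of how the new maintenance-mode ioctls might bracket a firmware upgrade; the /dev/ipmi0 path and the error handling are assumptions, not part of the patch:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ipmi.h>

int firmware_upgrade_window(void)
{
	int fd = open("/dev/ipmi0", O_RDWR);	/* assumed device node */
	int mode = IPMI_MAINTENANCE_MODE_ON;

	if (fd < 0)
		return -1;
	if (ioctl(fd, IPMICTL_SET_MAINTENANCE_MODE_CMD, &mode) < 0) {
		close(fd);
		return -1;
	}

	/* ... send the IPMI_NETFN_FIRMWARE_REQUEST commands here ... */

	mode = IPMI_MAINTENANCE_MODE_AUTO;	/* restore automatic operation */
	ioctl(fd, IPMICTL_SET_MAINTENANCE_MODE_CMD, &mode);
	close(fd);
	return 0;
}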
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h
index 4d04d8b58a0..b56a158d587 100644
--- a/include/linux/ipmi_msgdefs.h
+++ b/include/linux/ipmi_msgdefs.h
@@ -46,6 +46,8 @@
46#define IPMI_NETFN_APP_REQUEST 0x06 46#define IPMI_NETFN_APP_REQUEST 0x06
47#define IPMI_NETFN_APP_RESPONSE 0x07 47#define IPMI_NETFN_APP_RESPONSE 0x07
48#define IPMI_GET_DEVICE_ID_CMD 0x01 48#define IPMI_GET_DEVICE_ID_CMD 0x01
49#define IPMI_COLD_RESET_CMD 0x02
50#define IPMI_WARM_RESET_CMD 0x03
49#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 51#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30
50#define IPMI_GET_DEVICE_GUID_CMD 0x08 52#define IPMI_GET_DEVICE_GUID_CMD 0x08
51#define IPMI_GET_MSG_FLAGS_CMD 0x31 53#define IPMI_GET_MSG_FLAGS_CMD 0x31
@@ -60,20 +62,27 @@
60#define IPMI_NETFN_STORAGE_RESPONSE 0x0b 62#define IPMI_NETFN_STORAGE_RESPONSE 0x0b
61#define IPMI_ADD_SEL_ENTRY_CMD 0x44 63#define IPMI_ADD_SEL_ENTRY_CMD 0x44
62 64
65#define IPMI_NETFN_FIRMWARE_REQUEST 0x08
66#define IPMI_NETFN_FIRMWARE_RESPONSE 0x09
67
63/* The default slave address */ 68/* The default slave address */
64#define IPMI_BMC_SLAVE_ADDR 0x20 69#define IPMI_BMC_SLAVE_ADDR 0x20
65 70
66/* The BT interface on high-end HP systems supports up to 255 bytes in 71/* The BT interface on high-end HP systems supports up to 255 bytes in
67 * one transfer. Its "virtual" BMC supports some commands that are longer 72 * one transfer. Its "virtual" BMC supports some commands that are longer
68 * than 128 bytes. Use the full 256, plus NetFn/LUN, Cmd, cCode, plus 73 * than 128 bytes. Use the full 256, plus NetFn/LUN, Cmd, cCode, plus
69 * some overhead. It would be nice to base this on the "BT Capabilities" 74 * some overhead; it's not worth the effort to dynamically size this based
70 * but that's too hard to propagate to the rest of the driver. */ 75 * on the results of the "Get BT Capabilities" command. */
71#define IPMI_MAX_MSG_LENGTH 272 /* multiple of 16 */ 76#define IPMI_MAX_MSG_LENGTH 272 /* multiple of 16 */
72 77
73#define IPMI_CC_NO_ERROR 0x00 78#define IPMI_CC_NO_ERROR 0x00
74#define IPMI_NODE_BUSY_ERR 0xc0 79#define IPMI_NODE_BUSY_ERR 0xc0
75#define IPMI_INVALID_COMMAND_ERR 0xc1 80#define IPMI_INVALID_COMMAND_ERR 0xc1
81#define IPMI_TIMEOUT_ERR 0xc3
76#define IPMI_ERR_MSG_TRUNCATED 0xc6 82#define IPMI_ERR_MSG_TRUNCATED 0xc6
83#define IPMI_REQ_LEN_INVALID_ERR 0xc7
84#define IPMI_REQ_LEN_EXCEEDED_ERR 0xc8
85#define IPMI_NOT_IN_MY_STATE_ERR 0xd5 /* IPMI 2.0 */
77#define IPMI_LOST_ARBITRATION_ERR 0x81 86#define IPMI_LOST_ARBITRATION_ERR 0x81
78#define IPMI_BUS_ERR 0x82 87#define IPMI_BUS_ERR 0x82
79#define IPMI_NAK_ON_WRITE_ERR 0x83 88#define IPMI_NAK_ON_WRITE_ERR 0x83
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 6d9c7e4da47..c0633108d05 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,13 @@ struct ipmi_smi_handlers
115 poll for operations during things like crash dumps. */ 115 poll for operations during things like crash dumps. */
116 void (*poll)(void *send_info); 116 void (*poll)(void *send_info);
117 117
118 /* Enable/disable firmware maintenance mode. Note that this
 119 is *not* one of the modes defined above; this is simply an on/off
 120 setting. The message handler does the mode handling. Note
 121 that this is called from interrupt context, so it cannot
122 block. */
123 void (*set_maintenance_mode)(void *send_info, int enable);
124
118 /* Tell the handler that we are using it/not using it. The 125 /* Tell the handler that we are using it/not using it. The
119 message handler get the modules that this handler belongs 126 message handler get the modules that this handler belongs
120 to; this function lets the SMI claim any modules that it 127 to; this function lets the SMI claim any modules that it
@@ -173,6 +180,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
173 void *send_info, 180 void *send_info,
174 struct ipmi_device_id *device_id, 181 struct ipmi_device_id *device_id,
175 struct device *dev, 182 struct device *dev,
183 const char *sysfs_name,
176 unsigned char slave_addr); 184 unsigned char slave_addr);
177 185
178/* 186/*
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index fe89444b1c6..45273755126 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -839,7 +839,6 @@ struct journal_s
839 */ 839 */
840 840
841/* Filing buffers */ 841/* Filing buffers */
842extern void __journal_temp_unlink_buffer(struct journal_head *jh);
843extern void journal_unfile_buffer(journal_t *, struct journal_head *); 842extern void journal_unfile_buffer(journal_t *, struct journal_head *);
844extern void __journal_unfile_buffer(struct journal_head *); 843extern void __journal_unfile_buffer(struct journal_head *);
845extern void __journal_refile_buffer(struct journal_head *); 844extern void __journal_refile_buffer(struct journal_head *);
@@ -949,7 +948,7 @@ void journal_put_journal_head(struct journal_head *jh);
949/* 948/*
950 * handle management 949 * handle management
951 */ 950 */
952extern kmem_cache_t *jbd_handle_cache; 951extern struct kmem_cache *jbd_handle_cache;
953 952
954static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) 953static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
955{ 954{
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index ddb12879578..0e0fedd2039 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -848,7 +848,6 @@ struct journal_s
848 */ 848 */
849 849
850/* Filing buffers */ 850/* Filing buffers */
851extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
852extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); 851extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
853extern void __jbd2_journal_unfile_buffer(struct journal_head *); 852extern void __jbd2_journal_unfile_buffer(struct journal_head *);
854extern void __jbd2_journal_refile_buffer(struct journal_head *); 853extern void __jbd2_journal_refile_buffer(struct journal_head *);
@@ -958,7 +957,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
958/* 957/*
959 * handle management 958 * handle management
960 */ 959 */
961extern kmem_cache_t *jbd2_handle_cache; 960extern struct kmem_cache *jbd2_handle_cache;
962 961
963static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) 962static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
964{ 963{
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index a4ede62b339..e3abcec6c51 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -105,6 +105,7 @@ extern struct page *kimage_alloc_control_pages(struct kimage *image,
105 unsigned int order); 105 unsigned int order);
106extern void crash_kexec(struct pt_regs *); 106extern void crash_kexec(struct pt_regs *);
107int kexec_should_crash(struct task_struct *); 107int kexec_should_crash(struct task_struct *);
108void crash_save_cpu(struct pt_regs *regs, int cpu);
108extern struct kimage *kexec_image; 109extern struct kimage *kexec_image;
109extern struct kimage *kexec_crash_image; 110extern struct kimage *kexec_crash_image;
110 111
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ac4c0559f75..769be39b968 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -165,7 +165,7 @@ extern void arch_disarm_kprobe(struct kprobe *p);
165extern int arch_init_kprobes(void); 165extern int arch_init_kprobes(void);
166extern void show_registers(struct pt_regs *regs); 166extern void show_registers(struct pt_regs *regs);
167extern kprobe_opcode_t *get_insn_slot(void); 167extern kprobe_opcode_t *get_insn_slot(void);
168extern void free_insn_slot(kprobe_opcode_t *slot); 168extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
169extern void kprobes_inc_nmissed_count(struct kprobe *p); 169extern void kprobes_inc_nmissed_count(struct kprobe *p);
170 170
171/* Get the kprobe at this addr (if any) - called with preemption disabled */ 171/* Get the kprobe at this addr (if any) - called with preemption disabled */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 84eeecd60a0..611f17f79ee 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -248,9 +248,9 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
248 * 248 *
249 * Returns the scalar nanoseconds representation of kt 249 * Returns the scalar nanoseconds representation of kt
250 */ 250 */
251static inline u64 ktime_to_ns(const ktime_t kt) 251static inline s64 ktime_to_ns(const ktime_t kt)
252{ 252{
253 return (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec; 253 return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
254} 254}
255 255
256#endif 256#endif
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 862d9730a60..8c39654549d 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -164,14 +164,12 @@ void nlmclnt_next_cookie(struct nlm_cookie *);
164 */ 164 */
165struct nlm_host * nlmclnt_lookup_host(const struct sockaddr_in *, int, int, const char *, int); 165struct nlm_host * nlmclnt_lookup_host(const struct sockaddr_in *, int, int, const char *, int);
166struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *, const char *, int); 166struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *, const char *, int);
167struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *, int, int, const char *, int);
168struct rpc_clnt * nlm_bind_host(struct nlm_host *); 167struct rpc_clnt * nlm_bind_host(struct nlm_host *);
169void nlm_rebind_host(struct nlm_host *); 168void nlm_rebind_host(struct nlm_host *);
170struct nlm_host * nlm_get_host(struct nlm_host *); 169struct nlm_host * nlm_get_host(struct nlm_host *);
171void nlm_release_host(struct nlm_host *); 170void nlm_release_host(struct nlm_host *);
172void nlm_shutdown_hosts(void); 171void nlm_shutdown_hosts(void);
173extern void nlm_host_rebooted(const struct sockaddr_in *, const char *, int, u32); 172extern void nlm_host_rebooted(const struct sockaddr_in *, const char *, int, u32);
174struct nsm_handle *nsm_find(const struct sockaddr_in *, const char *, int);
175void nsm_release(struct nsm_handle *); 173void nsm_release(struct nsm_handle *);
176 174
177 175
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 819f08f1310..498bfbd3b4e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -193,7 +193,6 @@ extern void lockdep_free_key_range(void *start, unsigned long size);
193 193
194extern void lockdep_off(void); 194extern void lockdep_off(void);
195extern void lockdep_on(void); 195extern void lockdep_on(void);
196extern int lockdep_internal(void);
197 196
198/* 197/*
199 * These methods are used by specific locking variants (spinlocks, 198 * These methods are used by specific locking variants (spinlocks,
@@ -243,6 +242,8 @@ extern void lock_release(struct lockdep_map *lock, int nested,
243 242
244# define INIT_LOCKDEP .lockdep_recursion = 0, 243# define INIT_LOCKDEP .lockdep_recursion = 0,
245 244
245#define lockdep_depth(tsk) ((tsk)->lockdep_depth)
246
246#else /* !LOCKDEP */ 247#else /* !LOCKDEP */
247 248
248static inline void lockdep_off(void) 249static inline void lockdep_off(void)
@@ -253,11 +254,6 @@ static inline void lockdep_on(void)
253{ 254{
254} 255}
255 256
256static inline int lockdep_internal(void)
257{
258 return 0;
259}
260
261# define lock_acquire(l, s, t, r, c, i) do { } while (0) 257# define lock_acquire(l, s, t, r, c, i) do { } while (0)
262# define lock_release(l, n, i) do { } while (0) 258# define lock_release(l, n, i) do { } while (0)
263# define lockdep_init() do { } while (0) 259# define lockdep_init() do { } while (0)
@@ -277,6 +273,9 @@ static inline int lockdep_internal(void)
277 * The class key takes no space if lockdep is disabled: 273 * The class key takes no space if lockdep is disabled:
278 */ 274 */
279struct lock_class_key { }; 275struct lock_class_key { };
276
277#define lockdep_depth(tsk) (0)
278
280#endif /* !LOCKDEP */ 279#endif /* !LOCKDEP */
281 280
282#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) 281#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d538de90196..a17b147c61e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -114,6 +114,8 @@ struct vm_area_struct {
114#endif 114#endif
115}; 115};
116 116
117extern struct kmem_cache *vm_area_cachep;
118
117/* 119/*
118 * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is 120 * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
119 * disabled, then there's a single shared list of VMAs maintained by the 121 * disabled, then there's a single shared list of VMAs maintained by the
@@ -294,6 +296,24 @@ void put_pages_list(struct list_head *pages);
294void split_page(struct page *page, unsigned int order); 296void split_page(struct page *page, unsigned int order);
295 297
296/* 298/*
299 * Compound pages have a destructor function. Provide a
300 * prototype for that function and accessor functions.
301 * These are _only_ valid on the head of a PG_compound page.
302 */
303typedef void compound_page_dtor(struct page *);
304
305static inline void set_compound_page_dtor(struct page *page,
306 compound_page_dtor *dtor)
307{
308 page[1].lru.next = (void *)dtor;
309}
310
311static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
312{
313 return (compound_page_dtor *)page[1].lru.next;
314}
315
316/*
297 * Multiple processes may "see" the same page. E.g. for untouched 317 * Multiple processes may "see" the same page. E.g. for untouched
298 * mappings of /dev/null, all processes see the same page full of 318 * mappings of /dev/null, all processes see the same page full of
299 * zeroes, and text pages of executables and shared libraries have 319 * zeroes, and text pages of executables and shared libraries have
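The accessors above stash a destructor for a compound page in page[1].lru.next. A short sketch of the intended round trip, with my_compound_dtor standing in for whatever teardown a caller registers (assumed names, not from the patch):

static void my_compound_dtor(struct page *page)
{
	/* per-page teardown for this compound page would go here */
}

static void install_dtor(struct page *page)
{
	set_compound_page_dtor(page, my_compound_dtor);
}

static void run_dtor(struct page *page)
{
	compound_page_dtor *dtor = get_compound_page_dtor(page);

	(*dtor)(page);
}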
@@ -396,7 +416,9 @@ void split_page(struct page *page, unsigned int order);
396 * We are going to use the flags for the page to node mapping if its in 416 * We are going to use the flags for the page to node mapping if its in
397 * there. This includes the case where there is no node, so it is implicit. 417 * there. This includes the case where there is no node, so it is implicit.
398 */ 418 */
399#define FLAGS_HAS_NODE (NODES_WIDTH > 0 || NODES_SHIFT == 0) 419#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
420#define NODE_NOT_IN_PAGE_FLAGS
421#endif
400 422
401#ifndef PFN_SECTION_SHIFT 423#ifndef PFN_SECTION_SHIFT
402#define PFN_SECTION_SHIFT 0 424#define PFN_SECTION_SHIFT 0
@@ -411,13 +433,18 @@ void split_page(struct page *page, unsigned int order);
411#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) 433#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
412#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) 434#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
413 435
 414/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */ 436/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 415#if FLAGS_HAS_NODE 437#ifdef NODE_NOT_IN_PAGE_FLAGS
416#define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT) 438#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
439#else
440#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
441#endif
442
443#if ZONES_WIDTH > 0
444#define ZONEID_PGSHIFT ZONES_PGSHIFT
417#else 445#else
418#define ZONETABLE_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) 446#define ZONEID_PGSHIFT NODES_PGOFF
419#endif 447#endif
420#define ZONETABLE_PGSHIFT ZONES_PGSHIFT
421 448
422#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED 449#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
423#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED 450#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
@@ -426,26 +453,28 @@ void split_page(struct page *page, unsigned int order);
426#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) 453#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
427#define NODES_MASK ((1UL << NODES_WIDTH) - 1) 454#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
428#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) 455#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
429#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1) 456#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
430 457
431static inline enum zone_type page_zonenum(struct page *page) 458static inline enum zone_type page_zonenum(struct page *page)
432{ 459{
433 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; 460 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
434} 461}
435 462
436struct zone; 463/*
437extern struct zone *zone_table[]; 464 * The identification function is only used by the buddy allocator for
438 465 * determining if two pages could be buddies. We are not really
 466 * identifying a zone since we could be using the section number
 467 * id if no node id is available in page flags.
468 * We guarantee only that it will return the same value for two
469 * combinable pages in a zone.
470 */
439static inline int page_zone_id(struct page *page) 471static inline int page_zone_id(struct page *page)
440{ 472{
441 return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK; 473 BUILD_BUG_ON(ZONEID_PGSHIFT == 0 && ZONEID_MASK);
442} 474 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
443static inline struct zone *page_zone(struct page *page)
444{
445 return zone_table[page_zone_id(page)];
446} 475}
447 476
448static inline unsigned long zone_to_nid(struct zone *zone) 477static inline int zone_to_nid(struct zone *zone)
449{ 478{
450#ifdef CONFIG_NUMA 479#ifdef CONFIG_NUMA
451 return zone->node; 480 return zone->node;
@@ -454,13 +483,20 @@ static inline unsigned long zone_to_nid(struct zone *zone)
454#endif 483#endif
455} 484}
456 485
457static inline unsigned long page_to_nid(struct page *page) 486#ifdef NODE_NOT_IN_PAGE_FLAGS
487extern int page_to_nid(struct page *page);
488#else
489static inline int page_to_nid(struct page *page)
490{
491 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
492}
493#endif
494
495static inline struct zone *page_zone(struct page *page)
458{ 496{
459 if (FLAGS_HAS_NODE) 497 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
460 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
461 else
462 return zone_to_nid(page_zone(page));
463} 498}
499
464static inline unsigned long page_to_section(struct page *page) 500static inline unsigned long page_to_section(struct page *page)
465{ 501{
466 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 502 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
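As the new comment says, page_zone_id() only guarantees that two pages which may be combined report the same value. A sketch of the kind of check the buddy allocator relies on (pages_in_same_zone is an illustrative name):

static inline int pages_in_same_zone(struct page *a, struct page *b)
{
	/* Equal ids mean the pages are buddy candidates, whether the id is
	 * really NODE:ZONE or SECTION:ZONE underneath. */
	return page_zone_id(a) == page_zone_id(b);
}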
@@ -477,6 +513,7 @@ static inline void set_page_node(struct page *page, unsigned long node)
477 page->flags &= ~(NODES_MASK << NODES_PGSHIFT); 513 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
478 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; 514 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
479} 515}
516
480static inline void set_page_section(struct page *page, unsigned long section) 517static inline void set_page_section(struct page *page, unsigned long section)
481{ 518{
482 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); 519 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
@@ -947,8 +984,6 @@ extern void mem_init(void);
947extern void show_mem(void); 984extern void show_mem(void);
948extern void si_meminfo(struct sysinfo * val); 985extern void si_meminfo(struct sysinfo * val);
949extern void si_meminfo_node(struct sysinfo *val, int nid); 986extern void si_meminfo_node(struct sysinfo *val, int nid);
950extern void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
951 unsigned long pfn, unsigned long size);
952 987
953#ifdef CONFIG_NUMA 988#ifdef CONFIG_NUMA
954extern void setup_per_cpu_pageset(void); 989extern void setup_per_cpu_pageset(void);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e06683e2bea..e339a7345f2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -278,7 +278,7 @@ struct zone {
278 /* 278 /*
279 * rarely used fields: 279 * rarely used fields:
280 */ 280 */
281 char *name; 281 const char *name;
282} ____cacheline_internodealigned_in_smp; 282} ____cacheline_internodealigned_in_smp;
283 283
284/* 284/*
@@ -288,19 +288,94 @@ struct zone {
288 */ 288 */
289#define DEF_PRIORITY 12 289#define DEF_PRIORITY 12
290 290
291/* Maximum number of zones on a zonelist */
292#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
293
294#ifdef CONFIG_NUMA
295/*
296 * We cache key information from each zonelist for smaller cache
297 * footprint when scanning for free pages in get_page_from_freelist().
298 *
299 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 300 * up short of free memory since the last time (last_full_zap)
301 * we zero'd fullzones.
302 * 2) The array z_to_n[] maps each zone in the zonelist to its node
303 * id, so that we can efficiently evaluate whether that node is
 304 * set in the current task's mems_allowed.
305 *
306 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 307 * indexed by a zone's offset in the zonelist zones[] array.
308 *
309 * The get_page_from_freelist() routine does two scans. During the
310 * first scan, we skip zones whose corresponding bit in 'fullzones'
311 * is set or whose corresponding node in current->mems_allowed (which
312 * comes from cpusets) is not set. During the second scan, we bypass
313 * this zonelist_cache, to ensure we look methodically at each zone.
314 *
315 * Once per second, we zero out (zap) fullzones, forcing us to
316 * reconsider nodes that might have regained more free memory.
317 * The field last_full_zap is the time we last zapped fullzones.
318 *
319 * This mechanism reduces the amount of time we waste repeatedly
 320 * re-examining zones for free memory when they came up low on
 321 * memory just moments ago.
322 *
323 * The zonelist_cache struct members logically belong in struct
324 * zonelist. However, the mempolicy zonelists constructed for
325 * MPOL_BIND are intentionally variable length (and usually much
326 * shorter). A general purpose mechanism for handling structs with
327 * multiple variable length members is more mechanism than we want
328 * here. We resort to some special case hackery instead.
329 *
330 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
331 * part because they are shorter), so we put the fixed length stuff
332 * at the front of the zonelist struct, ending in a variable length
333 * zones[], as is needed by MPOL_BIND.
334 *
335 * Then we put the optional zonelist cache on the end of the zonelist
336 * struct. This optional stuff is found by a 'zlcache_ptr' pointer in
337 * the fixed length portion at the front of the struct. This pointer
338 * both enables us to find the zonelist cache, and in the case of
339 * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
340 * to know that the zonelist cache is not there.
341 *
342 * The end result is that struct zonelists come in two flavors:
343 * 1) The full, fixed length version, shown below, and
344 * 2) The custom zonelists for MPOL_BIND.
345 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
346 *
347 * Even though there may be multiple CPU cores on a node modifying
348 * fullzones or last_full_zap in the same zonelist_cache at the same
349 * time, we don't lock it. This is just hint data - if it is wrong now
350 * and then, the allocator will still function, perhaps a bit slower.
351 */
352
353
354struct zonelist_cache {
355 unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */
356 DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */
357 unsigned long last_full_zap; /* when last zap'd (jiffies) */
358};
359#else
360struct zonelist_cache;
361#endif
362
291/* 363/*
292 * One allocation request operates on a zonelist. A zonelist 364 * One allocation request operates on a zonelist. A zonelist
293 * is a list of zones, the first one is the 'goal' of the 365 * is a list of zones, the first one is the 'goal' of the
294 * allocation, the other zones are fallback zones, in decreasing 366 * allocation, the other zones are fallback zones, in decreasing
295 * priority. 367 * priority.
296 * 368 *
297 * Right now a zonelist takes up less than a cacheline. We never 369 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
298 * modify it apart from boot-up, and only a few indices are used, 370 * as explained above. If zlcache_ptr is NULL, there is no zlcache.
299 * so despite the zonelist table being relatively big, the cache
300 * footprint of this construct is very small.
301 */ 371 */
372
302struct zonelist { 373struct zonelist {
303 struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited 374 struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
375 struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; // NULL delimited
376#ifdef CONFIG_NUMA
377 struct zonelist_cache zlcache; // optional ...
378#endif
304}; 379};
305 380
306#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 381#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
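A hedged sketch of the two zonelist flavours the comment describes: a full zonelist points zlcache_ptr at its embedded zlcache, while a custom MPOL_BIND zonelist marks the cache absent with NULL (zonelist_cache_init is an assumed helper name; <linux/bitmap.h> and <linux/jiffies.h> are assumed to be available):

static void zonelist_cache_init(struct zonelist *zl, int is_mpol_bind)
{
#ifdef CONFIG_NUMA
	if (is_mpol_bind) {
		zl->zlcache_ptr = NULL;		/* shorter custom list: no cache */
		return;
	}
	zl->zlcache_ptr = &zl->zlcache;
	bitmap_zero(zl->zlcache.fullzones, MAX_ZONES_PER_ZONELIST);
	zl->zlcache.last_full_zap = jiffies;
#else
	zl->zlcache_ptr = NULL;			/* no cache without NUMA */
#endif
}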
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 7c0c2c198f1..4a189dadb16 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -63,6 +63,9 @@ struct kparam_array
63 not there, read bits mean it's readable, write bits mean it's 63 not there, read bits mean it's readable, write bits mean it's
64 writable. */ 64 writable. */
65#define __module_param_call(prefix, name, set, get, arg, perm) \ 65#define __module_param_call(prefix, name, set, get, arg, perm) \
66 /* Default value instead of permissions? */ \
67 static int __param_perm_check_##name __attribute__((unused)) = \
68 BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \
66 static char __param_str_##name[] = prefix #name; \ 69 static char __param_str_##name[] = prefix #name; \
67 static struct kernel_param const __param_##name \ 70 static struct kernel_param const __param_##name \
68 __attribute_used__ \ 71 __attribute_used__ \
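The added BUILD_BUG_ON_ZERO() catches permission arguments that are out of range or world-writable, which is the usual symptom of passing a value where the sysfs mode belongs. Illustrative effect in a module (the names are examples only):

#include <linux/moduleparam.h>

static int debug;
module_param(debug, int, 0644);	/* valid octal permissions: still compiles */
/*
 * These would now fail at compile time via BUILD_BUG_ON_ZERO():
 *   module_param(debug, int, 644);    decimal 644 is > 0777
 *   module_param(debug, int, 0666);   world-writable, so (perm) & 2 is set
 */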
diff --git a/include/linux/msg.h b/include/linux/msg.h
index acc7c174ff0..f1b60740d64 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -92,6 +92,12 @@ struct msg_queue {
92 struct list_head q_senders; 92 struct list_head q_senders;
93}; 93};
94 94
95/* Helper routines for sys_msgsnd and sys_msgrcv */
96extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
97 size_t msgsz, int msgflg);
98extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
99 size_t msgsz, long msgtyp, int msgflg);
100
95#endif /* __KERNEL__ */ 101#endif /* __KERNEL__ */
96 102
97#endif /* _LINUX_MSG_H */ 103#endif /* _LINUX_MSG_H */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 27c48daa318..b2b91c47756 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -94,7 +94,7 @@ do { \
94 94
95#define __MUTEX_INITIALIZER(lockname) \ 95#define __MUTEX_INITIALIZER(lockname) \
96 { .count = ATOMIC_INIT(1) \ 96 { .count = ATOMIC_INIT(1) \
97 , .wait_lock = SPIN_LOCK_UNLOCKED \ 97 , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
98 , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ 98 , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
99 __DEBUG_MUTEX_INITIALIZER(lockname) \ 99 __DEBUG_MUTEX_INITIALIZER(lockname) \
100 __DEP_MAP_MUTEX_INITIALIZER(lockname) } 100 __DEP_MAP_MUTEX_INITIALIZER(lockname) }
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index d6b6dc09ad9..0f3e6930254 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -64,6 +64,7 @@ struct nbd_device {
64 struct gendisk *disk; 64 struct gendisk *disk;
65 int blksize; 65 int blksize;
66 u64 bytesize; 66 u64 bytesize;
67 pid_t pid; /* pid of nbd-client, if attached */
67}; 68};
68 69
69#endif 70#endif
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index fb049ec11ff..9d8144a488c 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -2,6 +2,8 @@
2#ifndef _NF_CONNTRACK_PPTP_H 2#ifndef _NF_CONNTRACK_PPTP_H
3#define _NF_CONNTRACK_PPTP_H 3#define _NF_CONNTRACK_PPTP_H
4 4
5#include <linux/netfilter/nf_conntrack_common.h>
6
5/* state of the control session */ 7/* state of the control session */
6enum pptp_ctrlsess_state { 8enum pptp_ctrlsess_state {
7 PPTP_SESSION_NONE, /* no session present */ 9 PPTP_SESSION_NONE, /* no session present */
@@ -295,7 +297,6 @@ union pptp_ctrl_union {
295/* crap needed for nf_conntrack_compat.h */ 297/* crap needed for nf_conntrack_compat.h */
296struct nf_conn; 298struct nf_conn;
297struct nf_conntrack_expect; 299struct nf_conntrack_expect;
298enum ip_conntrack_info;
299 300
300extern int 301extern int
301(*nf_nat_pptp_hook_outbound)(struct sk_buff **pskb, 302(*nf_nat_pptp_hook_outbound)(struct sk_buff **pskb,
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index e16904e28c3..acb4ed13024 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -15,9 +15,14 @@
15 * disables interrupts for a long time. This call is stateless. 15 * disables interrupts for a long time. This call is stateless.
16 */ 16 */
17#ifdef ARCH_HAS_NMI_WATCHDOG 17#ifdef ARCH_HAS_NMI_WATCHDOG
18#include <asm/nmi.h>
18extern void touch_nmi_watchdog(void); 19extern void touch_nmi_watchdog(void);
19#else 20#else
20# define touch_nmi_watchdog() touch_softlockup_watchdog() 21# define touch_nmi_watchdog() touch_softlockup_watchdog()
21#endif 22#endif
22 23
24#ifndef trigger_all_cpu_backtrace
25#define trigger_all_cpu_backtrace() do { } while (0)
26#endif
27
23#endif 28#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c09da1e30c5..4d972bbef31 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -390,7 +390,7 @@
390#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d 390#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
391#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e 391#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e
392#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f 392#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f
393#define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030 393#define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030
394#define PCI_DEVICE_ID_NS_SATURN 0x0035 394#define PCI_DEVICE_ID_NS_SATURN 0x0035
395#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 395#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500
396#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 396#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501
@@ -403,8 +403,7 @@
403#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515 403#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515
404#define PCI_DEVICE_ID_NS_87410 0xd001 404#define PCI_DEVICE_ID_NS_87410 0xd001
405 405
406#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028 406#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028
407#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b
408 407
409#define PCI_VENDOR_ID_TSENG 0x100c 408#define PCI_VENDOR_ID_TSENG 0x100c
410#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 409#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
@@ -1864,6 +1863,7 @@
1864#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 1863#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
1865#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 1864#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
1866#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 1865#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
1866#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523
1867 1867
1868#define PCI_VENDOR_ID_SAMSUNG 0x144d 1868#define PCI_VENDOR_ID_SAMSUNG 0x144d
1869 1869
@@ -1931,6 +1931,7 @@
1931#define PCI_DEVICE_ID_TIGON3_5750M 0x167c 1931#define PCI_DEVICE_ID_TIGON3_5750M 0x167c
1932#define PCI_DEVICE_ID_TIGON3_5751M 0x167d 1932#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
1933#define PCI_DEVICE_ID_TIGON3_5751F 0x167e 1933#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
1934#define PCI_DEVICE_ID_TIGON3_5787F 0x167f
1934#define PCI_DEVICE_ID_TIGON3_5787M 0x1693 1935#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
1935#define PCI_DEVICE_ID_TIGON3_5782 0x1696 1936#define PCI_DEVICE_ID_TIGON3_5782 0x1696
1936#define PCI_DEVICE_ID_TIGON3_5786 0x169a 1937#define PCI_DEVICE_ID_TIGON3_5786 0x169a
@@ -2002,6 +2003,8 @@
2002#define PCI_DEVICE_ID_FARSITE_TE1 0x1610 2003#define PCI_DEVICE_ID_FARSITE_TE1 0x1610
2003#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612 2004#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612
2004 2005
2006#define PCI_VENDOR_ID_ARIMA 0x161f
2007
2005#define PCI_VENDOR_ID_SIBYTE 0x166d 2008#define PCI_VENDOR_ID_SIBYTE 0x166d
2006#define PCI_DEVICE_ID_BCM1250_PCI 0x0001 2009#define PCI_DEVICE_ID_BCM1250_PCI 0x0001
2007#define PCI_DEVICE_ID_BCM1250_HT 0x0002 2010#define PCI_DEVICE_ID_BCM1250_HT 0x0002
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h
index 0f0b880c428..265bafab649 100644
--- a/include/linux/pfkeyv2.h
+++ b/include/linux/pfkeyv2.h
@@ -285,6 +285,7 @@ struct sadb_x_sec_ctx {
285#define SADB_X_AALG_SHA2_384HMAC 6 285#define SADB_X_AALG_SHA2_384HMAC 6
286#define SADB_X_AALG_SHA2_512HMAC 7 286#define SADB_X_AALG_SHA2_512HMAC 7
287#define SADB_X_AALG_RIPEMD160HMAC 8 287#define SADB_X_AALG_RIPEMD160HMAC 8
288#define SADB_X_AALG_AES_XCBC_MAC 9
288#define SADB_X_AALG_NULL 251 /* kame */ 289#define SADB_X_AALG_NULL 251 /* kame */
289#define SADB_AALG_MAX 251 290#define SADB_AALG_MAX 251
290 291
diff --git a/include/linux/profile.h b/include/linux/profile.h
index acce53fd38b..5670b340c4e 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -6,10 +6,15 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/cpumask.h> 8#include <linux/cpumask.h>
9#include <linux/cache.h>
10
9#include <asm/errno.h> 11#include <asm/errno.h>
10 12
13extern int prof_on __read_mostly;
14
11#define CPU_PROFILING 1 15#define CPU_PROFILING 1
12#define SCHED_PROFILING 2 16#define SCHED_PROFILING 2
17#define SLEEP_PROFILING 3
13 18
14struct proc_dir_entry; 19struct proc_dir_entry;
15struct pt_regs; 20struct pt_regs;
@@ -18,7 +23,24 @@ struct notifier_block;
18/* init basic kernel profiler */ 23/* init basic kernel profiler */
19void __init profile_init(void); 24void __init profile_init(void);
20void profile_tick(int); 25void profile_tick(int);
21void profile_hit(int, void *); 26
27/*
28 * Add multiple profiler hits to a given address:
29 */
30void profile_hits(int, void *ip, unsigned int nr_hits);
31
32/*
33 * Single profiler hit:
34 */
35static inline void profile_hit(int type, void *ip)
36{
37 /*
38 * Speedup for the common (no profiling enabled) case:
39 */
40 if (unlikely(prof_on == type))
41 profile_hits(type, ip, 1);
42}
43
22#ifdef CONFIG_PROC_FS 44#ifdef CONFIG_PROC_FS
23void create_prof_cpu_mask(struct proc_dir_entry *); 45void create_prof_cpu_mask(struct proc_dir_entry *);
24#else 46#else
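profile_hits() batches several hits against one address, while the inline profile_hit() keeps the common no-profiling case to a single comparison on prof_on. A hedged sketch of a batched caller in the spirit of the new SLEEP_PROFILING type (account_sleep and its use of get_wchan() are assumptions for illustration):

#include <linux/profile.h>
#include <linux/sched.h>

static void account_sleep(struct task_struct *tsk, unsigned int ticks)
{
	/* Charge every slept tick to the place where the task blocked. */
	profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), ticks);
}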
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 5110201a415..90c23f690c0 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,6 +37,9 @@ extern int dquot_release(struct dquot *dquot);
37extern int dquot_commit_info(struct super_block *sb, int type); 37extern int dquot_commit_info(struct super_block *sb, int type);
38extern int dquot_mark_dquot_dirty(struct dquot *dquot); 38extern int dquot_mark_dquot_dirty(struct dquot *dquot);
39 39
40int remove_inode_dquot_ref(struct inode *inode, int type,
41 struct list_head *tofree_head);
42
40extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path); 43extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
41extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, 44extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
42 int format_id, int type); 45 int format_id, int type);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index cbfa1153742..0deb842541a 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (C) 2001 Momchil Velikov 2 * Copyright (C) 2001 Momchil Velikov
3 * Portions Copyright (C) 2001 Christoph Hellwig 3 * Portions Copyright (C) 2001 Christoph Hellwig
4 * Copyright (C) 2006 Nick Piggin
4 * 5 *
5 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 7 * modify it under the terms of the GNU General Public License as
@@ -21,6 +22,35 @@
21 22
22#include <linux/preempt.h> 23#include <linux/preempt.h>
23#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/rcupdate.h>
27
28/*
29 * A direct pointer (root->rnode pointing directly to a data item,
30 * rather than another radix_tree_node) is signalled by the low bit
31 * set in the root->rnode pointer.
32 *
 33 * In this case root->height is also 0, but the direct pointer tests are
34 * needed for RCU lookups when root->height is unreliable.
35 */
36#define RADIX_TREE_DIRECT_PTR 1
37
38static inline void *radix_tree_ptr_to_direct(void *ptr)
39{
40 return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR);
41}
42
43static inline void *radix_tree_direct_to_ptr(void *ptr)
44{
45 return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR);
46}
47
48static inline int radix_tree_is_direct_ptr(void *ptr)
49{
50 return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR);
51}
52
53/*** radix-tree API starts here ***/
24 54
25#define RADIX_TREE_MAX_TAGS 2 55#define RADIX_TREE_MAX_TAGS 2
26 56
@@ -47,6 +77,77 @@ do { \
47 (root)->rnode = NULL; \ 77 (root)->rnode = NULL; \
48} while (0) 78} while (0)
49 79
80/**
81 * Radix-tree synchronization
82 *
83 * The radix-tree API requires that users provide all synchronisation (with
84 * specific exceptions, noted below).
85 *
86 * Synchronization of access to the data items being stored in the tree, and
87 * management of their lifetimes must be completely managed by API users.
88 *
89 * For API usage, in general,
 90 * - any function _modifying_ the tree or tags (inserting or deleting
 91 * items, setting or clearing tags) must exclude other modifications, and
 92 * exclude any functions reading the tree.
 93 * - any function _reading_ the tree or tags (looking up items or tags,
94 * gang lookups) must exclude modifications to the tree, but may occur
95 * concurrently with other readers.
96 *
97 * The notable exceptions to this rule are the following functions:
98 * radix_tree_lookup
99 * radix_tree_tag_get
100 * radix_tree_gang_lookup
101 * radix_tree_gang_lookup_tag
102 * radix_tree_tagged
103 *
104 * The first 4 functions are able to be called locklessly, using RCU. The
105 * caller must ensure calls to these functions are made within rcu_read_lock()
106 * regions. Other readers (lock-free or otherwise) and modifications may be
107 * running concurrently.
108 *
109 * It is still required that the caller manage the synchronization and lifetimes
110 * of the items. So if RCU lock-free lookups are used, typically this would mean
111 * that the items have their own locks, or are amenable to lock-free access; and
112 * that the items are freed by RCU (or only freed after having been deleted from
113 * the radix tree *and* a synchronize_rcu() grace period).
114 *
115 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
116 * access to data items when inserting into or looking up from the radix tree)
117 *
118 * radix_tree_tagged is able to be called without locking or RCU.
119 */
120
121/**
122 * radix_tree_deref_slot - dereference a slot
123 * @pslot: pointer to slot, returned by radix_tree_lookup_slot
124 * Returns: item that was stored in that slot with any direct pointer flag
125 * removed.
126 *
127 * For use with radix_tree_lookup_slot(). Caller must hold tree at least read
128 * locked across slot lookup and dereference. More likely, will be used with
129 * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
130 */
131static inline void *radix_tree_deref_slot(void **pslot)
132{
133 return radix_tree_direct_to_ptr(*pslot);
134}
135/**
136 * radix_tree_replace_slot - replace item in a slot
137 * @pslot: pointer to slot, returned by radix_tree_lookup_slot
138 * @item: new item to store in the slot.
139 *
140 * For use with radix_tree_lookup_slot(). Caller must hold tree write locked
141 * across slot lookup and replacement.
142 */
143static inline void radix_tree_replace_slot(void **pslot, void *item)
144{
145 BUG_ON(radix_tree_is_direct_ptr(item));
146 rcu_assign_pointer(*pslot,
147 (void *)((unsigned long)item |
148 ((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR)));
149}
150
50int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); 151int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
51void *radix_tree_lookup(struct radix_tree_root *, unsigned long); 152void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
52void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); 153void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
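A hedged writer-side sketch of the slot helpers declared above: the caller's own lock (assumed here to be a spinlock) excludes other writers, while lockless RCU readers may run concurrently, so the old item may only be freed after a grace period, as the synchronization comment requires:

static void *replace_item(struct radix_tree_root *root, spinlock_t *lock,
			  unsigned long index, void *new_item)
{
	void **slot;
	void *old = NULL;

	spin_lock(lock);
	slot = radix_tree_lookup_slot(root, index);
	if (slot) {
		old = radix_tree_deref_slot(slot);
		radix_tree_replace_slot(slot, new_item);
	}
	spin_unlock(lock);

	return old;	/* free only after synchronize_rcu() or an RCU callback */
}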
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index f13299a1559..03636d7918f 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -235,7 +235,7 @@ struct raid5_private_data {
235 */ 235 */
236 int active_name; 236 int active_name;
237 char cache_name[2][20]; 237 char cache_name[2][20];
238 kmem_cache_t *slab_cache; /* for allocating stripes */ 238 struct kmem_cache *slab_cache; /* for allocating stripes */
239 239
240 int seq_flush, seq_write; 240 int seq_flush, seq_write;
241 int quiesce; 241 int quiesce;
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 7bc6bfb8625..d0e4dce33ad 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -739,7 +739,7 @@ struct block_head {
739#define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0) 739#define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0)
740 740
741/* Get right delimiting key. -- little endian */ 741/* Get right delimiting key. -- little endian */
742#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh)) 742#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))))
743 743
744/* Does the buffer contain a disk leaf. */ 744/* Does the buffer contain a disk leaf. */
745#define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL) 745#define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL)
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 0e3d91b7699..c6a48bfc8b1 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -274,7 +274,7 @@ static inline void subbuf_start_reserve(struct rchan_buf *buf,
274/* 274/*
275 * exported relay file operations, kernel/relay.c 275 * exported relay file operations, kernel/relay.c
276 */ 276 */
277extern struct file_operations relay_file_operations; 277extern const struct file_operations relay_file_operations;
278 278
279#endif /* _LINUX_RELAY_H */ 279#endif /* _LINUX_RELAY_H */
280 280
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index db2c1df4fef..36f850373d2 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -30,11 +30,11 @@ struct anon_vma {
30 30
31#ifdef CONFIG_MMU 31#ifdef CONFIG_MMU
32 32
33extern kmem_cache_t *anon_vma_cachep; 33extern struct kmem_cache *anon_vma_cachep;
34 34
35static inline struct anon_vma *anon_vma_alloc(void) 35static inline struct anon_vma *anon_vma_alloc(void)
36{ 36{
37 return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL); 37 return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
38} 38}
39 39
40static inline void anon_vma_free(struct anon_vma *anon_vma) 40static inline void anon_vma_free(struct anon_vma *anon_vma)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 5d41dee82f8..b0090e9f788 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -63,7 +63,7 @@ struct hrtimer_sleeper;
 #endif
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
-	{ .wait_lock = SPIN_LOCK_UNLOCKED \
+	{ .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
 	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index ae1fcadd598..813cee13da0 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -44,7 +44,8 @@ struct rw_semaphore {
 #endif
 
 #define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
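The SPIN_LOCK_UNLOCKED -> __SPIN_LOCK_UNLOCKED(name) conversions in rtmutex.h and rwsem-spinlock.h (and in sunrpc/sched.h further down) give each statically initialized lock its own lockdep class. A minimal sketch of the same pattern for an ordinary structure; the my_driver_state structure is illustrative only, not part of this patch:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* hypothetical example structure */
	struct my_driver_state {
		spinlock_t lock;
		struct list_head items;
	};

	/* naming the lock in the initializer lets lockdep give it a
	 * distinct class instead of one key shared by every such lock */
	static struct my_driver_state my_state = {
		.lock  = __SPIN_LOCK_UNLOCKED(my_state.lock),
		.items = LIST_HEAD_INIT(my_state.items),
	};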
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eafe4a7b823..dede82c6344 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -194,7 +194,16 @@ extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
-extern void show_state(void);
+/*
+ * Only dump TASK_* tasks. (-1 for all tasks)
+ */
+extern void show_state_filter(unsigned long state_filter);
+
+static inline void show_state(void)
+{
+	show_state_filter(-1);
+}
+
 extern void show_regs(struct pt_regs *);
 
 /*
@@ -338,15 +347,23 @@ struct mm_struct {
 
 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
-	unsigned dumpable:2;
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
 
-	/* Token based thrashing protection. */
-	unsigned long swap_token_time;
-	char recent_pagein;
+	/* Swap token stuff */
+	/*
+	 * Last value of global fault stamp as seen by this process.
+	 * In other words, this value gives an indication of how long
+	 * it has been since this task got the token.
+	 * Look at mm/thrash.c
+	 */
+	unsigned int faultstamp;
+	unsigned int token_priority;
+	unsigned int last_interval;
+
+	unsigned char dumpable:2;
 
 	/* coredumping support */
 	int core_waiters;
@@ -556,7 +573,7 @@ struct sched_info {
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
 #ifdef CONFIG_SCHEDSTATS
-extern struct file_operations proc_schedstat_operations;
+extern const struct file_operations proc_schedstat_operations;
 #endif /* CONFIG_SCHEDSTATS */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
@@ -1288,7 +1305,6 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -1610,87 +1626,6 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_PM
-/*
- * Check if a process has been frozen
- */
-static inline int frozen(struct task_struct *p)
-{
-	return p->flags & PF_FROZEN;
-}
-
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-	return p->flags & PF_FREEZE;
-}
-
-/*
- * Request that a process be frozen
- * FIXME: SMP problem. We may not modify other process' flags!
- */
-static inline void freeze(struct task_struct *p)
-{
-	p->flags |= PF_FREEZE;
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void do_not_freeze(struct task_struct *p)
-{
-	p->flags &= ~PF_FREEZE;
-}
-
-/*
- * Wake up a frozen process
- */
-static inline int thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		wake_up_process(p);
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * freezing is complete, mark process as frozen
- */
-static inline void frozen_process(struct task_struct *p)
-{
-	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
-}
-
-extern void refrigerator(void);
-extern int freeze_processes(void);
-extern void thaw_processes(void);
-
-static inline int try_to_freeze(void)
-{
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
-}
-#else
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
-static inline int thaw_process(struct task_struct *p) { return 1; }
-static inline void frozen_process(struct task_struct *p) { BUG(); }
-
-static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
-static inline void thaw_processes(void) {}
-
-static inline int try_to_freeze(void) { return 0; }
-
-#endif /* CONFIG_PM */
 #endif /* __KERNEL__ */
 
 #endif
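With the sched.h change above, show_state() becomes a wrapper around the new show_state_filter(-1), and callers that only care about a subset of tasks can pass a TASK_* mask instead. A hedged sketch of both call styles; the helper names are illustrative:

	#include <linux/sched.h>

	/* dump only uninterruptible (blocked) tasks, e.g. from a debug hook */
	static void dump_blocked_tasks(void)
	{
		show_state_filter(TASK_UNINTERRUPTIBLE);
	}

	/* dump every task, exactly as the old show_state() did */
	static void dump_all_tasks(void)
	{
		show_state();	/* now expands to show_state_filter(-1) */
	}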
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 2925e66a673..b02308ee766 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -42,7 +42,8 @@ struct screen_info {
 	u16 pages;		/* 0x32 */
 	u16 vesa_attributes;	/* 0x34 */
 	u32 capabilities;	/* 0x36 */
-	/* 0x3a -- 0x3f reserved for future expansion */
+	/* 0x3a -- 0x3b reserved for future expansion */
+	/* 0x3c -- 0x3f micro stack for relocatable kernels */
 };
 
 extern struct screen_info screen_info;
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index b95f6eb7254..3e3cccbb1ca 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -20,7 +20,7 @@ struct seq_file {
 	loff_t index;
 	loff_t version;
 	struct mutex lock;
-	struct seq_operations *op;
+	const struct seq_operations *op;
 	void *private;
 };
 
@@ -31,7 +31,7 @@ struct seq_operations {
 	int (*show) (struct seq_file *m, void *v);
 };
 
-int seq_open(struct file *, struct seq_operations *);
+int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
 loff_t seq_lseek(struct file *, loff_t, int);
 int seq_release(struct inode *, struct file *);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 8e968141372..71310d80c09 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -41,6 +41,7 @@ enum {
 	PLAT8250_DEV_FOURPORT,
 	PLAT8250_DEV_ACCENT,
 	PLAT8250_DEV_BOCA,
+	PLAT8250_DEV_EXAR_ST16C554,
 	PLAT8250_DEV_HUB6,
 	PLAT8250_DEV_MCA,
 	PLAT8250_DEV_AU1X00,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 463ab953b09..82767213664 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -132,6 +132,8 @@
 
 #define PORT_S3C2412	73
 
+/* Xilinx uartlite */
+#define PORT_UARTLITE	74
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 117135e33d6..14749056dd6 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -241,6 +241,8 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 struct pt_regs;
 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
 
+extern struct kmem_cache *sighand_cachep;
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a05a5f7c0b7..4ff3940210d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -332,20 +332,20 @@ struct sk_buff {
 extern void kfree_skb(struct sk_buff *skb);
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-				   gfp_t priority, int fclone);
+				   gfp_t priority, int fclone, int node);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
-	return __alloc_skb(size, priority, 0);
+	return __alloc_skb(size, priority, 0, -1);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
-	return __alloc_skb(size, priority, 1);
+	return __alloc_skb(size, priority, 1, -1);
 }
 
-extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
 					    unsigned int size,
 					    gfp_t priority);
 extern void kfree_skbmem(struct sk_buff *skb);
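The extra __alloc_skb() argument is a NUMA node hint; the unchanged alloc_skb()/alloc_skb_fclone() wrappers pass -1 for "no preference". A sketch of a caller that does care about placement; the function name and GFP choice are assumptions for illustration:

	#include <linux/skbuff.h>

	/* allocate an skb near a given NUMA node, e.g. the node a NIC's
	 * receive ring lives on; node == -1 keeps the old behaviour */
	static struct sk_buff *rx_alloc_skb_on_node(unsigned int len, int node)
	{
		return __alloc_skb(len, GFP_ATOMIC, 0, node);
	}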
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c4947b8a2c0..2271886744f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -7,27 +7,17 @@
 #ifndef _LINUX_SLAB_H
 #define _LINUX_SLAB_H
 
-#if defined(__KERNEL__)
+#ifdef __KERNEL__
 
-typedef struct kmem_cache kmem_cache_t;
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
 
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
-
-/* flags for kmem_cache_alloc() */
-#define SLAB_NOFS		GFP_NOFS
-#define SLAB_NOIO		GFP_NOIO
-#define SLAB_ATOMIC		GFP_ATOMIC
-#define SLAB_USER		GFP_USER
-#define SLAB_KERNEL		GFP_KERNEL
-#define SLAB_DMA		GFP_DMA
-
-#define SLAB_LEVEL_MASK		GFP_LEVEL_MASK
-
-#define SLAB_NO_GROW		__GFP_NO_GROW	/* don't grow a cache */
+/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
+typedef struct kmem_cache kmem_cache_t __deprecated;
 
 /* flags to pass to kmem_cache_create().
  * The first 3 are only valid when the allocator as been build
@@ -57,22 +47,23 @@ typedef struct kmem_cache kmem_cache_t;
 /* prototypes */
 extern void __init kmem_cache_init(void);
 
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-				       void (*)(void *, kmem_cache_t *, unsigned long),
-				       void (*)(void *, kmem_cache_t *, unsigned long));
-extern void kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+			unsigned long,
+			void (*)(void *, struct kmem_cache *, unsigned long),
+			void (*)(void *, struct kmem_cache *, unsigned long));
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern int kmem_cache_shrink(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
+extern void kmem_cache_free(struct kmem_cache *, void *);
+extern unsigned int kmem_cache_size(struct kmem_cache *);
+extern const char *kmem_cache_name(struct kmem_cache *);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
 	size_t cs_size;
-	kmem_cache_t *cs_cachep;
-	kmem_cache_t *cs_dmacachep;
+	struct kmem_cache *cs_cachep;
+	struct kmem_cache *cs_dmacachep;
 };
 extern struct cache_sizes malloc_sizes[];
 
@@ -211,7 +202,7 @@ extern unsigned int ksize(const void *);
 extern int slab_is_available(void);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
@@ -236,8 +227,27 @@ found:
 	}
 	return __kmalloc_node(size, flags, node);
 }
+
+/*
+ * kmalloc_node_track_caller is a special version of kmalloc_node that
+ * records the calling function of the routine calling it for slab leak
+ * tracking instead of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc_node comes from a widely-used
+ * standard allocator where we care about the real place the memory
+ * allocation request comes from.
+ */
+#ifndef CONFIG_DEBUG_SLAB
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node(size, flags, node)
 #else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node_track_caller(size, flags, node, \
+			__builtin_return_address(0))
+#endif
+#else /* CONFIG_NUMA */
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+					gfp_t flags, int node)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -245,10 +255,13 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return kmalloc(size, flags);
 }
+
+#define kmalloc_node_track_caller(size, flags, node) \
+	kmalloc_track_caller(size, flags)
 #endif
 
 extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
+extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
 
 #else /* CONFIG_SLOB */
 
@@ -283,16 +296,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 #define kzalloc(s, f) __kzalloc(s, f)
 #define kmalloc_track_caller kmalloc
 
-#endif /* CONFIG_SLOB */
+#define kmalloc_node_track_caller kmalloc_node
 
-/* System wide caches */
-extern kmem_cache_t *vm_area_cachep;
-extern kmem_cache_t *names_cachep;
-extern kmem_cache_t *files_cachep;
-extern kmem_cache_t *filp_cachep;
-extern kmem_cache_t *fs_cachep;
-extern kmem_cache_t *sighand_cachep;
-extern kmem_cache_t *bio_cachep;
+#endif /* CONFIG_SLOB */
 
 #endif /* __KERNEL__ */
 
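Taken together, the slab.h changes retire the kmem_cache_t typedef (now marked __deprecated) and drop the SLAB_* allocation flags in favour of plain GFP_* flags. A minimal sketch of the updated idiom; the cache name and my_object structure are illustrative only:

	#include <linux/slab.h>
	#include <linux/init.h>
	#include <linux/errno.h>

	struct my_object {
		int id;
	};

	static struct kmem_cache *my_object_cache;	/* was: kmem_cache_t * */

	static int __init my_cache_init(void)
	{
		my_object_cache = kmem_cache_create("my_object",
						    sizeof(struct my_object), 0,
						    SLAB_HWCACHE_ALIGN, NULL, NULL);
		return my_object_cache ? 0 : -ENOMEM;
	}

	static struct my_object *my_object_alloc(void)
	{
		/* was: kmem_cache_alloc(..., SLAB_KERNEL) */
		return kmem_cache_alloc(my_object_cache, GFP_KERNEL);
	}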
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 51649987f69..7ba23ec8211 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -99,6 +99,13 @@ static inline int up_smp_call_function(void)
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()	1
 #define smp_prepare_boot_cpu()	do {} while (0)
+static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
+				void *info, int retry, int wait)
+{
+	/* Disable interrupts here? */
+	func(info);
+	return 0;
+}
 
 #endif /* !SMP */
 
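The new !SMP stub lets callers use smp_call_function_single() unconditionally; on a uniprocessor build it simply runs the function locally. A hedged sketch of such a caller; the counter-reading helper is made up for illustration:

	#include <linux/smp.h>

	static void read_local_counter(void *info)
	{
		unsigned long *val = info;

		*val = 42;	/* stand-in for reading a per-CPU register */
	}

	static unsigned long read_counter_on(int cpu)
	{
		unsigned long val = 0;

		/* retry = 0, wait = 1: wait until the target CPU has run it;
		 * with the stub above this also compiles and works on UP */
		smp_call_function_single(cpu, read_local_counter, &val, 0, 1);
		return val;
	}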
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 8451052ca66..94b767d6427 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -52,6 +52,7 @@
 #include <linux/thread_info.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#include <linux/bottom_half.h>
 
 #include <asm/system.h>
 
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
new file mode 100644
index 00000000000..d3e5f275654
--- /dev/null
+++ b/include/linux/start_kernel.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_START_KERNEL_H
+#define _LINUX_START_KERNEL_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+/* Define the prototype for start_kernel here, rather than cluttering
+   up something else. */
+
+extern asmlinkage void __init start_kernel(void);
+
+#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index b6b6ad6253b..97c76165258 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -217,7 +217,7 @@ struct rpc_wait_queue {
 
 #ifndef RPC_DEBUG
 # define RPC_WAITQ_INIT(var,qname) { \
-		.lock = SPIN_LOCK_UNLOCKED, \
+		.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
 		.tasks = { \
 			[0] = LIST_HEAD_INIT(var.tasks[0]), \
 			[1] = LIST_HEAD_INIT(var.tasks[1]), \
@@ -226,7 +226,7 @@ struct rpc_wait_queue {
 	}
 #else
 # define RPC_WAITQ_INIT(var,qname) { \
-		.lock = SPIN_LOCK_UNLOCKED, \
+		.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
 		.tasks = { \
 			[0] = LIST_HEAD_INIT(var.tasks[0]), \
 			[1] = LIST_HEAD_INIT(var.tasks[1]), \
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index b1237f16ecd..bf99bd49f8e 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -9,10 +9,13 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 
-/* page backup entry */
+/* struct pbe is used for creating lists of pages that should be restored
+ * atomically during the resume from disk, because the page frames they have
+ * occupied before the suspend are in use.
+ */
 struct pbe {
-	unsigned long address;		/* address of the copy */
-	unsigned long orig_address;	/* original address of page */
+	void *address;		/* address of the copy */
+	void *orig_address;	/* original address of a page */
 	struct pbe *next;
 };
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e7c36ba2a2d..add51cebc8d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -218,8 +218,6 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct file *, struct page *);
 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
-		struct bio **bio_chain);
 extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err);
 
 /* linux/mm/swap_state.c */
@@ -247,9 +245,10 @@ extern int swap_duplicate(swp_entry_t);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern void swap_free(swp_entry_t);
 extern void free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t);
+extern int swap_type_of(dev_t, sector_t);
 extern unsigned int count_swap_pages(int, int);
 extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t swapdev_block(int, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int can_share_swap_page(struct page *);
 extern int remove_exclusive_swap_page(struct page *);
@@ -259,7 +258,6 @@ extern spinlock_t swap_lock;
 
 /* linux/mm/thrash.c */
 extern struct mm_struct * swap_token_mm;
-extern unsigned long swap_token_default_timeout;
 extern void grab_swap_token(void);
 extern void __put_swap_token(struct mm_struct *);
 
diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h
index 6562a2050a2..7e9680f4afd 100644
--- a/include/linux/taskstats_kern.h
+++ b/include/linux/taskstats_kern.h
@@ -12,64 +12,27 @@
 #include <net/genetlink.h>
 
 #ifdef CONFIG_TASKSTATS
-extern kmem_cache_t *taskstats_cache;
+extern struct kmem_cache *taskstats_cache;
 extern struct mutex taskstats_exit_mutex;
 
-static inline void taskstats_exit_free(struct taskstats *tidstats)
-{
-	if (tidstats)
-		kmem_cache_free(taskstats_cache, tidstats);
-}
-
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {
 	sig->stats = NULL;
 }
 
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct taskstats *stats;
-
-	if (sig->stats != NULL)
-		return;
-
-	/* No problem if kmem_cache_zalloc() fails */
-	stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (!sig->stats) {
-		sig->stats = stats;
-		stats = NULL;
-	}
-	spin_unlock_irq(&tsk->sighand->siglock);
-
-	if (stats)
-		kmem_cache_free(taskstats_cache, stats);
-}
-
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {
 	if (sig->stats)
 		kmem_cache_free(taskstats_cache, sig->stats);
 }
 
-extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
-extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
+extern void taskstats_exit(struct task_struct *, int group_dead);
 extern void taskstats_init_early(void);
 #else
-static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
-{}
-static inline void taskstats_exit_free(struct taskstats *ptidstats)
-{}
-static inline void taskstats_exit_send(struct task_struct *tsk,
-				       struct taskstats *tidstats,
-				       int group_dead, unsigned int cpu)
+static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
 {}
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {}
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{}
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {}
 static inline void taskstats_init_early(void)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index a48d7f11c7b..975c963e578 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,8 +1,43 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/preempt.h>
 #include <asm/uaccess.h>
 
+/*
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
+ */
+static inline void pagefault_disable(void)
+{
+	inc_preempt_count();
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -30,14 +65,22 @@ static inline unsigned long __copy_from_user_nocache(void *to,
  * do_page_fault() doesn't attempt to take mmap_sem. This makes
  * probe_kernel_address() suitable for use within regions where the caller
  * already holds mmap_sem, or other locks which nest inside mmap_sem.
+ * This must be a macro because __get_user() needs to know the types of the
+ * args.
+ *
+ * We don't include enough header files to be able to do the set_fs(). We
+ * require that the probe_kernel_address() caller will do that.
  */
 #define probe_kernel_address(addr, retval)		\
 	({						\
 		long ret;				\
+		mm_segment_t old_fs = get_fs();		\
 							\
-		inc_preempt_count();			\
-		ret = __get_user(retval, addr);		\
-		dec_preempt_count();			\
+		set_fs(KERNEL_DS);			\
+		pagefault_disable();			\
+		ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \
+		pagefault_enable();			\
+		set_fs(old_fs);				\
 		ret;					\
 	})
 
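These uaccess.h additions make probe_kernel_address() usable from almost any context: as the macro body above shows, it switches to KERNEL_DS, disables pagefault handling (piggy-backing on the preempt count), and restores both afterwards. A hedged sketch of a caller, e.g. a debug path probing a possibly bogus pointer; the helper name is illustrative:

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* returns 0 and fills *val if ptr is a readable kernel address,
	 * -EFAULT otherwise; never oopses on a bad pointer */
	static int peek_kernel_ulong(const unsigned long *ptr, unsigned long *val)
	{
		unsigned long tmp;

		if (probe_kernel_address(ptr, tmp))
			return -EFAULT;

		*val = tmp;
		return 0;
	}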
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4a3ea83c6d1..edef8d50b26 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -147,9 +147,11 @@ struct execute_work {
 
 
 extern struct workqueue_struct *__create_workqueue(const char *name,
-						    int singlethread);
-#define create_workqueue(name) __create_workqueue((name), 0)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1)
+						    int singlethread,
+						    int freezeable);
+#define create_workqueue(name) __create_workqueue((name), 0, 0)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
+#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
@@ -160,6 +162,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
+extern int FASTCALL(run_scheduled_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
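__create_workqueue() now takes a freezeable flag, exposed through create_freezeable_workqueue(): such a queue's worker thread enters the refrigerator during suspend instead of continuing to run work. A sketch of creating and tearing one down; the queue name is illustrative:

	#include <linux/workqueue.h>
	#include <linux/init.h>
	#include <linux/errno.h>

	static struct workqueue_struct *my_wq;

	static int __init my_wq_init(void)
	{
		/* worker thread will be frozen across suspend/resume */
		my_wq = create_freezeable_workqueue("my_wq");
		if (!my_wq)
			return -ENOMEM;
		return 0;
	}

	static void __exit my_wq_exit(void)
	{
		flush_workqueue(my_wq);
		destroy_workqueue(my_wq);
	}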
diff --git a/include/net/dst.h b/include/net/dst.h
index e156e38e4ac..62b7e7598e9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -98,7 +98,7 @@ struct dst_ops
 	int			entry_size;
 
 	atomic_t		entries;
-	kmem_cache_t		*kmem_cachep;
+	struct kmem_cache	*kmem_cachep;
 };
 
 #ifdef __KERNEL__
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index a9eb2eaf094..34cc76e3ddb 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -125,7 +125,7 @@ struct inet_hashinfo {
 	rwlock_t			lhash_lock ____cacheline_aligned;
 	atomic_t			lhash_users;
 	wait_queue_head_t		lhash_wait;
-	kmem_cache_t			*bind_bucket_cachep;
+	struct kmem_cache		*bind_bucket_cachep;
 };
 
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -136,10 +136,10 @@ static inline struct inet_ehash_bucket *inet_ehash_bucket(
 }
 
 extern struct inet_bind_bucket *
-		    inet_bind_bucket_create(kmem_cache_t *cachep,
+		    inet_bind_bucket_create(struct kmem_cache *cachep,
 					    struct inet_bind_hashbucket *head,
 					    const unsigned short snum);
-extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
+extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
 				     struct inet_bind_bucket *tb);
 
 static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
diff --git a/include/net/irda/irlan_filter.h b/include/net/irda/irlan_filter.h
index 492dedaa8ac..1720539ac2c 100644
--- a/include/net/irda/irlan_filter.h
+++ b/include/net/irda/irlan_filter.h
@@ -28,6 +28,8 @@
 void irlan_check_command_param(struct irlan_cb *self, char *param,
 			       char *value);
 void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb);
+#ifdef CONFIG_PROC_FS
 void irlan_print_filter(struct seq_file *seq, int filter_type);
+#endif
 
 #endif /* IRLAN_FILTER_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index c8aacbd2e33..23967031ddb 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -160,7 +160,7 @@ struct neigh_table
 	atomic_t		entries;
 	rwlock_t		lock;
 	unsigned long		last_rand;
-	kmem_cache_t		*kmem_cachep;
+	struct kmem_cache	*kmem_cachep;
 	struct neigh_statistics	*stats;
 	struct neighbour	**hash_buckets;
 	unsigned int		hash_mask;
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index cef3136e22a..41bcc9eb420 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -7,7 +7,7 @@
 #include <net/netfilter/nf_conntrack.h>
 
 extern struct list_head nf_conntrack_expect_list;
-extern kmem_cache_t *nf_conntrack_expect_cachep;
+extern struct kmem_cache *nf_conntrack_expect_cachep;
 extern struct file_operations exp_file_ops;
 
 struct nf_conntrack_expect
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index e37baaf2080..7aed02ce2b6 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -29,7 +29,7 @@ struct proto;
 struct request_sock_ops {
 	int		family;
 	int		obj_size;
-	kmem_cache_t	*slab;
+	struct kmem_cache	*slab;
 	int		(*rtx_syn_ack)(struct sock *sk,
 				       struct request_sock *req,
 				       struct dst_entry *dst);
@@ -60,7 +60,7 @@ struct request_sock {
 
 static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
 {
-	struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
 
 	if (req != NULL)
 		req->rsk_ops = ops;
diff --git a/include/net/sock.h b/include/net/sock.h
index fe3a33fad03..03684e702d1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -571,7 +571,7 @@ struct proto {
 	int			*sysctl_rmem;
 	int			max_header;
 
-	kmem_cache_t		*slab;
+	struct kmem_cache	*slab;
 	unsigned int		obj_size;
 
 	atomic_t		*orphan_count;
@@ -746,6 +746,25 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
  */
 #define sock_owned_by_user(sk)	((sk)->sk_lock.owner)
 
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
+do { \
+	sk->sk_lock.owner = NULL; \
+	init_waitqueue_head(&sk->sk_lock.wq); \
+	spin_lock_init(&(sk)->sk_lock.slock); \
+	debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
+			sizeof((sk)->sk_lock)); \
+	lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
+			(skey), (sname)); \
+	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
+} while (0)
+
 extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
 
 static inline void lock_sock(struct sock *sk)
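sock_lock_init_class_and_name() lets a protocol give its socket locks a per-address-family lockdep class and readable names; when lockdep is disabled the key/name arguments are never evaluated. A hedged sketch for a made-up address family (all AF_HYPO names are assumptions):

	#include <net/sock.h>

	/* one static key pair per address family, as lockdep classes must
	 * live in static storage */
	static struct lock_class_key hypo_slock_key;
	static struct lock_class_key hypo_sk_lock_key;

	static void hypo_sock_lock_init(struct sock *sk)
	{
		sock_lock_init_class_and_name(sk,
				"slock-AF_HYPO", &hypo_slock_key,
				"sk_lock-AF_HYPO", &hypo_sk_lock_key);
	}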
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index d7a306ea560..1e1ee3253fd 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -15,7 +15,7 @@
 #include <net/sock.h>
 
 struct timewait_sock_ops {
-	kmem_cache_t	*twsk_slab;
+	struct kmem_cache	*twsk_slab;
 	unsigned int	twsk_obj_size;
 	int		(*twsk_unique)(struct sock *sk,
 				       struct sock *sktw, void *twp);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 15ec19dcf9c..e4765413cf8 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -392,6 +392,20 @@ extern int xfrm_unregister_km(struct xfrm_mgr *km);
 
 extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
 
+/* Audit Information */
+struct xfrm_audit
+{
+	uid_t	loginuid;
+	u32	secid;
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result,
+		    struct xfrm_policy *xp, struct xfrm_state *x);
+#else
+#define xfrm_audit_log(a,s,t,r,p,x) do { ; } while (0)
+#endif /* CONFIG_AUDITSYSCALL */
+
 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
 {
 	if (likely(policy != NULL))
@@ -906,7 +920,7 @@ static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **s
 #endif
 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
 extern int xfrm_state_delete(struct xfrm_state *x);
-extern void xfrm_state_flush(u8 proto);
+extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
 extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
@@ -959,13 +973,13 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
 					  struct xfrm_selector *sel,
 					  struct xfrm_sec_ctx *ctx, int delete);
 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete);
-void xfrm_policy_flush(u8 type);
+void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
 u32 xfrm_get_acqseq(void);
 void xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi);
 struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
 				  xfrm_address_t *daddr, xfrm_address_t *saddr,
 				  int create, unsigned short family);
-extern void xfrm_policy_flush(u8 type);
+extern void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
 extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
 			  struct flowi *fl, int family, int strict);
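xfrm_state_flush() and xfrm_policy_flush() now carry a struct xfrm_audit so a flush can be attributed to the requesting user; with CONFIG_AUDITSYSCALL disabled, xfrm_audit_log() compiles away. A hedged sketch of a caller filling in the audit data; where the loginuid/secid come from is an assumption here (typically the requesting task's audit context):

	#include <net/xfrm.h>

	/* flush all states of one protocol on behalf of a userspace request */
	static void hypo_flush_states(u8 proto, uid_t loginuid, u32 secid)
	{
		struct xfrm_audit audit_info = {
			.loginuid = loginuid,
			.secid    = secid,
		};

		xfrm_state_flush(proto, &audit_info);
	}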
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 9233ed5de66..0c775fceb67 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -557,7 +557,7 @@ struct sas_task {
 
 static inline struct sas_task *sas_alloc_task(gfp_t flags)
 {
-	extern kmem_cache_t *sas_task_cache;
+	extern struct kmem_cache *sas_task_cache;
 	struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags);
 
 	if (task) {
@@ -575,7 +575,7 @@ static inline struct sas_task *sas_alloc_task(gfp_t flags)
 static inline void sas_free_task(struct sas_task *task)
 {
 	if (task) {
-		extern kmem_cache_t *sas_task_cache;
+		extern struct kmem_cache *sas_task_cache;
 		BUG_ON(!list_empty(&task->list));
 		kmem_cache_free(sas_task_cache, task);
 	}