Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h                  2
-rw-r--r--  include/linux/aio.h                   2
-rw-r--r--  include/linux/binfmts.h              19
-rw-r--r--  include/linux/coda_linux.h            3
-rw-r--r--  include/linux/coda_proc.h            76
-rw-r--r--  include/linux/coda_psdev.h           12
-rw-r--r--  include/linux/edac.h                 29
-rw-r--r--  include/linux/freezer.h               9
-rw-r--r--  include/linux/fs.h                   26
-rw-r--r--  include/linux/genetlink.h            13
-rw-r--r--  include/linux/highmem.h              15
-rw-r--r--  include/linux/kprobes.h               6
-rw-r--r--  include/linux/lguest.h               85
-rw-r--r--  include/linux/lguest_bus.h           48
-rw-r--r--  include/linux/lguest_launcher.h      73
-rw-r--r--  include/linux/lockdep.h              71
-rw-r--r--  include/linux/mm.h                  103
-rw-r--r--  include/linux/namei.h                 4
-rw-r--r--  include/linux/netlink.h               2
-rw-r--r--  include/linux/nfsd/export.h          13
-rw-r--r--  include/linux/notifier.h              6
-rw-r--r--  include/linux/page-flags.h           45
-rw-r--r--  include/linux/pci_ids.h               3
-rw-r--r--  include/linux/pm.h                    1
-rw-r--r--  include/linux/sched.h                30
-rw-r--r--  include/linux/spinlock_types.h        4
-rw-r--r--  include/linux/spinlock_types_up.h     9
-rw-r--r--  include/linux/stacktrace.h            2
-rw-r--r--  include/linux/suspend.h              52
-rw-r--r--  include/linux/user_namespace.h        2
30 files changed, 540 insertions, 225 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fccd8b548d93..dc234c508a6f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -122,7 +122,7 @@ extern struct acpi_mcfg_allocation *pci_mmcfg_config;
 extern int pci_mmcfg_config_num;
 
 extern int sbf_port;
-extern unsigned long acpi_video_flags;
+extern unsigned long acpi_realmode_flags;
 
 #else	/* !CONFIG_ACPI */
 
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b903fc02bdb7..d10e608f232d 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -86,7 +86,7 @@ struct kioctx;
  */
 struct kiocb {
 	struct list_head	ki_run_list;
-	long			ki_flags;
+	unsigned long		ki_flags;
 	int			ki_users;
 	unsigned		ki_key;		/* id of this request */
 
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index e1a708337be3..91c8c07fe8b7 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -6,11 +6,13 @@
 struct pt_regs;
 
 /*
- * MAX_ARG_PAGES defines the number of pages allocated for arguments
- * and envelope for the new program. 32 should suffice, this gives
- * a maximum env+arg of 128kB w/4KB pages!
+ * These are the maximum length and maximum number of strings passed to the
+ * execve() system call.  MAX_ARG_STRLEN is essentially random but serves to
+ * prevent the kernel from being unduly impacted by misaddressed pointers.
+ * MAX_ARG_STRINGS is chosen to fit in a signed 32-bit integer.
  */
-#define MAX_ARG_PAGES 32
+#define MAX_ARG_STRLEN (PAGE_SIZE * 32)
+#define MAX_ARG_STRINGS 0x7FFFFFFF
 
 /* sizeof(linux_binprm->buf) */
 #define BINPRM_BUF_SIZE 128
@@ -24,7 +26,12 @@ struct pt_regs;
  */
 struct linux_binprm{
 	char buf[BINPRM_BUF_SIZE];
+#ifdef CONFIG_MMU
+	struct vm_area_struct *vma;
+#else
+# define MAX_ARG_PAGES	32
 	struct page *page[MAX_ARG_PAGES];
+#endif
 	struct mm_struct *mm;
 	unsigned long p; /* current top of mem */
 	int sh_bang;
@@ -40,6 +47,7 @@ struct linux_binprm{
 	unsigned interp_flags;
 	unsigned interp_data;
 	unsigned long loader, exec;
+	unsigned long argv_len;
 };
 
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
@@ -68,7 +76,7 @@ extern int register_binfmt(struct linux_binfmt *);
 extern int unregister_binfmt(struct linux_binfmt *);
 
 extern int prepare_binprm(struct linux_binprm *);
-extern void remove_arg_zero(struct linux_binprm *);
+extern int __must_check remove_arg_zero(struct linux_binprm *);
 extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
 extern int flush_old_exec(struct linux_binprm * bprm);
 
@@ -85,6 +93,7 @@ extern int suid_dumpable;
 extern int setup_arg_pages(struct linux_binprm * bprm,
 			   unsigned long stack_top,
 			   int executable_stack);
+extern int bprm_mm_init(struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
 extern void compute_creds(struct linux_binprm *binprm);
 extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index e4ac016ad272..c4079b403e9e 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -43,9 +43,6 @@ int coda_revalidate_inode(struct dentry *);
 int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int coda_setattr(struct dentry *, struct iattr *);
 
-/* global variables */
-extern int coda_fake_statfs;
-
 /* this file: heloers */
 static __inline__ struct CodaFid *coda_i2f(struct inode *);
 static __inline__ char *coda_i2s(struct inode *);
diff --git a/include/linux/coda_proc.h b/include/linux/coda_proc.h
deleted file mode 100644
index 0dc1b0458e75..000000000000
--- a/include/linux/coda_proc.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * coda_statis.h
- *
- * CODA operation statistics
- *
- * (c) March, 1998
- * by Michihiro Kuramochi, Zhenyu Xia and Zhanyong Wan
- * zhanyong.wan@yale.edu
- *
- */
-
-#ifndef _CODA_PROC_H
-#define _CODA_PROC_H
-
-void coda_sysctl_init(void);
-void coda_sysctl_clean(void);
-
-#include <linux/sysctl.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda.h>
-
-/* these four files are presented to show the result of the statistics:
- *
- * /proc/fs/coda/vfs_stats
- *               cache_inv_stats
- *
- * these four files are presented to reset the statistics to 0:
- *
- * /proc/sys/coda/vfs_stats
- *                cache_inv_stats
- */
-
-/* VFS operation statistics */
-struct coda_vfs_stats
-{
-	/* file operations */
-	int open;
-	int flush;
-	int release;
-	int fsync;
-
-	/* dir operations */
-	int readdir;
-
-	/* inode operations */
-	int create;
-	int lookup;
-	int link;
-	int unlink;
-	int symlink;
-	int mkdir;
-	int rmdir;
-	int rename;
-	int permission;
-
-	/* symlink operatoins*/
-	int follow_link;
-	int readlink;
-};
-
-/* cache invalidation statistics */
-struct coda_cache_inv_stats
-{
-	int flush;
-	int purge_user;
-	int zap_dir;
-	int zap_file;
-	int zap_vnode;
-	int purge_fid;
-	int replace;
-};
-
-/* these global variables hold the actual statistics data */
-extern struct coda_vfs_stats coda_vfs_stat;
-
-#endif /* _CODA_PROC_H */
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index b541bb3d1f4b..aa8f454b3b77 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -8,11 +8,6 @@
 
 struct kstatfs;
 
-struct coda_sb_info
-{
-	struct venus_comm *sbi_vcomm;
-};
-
 /* communication pending/processing queues */
 struct venus_comm {
 	u_long		vc_seq;
@@ -24,9 +19,9 @@ struct venus_comm {
 };
 
 
-static inline struct coda_sb_info *coda_sbp(struct super_block *sb)
+static inline struct venus_comm *coda_vcp(struct super_block *sb)
 {
-	return ((struct coda_sb_info *)((sb)->s_fs_info));
+	return (struct venus_comm *)((sb)->s_fs_info);
 }
 
 
@@ -74,8 +69,6 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
 
 
 /* messages between coda filesystem in kernel and Venus */
-extern int coda_hard;
-extern unsigned long coda_timeout;
 struct upc_req {
 	struct list_head	uc_chain;
 	caddr_t			uc_data;
@@ -85,7 +78,6 @@ struct upc_req {
 	u_short			uc_opcode;  /* copied from data to save lookup */
 	int			uc_unique;
 	wait_queue_head_t	uc_sleep;   /* process' wait queue */
-	unsigned long		uc_posttime;
 };
 
 #define REQ_ASYNC  0x1
diff --git a/include/linux/edac.h b/include/linux/edac.h
new file mode 100644
index 000000000000..eab451e69a91
--- /dev/null
+++ b/include/linux/edac.h
@@ -0,0 +1,29 @@
+/*
+ * Generic EDAC defs
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _LINUX_EDAC_H_
+#define _LINUX_EDAC_H_
+
+#include <asm/atomic.h>
+
+#define EDAC_OPSTATE_INVAL	-1
+#define EDAC_OPSTATE_POLL	0
+#define EDAC_OPSTATE_NMI	1
+#define EDAC_OPSTATE_INT	2
+
+extern int edac_op_state;
+extern int edac_err_assert;
+extern atomic_t edac_handlers;
+
+extern int edac_handler_set(void);
+extern void edac_atomic_assert_error(void);
+
+#endif
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 2d38b1a74662..c8e02de737f6 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -25,7 +25,7 @@ static inline int freezing(struct task_struct *p)
 /*
  * Request that a process be frozen
  */
-static inline void freeze(struct task_struct *p)
+static inline void set_freeze_flag(struct task_struct *p)
 {
 	set_tsk_thread_flag(p, TIF_FREEZE);
 }
@@ -33,7 +33,7 @@ static inline void freeze(struct task_struct *p)
 /*
  * Sometimes we may need to cancel the previous 'freeze' request
  */
-static inline void do_not_freeze(struct task_struct *p)
+static inline void clear_freeze_flag(struct task_struct *p)
 {
 	clear_tsk_thread_flag(p, TIF_FREEZE);
 }
@@ -56,7 +56,7 @@ static inline int thaw_process(struct task_struct *p)
 		wake_up_process(p);
 		return 1;
 	}
-	clear_tsk_thread_flag(p, TIF_FREEZE);
+	clear_freeze_flag(p);
 	task_unlock(p);
 	return 0;
 }
@@ -129,7 +129,8 @@ static inline void set_freezable(void)
 #else
 static inline int frozen(struct task_struct *p) { return 0; }
 static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
+static inline void set_freeze_flag(struct task_struct *p) {}
+static inline void clear_freeze_flag(struct task_struct *p) {}
 static inline int thaw_process(struct task_struct *p) { return 1; }
 
 static inline void refrigerator(void) {}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9562a59b3703..d33beadd9a43 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -697,20 +697,26 @@ struct fown_struct {
  * Track a single file's readahead state
  */
 struct file_ra_state {
-	unsigned long start;		/* Current window */
-	unsigned long size;
-	unsigned long flags;		/* ra flags RA_FLAG_xxx*/
-	unsigned long cache_hit;	/* cache hit count*/
-	unsigned long prev_index;	/* Cache last read() position */
-	unsigned long ahead_start;	/* Ahead window */
-	unsigned long ahead_size;
+	pgoff_t start;			/* where readahead started */
+	unsigned long size;		/* # of readahead pages */
+	unsigned long async_size;	/* do asynchronous readahead when
+					   there are only # of pages ahead */
+
 	unsigned long ra_pages;		/* Maximum readahead window */
 	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
 	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
+	unsigned long prev_index;	/* Cache last read() position */
 	unsigned int prev_offset;	/* Offset where last read() ended in a page */
 };
-#define RA_FLAG_MISS	0x01	/* a cache miss occured against this file */
-#define RA_FLAG_INCACHE	0x02	/* file is already in cache */
+
+/*
+ * Check if @index falls in the readahead windows.
+ */
+static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
+{
+	return (index >= ra->start &&
+		index <  ra->start + ra->size);
+}
 
 struct file {
 	/*
@@ -1463,7 +1469,7 @@ extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
 extern int register_chrdev_region(dev_t, unsigned, const char *);
 extern int register_chrdev(unsigned int, const char *,
 			   const struct file_operations *);
-extern int unregister_chrdev(unsigned int, const char *);
+extern void unregister_chrdev(unsigned int, const char *);
 extern void unregister_chrdev_region(dev_t, unsigned);
 extern int chrdev_open(struct inode *, struct file *);
 extern void chrdev_show(struct seq_file *,off_t);
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index f7a93770e1be..7da02c93002b 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -39,6 +39,9 @@ enum {
 	CTRL_CMD_NEWOPS,
 	CTRL_CMD_DELOPS,
 	CTRL_CMD_GETOPS,
+	CTRL_CMD_NEWMCAST_GRP,
+	CTRL_CMD_DELMCAST_GRP,
+	CTRL_CMD_GETMCAST_GRP, /* unused */
 	__CTRL_CMD_MAX,
 };
 
@@ -52,6 +55,7 @@ enum {
 	CTRL_ATTR_HDRSIZE,
 	CTRL_ATTR_MAXATTR,
 	CTRL_ATTR_OPS,
+	CTRL_ATTR_MCAST_GROUPS,
 	__CTRL_ATTR_MAX,
 };
 
@@ -66,4 +70,13 @@ enum {
 
 #define CTRL_ATTR_OP_MAX (__CTRL_ATTR_OP_MAX - 1)
 
+enum {
+	CTRL_ATTR_MCAST_GRP_UNSPEC,
+	CTRL_ATTR_MCAST_GRP_NAME,
+	CTRL_ATTR_MCAST_GRP_ID,
+	__CTRL_ATTR_MCAST_GRP_MAX,
+};
+
+#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
+
 #endif /* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 12c5e4e3135a..1fcb0033179e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -103,21 +103,6 @@ __alloc_zeroed_user_highpage(gfp_t movableflags,
 #endif
 
 /**
- * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * not be able to move in the future using move_pages() or reclaim. If it
- * is known that the page can move, use alloc_zeroed_user_highpage_movable
- */
-static inline struct page *
-alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
-{
-	return __alloc_zeroed_user_highpage(0, vma, vaddr);
-}
-
-/**
  * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
  * @vma: The VMA the page is to be allocated for
  * @vaddr: The virtual address the page will be inserted into
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 23adf6075ae4..51464d12a4e5 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -116,9 +116,12 @@ struct kprobe {
  */
 struct jprobe {
 	struct kprobe kp;
-	kprobe_opcode_t *entry;	/* probe handling code to jump to */
+	void *entry;	/* probe handling code to jump to */
 };
 
+/* For backward compatibility with old code using JPROBE_ENTRY() */
+#define JPROBE_ENTRY(handler)	(handler)
+
 DECLARE_PER_CPU(struct kprobe *, current_kprobe);
 DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -211,6 +214,7 @@ int longjmp_break_handler(struct kprobe *, struct pt_regs *);
 int register_jprobe(struct jprobe *p);
 void unregister_jprobe(struct jprobe *p);
 void jprobe_return(void);
+unsigned long arch_deref_entry_point(void *);
 
 int register_kretprobe(struct kretprobe *rp);
 void unregister_kretprobe(struct kretprobe *rp);
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
new file mode 100644
index 000000000000..500aace21ca7
--- /dev/null
+++ b/include/linux/lguest.h
@@ -0,0 +1,85 @@
+/* Things the lguest guest needs to know.  Note: like all lguest interfaces,
+ * this is subject to wild and random change between versions. */
+#ifndef _ASM_LGUEST_H
+#define _ASM_LGUEST_H
+
+#ifndef __ASSEMBLY__
+#include <asm/irq.h>
+
+#define LHCALL_FLUSH_ASYNC	0
+#define LHCALL_LGUEST_INIT	1
+#define LHCALL_CRASH		2
+#define LHCALL_LOAD_GDT		3
+#define LHCALL_NEW_PGTABLE	4
+#define LHCALL_FLUSH_TLB	5
+#define LHCALL_LOAD_IDT_ENTRY	6
+#define LHCALL_SET_STACK	7
+#define LHCALL_TS		8
+#define LHCALL_SET_CLOCKEVENT	9
+#define LHCALL_HALT		10
+#define LHCALL_GET_WALLCLOCK	11
+#define LHCALL_BIND_DMA		12
+#define LHCALL_SEND_DMA		13
+#define LHCALL_SET_PTE		14
+#define LHCALL_SET_PMD		15
+#define LHCALL_LOAD_TLS		16
+
+#define LG_CLOCK_MIN_DELTA	100UL
+#define LG_CLOCK_MAX_DELTA	ULONG_MAX
+
+#define LGUEST_TRAP_ENTRY 0x1F
+
+static inline unsigned long
+hcall(unsigned long call,
+      unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+	asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+		     : "=a"(call)
+		     : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
+		     : "memory");
+	return call;
+}
+
+void async_hcall(unsigned long call,
+		 unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+/* Can't use our min() macro here: needs to be a constant */
+#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
+
+#define LHCALL_RING_SIZE 64
+struct hcall_ring
+{
+	u32 eax, edx, ebx, ecx;
+};
+
+/* All the good stuff happens here: guest registers it with LGUEST_INIT */
+struct lguest_data
+{
+/* Fields which change during running: */
+	/* 512 == enabled (same as eflags) */
+	unsigned int irq_enabled;
+	/* Interrupts blocked by guest. */
+	DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
+
+	/* Virtual address of page fault. */
+	unsigned long cr2;
+
+	/* Async hypercall ring.  0xFF == done, 0 == pending. */
+	u8 hcall_status[LHCALL_RING_SIZE];
+	struct hcall_ring hcalls[LHCALL_RING_SIZE];
+
+/* Fields initialized by the hypervisor at boot: */
+	/* Memory not to try to access */
+	unsigned long reserve_mem;
+	/* ID of this guest (used by network driver to set ethernet address) */
+	u16 guestid;
+	/* KHz for the TSC clock. */
+	u32 tsc_khz;
+
+/* Fields initialized by the guest at boot: */
+	/* Instruction range to suppress interrupts even if enabled */
+	unsigned long noirq_start, noirq_end;
+};
+extern struct lguest_data lguest_data;
+#endif /* __ASSEMBLY__ */
+#endif	/* _ASM_LGUEST_H */
diff --git a/include/linux/lguest_bus.h b/include/linux/lguest_bus.h
new file mode 100644
index 000000000000..c9b4e05fee49
--- /dev/null
+++ b/include/linux/lguest_bus.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_LGUEST_DEVICE_H
+#define _ASM_LGUEST_DEVICE_H
+/* Everything you need to know about lguest devices. */
+#include <linux/device.h>
+#include <linux/lguest.h>
+#include <linux/lguest_launcher.h>
+
+struct lguest_device {
+	/* Unique busid, and index into lguest_page->devices[] */
+	unsigned int index;
+
+	struct device dev;
+
+	/* Driver can hang data off here. */
+	void *private;
+};
+
+/* By convention, each device can use irq index+1 if it wants to. */
+static inline int lgdev_irq(const struct lguest_device *dev)
+{
+	return dev->index + 1;
+}
+
+/* dma args must not be vmalloced! */
+void lguest_send_dma(unsigned long key, struct lguest_dma *dma);
+int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas,
+		    unsigned int num, u8 irq);
+void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas);
+
+/* Map the virtual device space */
+void *lguest_map(unsigned long phys_addr, unsigned long pages);
+void lguest_unmap(void *);
+
+struct lguest_driver {
+	const char *name;
+	struct module *owner;
+	u16 device_type;
+	int (*probe)(struct lguest_device *dev);
+	void (*remove)(struct lguest_device *dev);
+
+	struct device_driver drv;
+};
+
+extern int register_lguest_driver(struct lguest_driver *drv);
+extern void unregister_lguest_driver(struct lguest_driver *drv);
+
+extern struct lguest_device_desc *lguest_devices; /* Just past max_pfn */
+#endif /* _ASM_LGUEST_DEVICE_H */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
new file mode 100644
index 000000000000..0ba414a40c80
--- /dev/null
+++ b/include/linux/lguest_launcher.h
@@ -0,0 +1,73 @@
+#ifndef _ASM_LGUEST_USER
+#define _ASM_LGUEST_USER
+/* Everything the "lguest" userspace program needs to know. */
+/* They can register up to 32 arrays of lguest_dma. */
+#define LGUEST_MAX_DMA		32
+/* At most we can dma 16 lguest_dma in one op. */
+#define LGUEST_MAX_DMA_SECTIONS	16
+
+/* How many devices?  Assume each one wants up to two dma arrays per device. */
+#define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2)
+
+struct lguest_dma
+{
+	/* 0 if free to be used, filled by hypervisor. */
+	u32 used_len;
+	unsigned long addr[LGUEST_MAX_DMA_SECTIONS];
+	u16 len[LGUEST_MAX_DMA_SECTIONS];
+};
+
+struct lguest_block_page
+{
+	/* 0 is a read, 1 is a write. */
+	int type;
+	u32 sector; 	/* Offset in device = sector * 512. */
+	u32 bytes;	/* Length expected to be read/written in bytes */
+	/* 0 = pending, 1 = done, 2 = done, error */
+	int result;
+	u32 num_sectors; /* Disk length = num_sectors * 512 */
+};
+
+/* There is a shared page of these. */
+struct lguest_net
+{
+	/* Simply the mac address (with multicast bit meaning promisc). */
+	unsigned char mac[6];
+};
+
+/* Where the Host expects the Guest to SEND_DMA console output to. */
+#define LGUEST_CONSOLE_DMA_KEY 0
+
+/* We have a page of these descriptors in the lguest_device page. */
+struct lguest_device_desc {
+	u16 type;
+#define LGUEST_DEVICE_T_CONSOLE	1
+#define LGUEST_DEVICE_T_NET	2
+#define LGUEST_DEVICE_T_BLOCK	3
+
+	u16 features;
+#define LGUEST_NET_F_NOCSUM		0x4000 /* Don't bother checksumming */
+#define LGUEST_DEVICE_F_RANDOMNESS	0x8000 /* IRQ is fairly random */
+
+	u16 status;
+/* 256 and above are device specific. */
+#define LGUEST_DEVICE_S_ACKNOWLEDGE	1 /* We have seen device. */
+#define LGUEST_DEVICE_S_DRIVER		2 /* We have found a driver */
+#define LGUEST_DEVICE_S_DRIVER_OK	4 /* Driver says OK! */
+#define LGUEST_DEVICE_S_REMOVED		8 /* Device has gone away. */
+#define LGUEST_DEVICE_S_REMOVED_ACK	16 /* Driver has been told. */
+#define LGUEST_DEVICE_S_FAILED		128 /* Something actually failed */
+
+	u16 num_pages;
+	u32 pfn;
+};
+
+/* Write command first word is a request. */
+enum lguest_req
+{
+	LHREQ_INITIALIZE, /* + pfnlimit, pgdir, start, pageoffset */
+	LHREQ_GETDMA, /* + addr (returns &lguest_dma, irq in ->used_len) */
+	LHREQ_IRQ, /* + irq */
+	LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
+};
+#endif /* _ASM_LGUEST_USER */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 14c937d345cb..0e843bf65877 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -1,7 +1,8 @@
 /*
  * Runtime locking correctness validator
  *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * see Documentation/lockdep-design.txt for more details.
  */
@@ -9,6 +10,7 @@
 #define __LINUX_LOCKDEP_H
 
 struct task_struct;
+struct lockdep_map;
 
 #ifdef CONFIG_LOCKDEP
 
@@ -114,8 +116,44 @@ struct lock_class {
 
 	const char			*name;
 	int				name_version;
+
+#ifdef CONFIG_LOCK_STAT
+	unsigned long			contention_point[4];
+#endif
+};
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+	s64				min;
+	s64				max;
+	s64				total;
+	unsigned long			nr;
+};
+
+enum bounce_type {
+	bounce_acquired_write,
+	bounce_acquired_read,
+	bounce_contended_write,
+	bounce_contended_read,
+	nr_bounce_types,
+
+	bounce_acquired = bounce_acquired_write,
+	bounce_contended = bounce_contended_write,
 };
 
+struct lock_class_stats {
+	unsigned long			contention_point[4];
+	struct lock_time		read_waittime;
+	struct lock_time		write_waittime;
+	struct lock_time		read_holdtime;
+	struct lock_time		write_holdtime;
+	unsigned long			bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
 /*
  * Map the lock object (the lock instance) to the lock-class object.
  * This is embedded into specific lock instances:
@@ -124,6 +162,9 @@ struct lockdep_map {
 	struct lock_class_key		*key;
 	struct lock_class		*class_cache;
 	const char			*name;
+#ifdef CONFIG_LOCK_STAT
+	int				cpu;
+#endif
 };
 
 /*
@@ -165,6 +206,10 @@ struct held_lock {
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
 
+#ifdef CONFIG_LOCK_STAT
+	u64				waittime_stamp;
+	u64				holdtime_stamp;
+#endif
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
 	 * contexts nest ontop of process context chains, but we 'separate'
@@ -281,6 +326,30 @@ struct lock_class_key { };
 
 #endif /* !LOCKDEP */
 
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock);
+
+#define LOCK_CONTENDED(_lock, try, lock)			\
+do {								\
+	if (!try(_lock)) {					\
+		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
+		lock(_lock);					\
+	}							\
+	lock_acquired(&(_lock)->dep_map);			\
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+	lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
 extern void early_init_irq_lock_class(void);
 #else
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a5c451816fdc..c456c3a1c28e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -168,6 +168,8 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
+#define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
+
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 #endif
@@ -190,6 +192,30 @@ extern unsigned int kobjsize(const void *objp);
  */
 extern pgprot_t protection_map[16];
 
+#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
+#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
+
+
+/*
+ * vm_fault is filled by the the pagefault handler and passed to the vma's
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
+ *
+ * pgoff should be used in favour of virtual_address, if possible. If pgoff
+ * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
+ * mapping support.
+ */
+struct vm_fault {
+	unsigned int flags;		/* FAULT_FLAG_xxx flags */
+	pgoff_t pgoff;			/* Logical page offset based on vma */
+	void __user *virtual_address;	/* Faulting virtual address */
+
+	struct page *page;		/* ->fault handlers should return a
+					 * page here, unless VM_FAULT_NOPAGE
+					 * is set (which is also implied by
+					 * VM_FAULT_ERROR).
+					 */
+};
 
 /*
  * These are the virtual MM functions - opening of an area, closing and
@@ -199,9 +225,11 @@ extern pgprot_t protection_map[16];
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
-	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
-	unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address);
-	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	struct page *(*nopage)(struct vm_area_struct *area,
+			unsigned long address, int *type);
+	unsigned long (*nopfn)(struct vm_area_struct *area,
+			unsigned long address);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
@@ -655,7 +683,6 @@ static inline int page_mapped(struct page *page)
  */
 #define NOPAGE_SIGBUS	(NULL)
 #define NOPAGE_OOM	((struct page *) (-1))
-#define NOPAGE_REFAULT	((struct page *) (-2))	/* Return to userspace, rerun */
 
 /*
  * Error return values for the *_nopfn functions
@@ -669,16 +696,18 @@ static inline int page_mapped(struct page *page)
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
  */
-#define VM_FAULT_OOM	0x00
-#define VM_FAULT_SIGBUS	0x01
-#define VM_FAULT_MINOR	0x02
-#define VM_FAULT_MAJOR	0x03
-
-/*
- * Special case for get_user_pages.
- * Must be in a distinct bit from the above VM_FAULT_ flags.
- */
-#define VM_FAULT_WRITE	0x10
+
+#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */
+
+#define VM_FAULT_OOM	0x0001
+#define VM_FAULT_SIGBUS	0x0002
+#define VM_FAULT_MAJOR	0x0004
+#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
+
+#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
+#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
+
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)
 
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
@@ -762,20 +791,10 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
-extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
-extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
 
 #ifdef CONFIG_MMU
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
-
-static inline int handle_mm_fault(struct mm_struct *mm,
-	struct vm_area_struct *vma, unsigned long address,
-	int write_access)
-{
-	return __handle_mm_fault(mm, vma, address, write_access) &
-				(~VM_FAULT_WRITE);
-}
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 	struct vm_area_struct *vma, unsigned long address,
@@ -789,7 +808,6 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
-void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
@@ -806,9 +824,15 @@ int FASTCALL(set_page_dirty(struct page *page));
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+extern unsigned long move_page_tables(struct vm_area_struct *vma,
+		unsigned long old_addr, struct vm_area_struct *new_vma,
+		unsigned long new_addr, unsigned long len);
 extern unsigned long do_mremap(unsigned long addr,
 			       unsigned long old_len, unsigned long new_len,
 			       unsigned long flags, unsigned long new_addr);
+extern int mprotect_fixup(struct vm_area_struct *vma,
+			  struct vm_area_struct **pprev, unsigned long start,
+			  unsigned long end, unsigned long newflags);
 
 /*
  * A callback you can register to apply pressure to ageable caches.
@@ -1104,9 +1128,7 @@ extern void truncate_inode_pages_range(struct address_space *,
 					loff_t lstart, loff_t lend);
 
 /* generic vm_area_ops exported for stackable file systems */
-extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
-extern int filemap_populate(struct vm_area_struct *, unsigned long,
-		unsigned long, pgprot_t, unsigned long, int);
+extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
 
 /* mm/page-writeback.c */
 int write_one_page(struct page *page, int wait);
@@ -1121,13 +1143,20 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
-unsigned long page_cache_readahead(struct address_space *mapping,
-			  struct file_ra_state *ra,
-			  struct file *filp,
-			  pgoff_t offset,
-			  unsigned long size);
-void handle_ra_miss(struct address_space *mapping,
-		    struct file_ra_state *ra, pgoff_t offset);
+
+void page_cache_sync_readahead(struct address_space *mapping,
+			       struct file_ra_state *ra,
+			       struct file *filp,
+			       pgoff_t offset,
+			       unsigned long size);
+
+void page_cache_async_readahead(struct address_space *mapping,
+				struct file_ra_state *ra,
+				struct file *filp,
+				struct page *pg,
+				pgoff_t offset,
+				unsigned long size);
+
 unsigned long max_sane_readahead(unsigned long nr);
 
 /* Do stack extension */
@@ -1135,6 +1164,8 @@ extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 #ifdef CONFIG_IA64
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #endif
+extern int expand_stack_downwards(struct vm_area_struct *vma,
+				  unsigned long address);
 
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index b7dd24917f0d..6c38efbd810f 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -69,8 +69,8 @@ extern int FASTCALL(__user_walk_fd(int dfd, const char __user *, unsigned, struc
 #define user_path_walk_link(name,nd) \
 	__user_walk_fd(AT_FDCWD, name, 0, nd)
 extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *));
-extern int FASTCALL(path_walk(const char *, struct nameidata *));
-extern int FASTCALL(link_path_walk(const char *, struct nameidata *));
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+			   const char *, unsigned int, struct nameidata *);
 extern void path_release(struct nameidata *);
 extern void path_release_on_umount(struct nameidata *);
 
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 2e23353c28a5..83d8239f0cce 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -161,6 +161,8 @@ extern struct sock *netlink_kernel_create(int unit, unsigned int groups,
 					  void (*input)(struct sock *sk, int len),
 					  struct mutex *cb_mutex,
 					  struct module *module);
+extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
+extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
 extern int netlink_has_listeners(struct sock *sk, unsigned int group);
 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 78feb7beff75..5cd192469096 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -116,18 +116,7 @@ struct svc_expkey {
 #define EX_NOHIDE(exp)		((exp)->ex_flags & NFSEXP_NOHIDE)
 #define EX_WGATHER(exp)		((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
 
-static inline int EX_RDONLY(struct svc_export *exp, struct svc_rqst *rqstp)
-{
-	struct exp_flavor_info *f;
-	struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
-
-	for (f = exp->ex_flavors; f < end; f++) {
-		if (f->pseudoflavor == rqstp->rq_flavor)
-			return f->flags & NFSEXP_READONLY;
-	}
-	return exp->ex_flags & NFSEXP_READONLY;
-}
-
+int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp);
 __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
 
 /*
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 576f2bb34cc8..be3f2bb6fcf3 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -212,5 +212,11 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
 #define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
 #define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
 
+/* Hibernation and suspend events */
+#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
+#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
+#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
+#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NOTIFIER_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 731cd2ac3227..209d3a47f50f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -90,6 +90,9 @@
 #define PG_reclaim		17	/* To be reclaimed asap */
 #define PG_buddy		19	/* Page is free, on buddy lists */
 
+/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+#define PG_readahead		PG_reclaim /* Reminder to do async read-ahead */
+
 /* PG_owner_priv_1 users should have descriptive aliases */
 #define PG_checked		PG_owner_priv_1 /* Used by some filesystems */
 #define PG_pinned		PG_owner_priv_1	/* Xen pinned pagetable */
@@ -186,37 +189,15 @@ static inline void SetPageUptodate(struct page *page)
 #define __SetPagePrivate(page)  __set_bit(PG_private, &(page)->flags)
 #define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
 
+/*
+ * Only test-and-set exist for PG_writeback.  The unconditional operators are
+ * risky: they bypass page accounting.
+ */
 #define PageWriteback(page)	test_bit(PG_writeback, &(page)->flags)
-#define SetPageWriteback(page)						\
-	do {								\
-		if (!test_and_set_bit(PG_writeback,			\
-				&(page)->flags))			\
-			inc_zone_page_state(page, NR_WRITEBACK);	\
-	} while (0)
-#define TestSetPageWriteback(page)					\
-	({								\
-		int ret;						\
-		ret = test_and_set_bit(PG_writeback,			\
-					&(page)->flags);		\
-		if (!ret)						\
-			inc_zone_page_state(page, NR_WRITEBACK);	\
-		ret;							\
-	})
-#define ClearPageWriteback(page)					\
-	do {								\
-		if (test_and_clear_bit(PG_writeback,			\
-				&(page)->flags))			\
-			dec_zone_page_state(page, NR_WRITEBACK);	\
-	} while (0)
-#define TestClearPageWriteback(page)					\
-	({								\
-		int ret;						\
-		ret = test_and_clear_bit(PG_writeback,			\
-					&(page)->flags);		\
-		if (ret)						\
-			dec_zone_page_state(page, NR_WRITEBACK);	\
-		ret;							\
-	})
+#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback,	\
+							&(page)->flags)
+#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback,	\
+							&(page)->flags)
 
 #define PageBuddy(page)		test_bit(PG_buddy, &(page)->flags)
 #define __SetPageBuddy(page)	__set_bit(PG_buddy, &(page)->flags)
@@ -226,6 +207,10 @@ static inline void SetPageUptodate(struct page *page)
 #define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
 #define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
 
+#define PageReadahead(page)	test_bit(PG_readahead, &(page)->flags)
+#define SetPageReadahead(page)	set_bit(PG_readahead, &(page)->flags)
+#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags)
+
 #define PageReclaim(page)	test_bit(PG_reclaim, &(page)->flags)
 #define SetPageReclaim(page)	set_bit(PG_reclaim, &(page)->flags)
 #define ClearPageReclaim(page)	clear_bit(PG_reclaim, &(page)->flags)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2c7add169539..b15c6498fe67 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -495,6 +495,8 @@
 
 #define PCI_VENDOR_ID_AMD		0x1022
 #define PCI_DEVICE_ID_AMD_K8_NB		0x1100
+#define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP	0x1101
+#define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL	0x1102
 #define PCI_DEVICE_ID_AMD_K8_NB_MISC	0x1103
 #define PCI_DEVICE_ID_AMD_LANCE		0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
@@ -2209,6 +2211,7 @@
 #define PCI_DEVICE_ID_INTEL_82915GM_IG	0x2592
 #define PCI_DEVICE_ID_INTEL_82945G_HB	0x2770
 #define PCI_DEVICE_ID_INTEL_82945G_IG	0x2772
+#define PCI_DEVICE_ID_INTEL_3000_HB	0x2778
 #define PCI_DEVICE_ID_INTEL_82945GM_HB	0x27A0
 #define PCI_DEVICE_ID_INTEL_82945GM_IG	0x27A2
 #define PCI_DEVICE_ID_INTEL_ICH6_0	0x2640
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 2735b7cadd20..ad3cc2eb0d34 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -101,6 +101,7 @@ struct pm_dev
  */
 extern void (*pm_idle)(void);
 extern void (*pm_power_off)(void);
+extern void (*pm_power_off_prepare)(void);
 
 typedef int __bitwise suspend_state_t;
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 731edaca8ffd..33b9b4841ee7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -345,6 +345,27 @@ typedef unsigned long mm_counter_t;
 	(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
+extern void set_dumpable(struct mm_struct *mm, int value);
+extern int get_dumpable(struct mm_struct *mm);
+
+/* mm flags */
+/* dumpable bits */
+#define MMF_DUMPABLE      0  /* core dump is permitted */
+#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
+#define MMF_DUMPABLE_BITS 2
+
+/* coredump filter bits */
+#define MMF_DUMP_ANON_PRIVATE	2
+#define MMF_DUMP_ANON_SHARED	3
+#define MMF_DUMP_MAPPED_PRIVATE	4
+#define MMF_DUMP_MAPPED_SHARED	5
+#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
+#define MMF_DUMP_FILTER_BITS	4
+#define MMF_DUMP_FILTER_MASK \
+	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
+#define MMF_DUMP_FILTER_DEFAULT \
+	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
+
 struct mm_struct {
 	struct vm_area_struct * mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
@@ -402,7 +423,7 @@ struct mm_struct {
 	unsigned int token_priority;
 	unsigned int last_interval;
 
-	unsigned char dumpable:2;
+	unsigned long flags; /* Must use atomic bitops to access the bits */
 
 	/* coredumping support */
 	int core_waiters;
@@ -1327,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 210549ba4ef4..f6a3a951b79e 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,14 +9,14 @@
  * Released under the General Public License (GPL).
  */
 
-#include <linux/lockdep.h>
-
 #if defined(CONFIG_SMP)
 # include <asm/spinlock_types.h>
 #else
 # include <linux/spinlock_types_up.h>
 #endif
 
+#include <linux/lockdep.h>
+
 typedef struct {
 	raw_spinlock_t raw_lock;
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 27644af20b7c..04135b0e198e 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -12,14 +12,10 @@
  * Released under the General Public License (GPL).
  */
 
-#if defined(CONFIG_DEBUG_SPINLOCK) || \
-	defined(CONFIG_DEBUG_LOCK_ALLOC)
+#ifdef CONFIG_DEBUG_SPINLOCK
 
 typedef struct {
 	volatile unsigned int slock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
@@ -34,9 +30,6 @@ typedef struct { } raw_spinlock_t;
 
 typedef struct {
 	/* no debug version on UP */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED { }
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 1d2b084c0185..e7fa657d0c49 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -13,7 +13,7 @@ extern void save_stack_trace(struct stack_trace *trace);
 extern void print_stack_trace(struct stack_trace *trace, int spaces);
 #else
 # define save_stack_trace(trace)		do { } while (0)
-# define print_stack_trace(trace)		do { } while (0)
+# define print_stack_trace(trace, spaces)	do { } while (0)
 #endif
 
 #endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 9c7cb6430666..e8e6da394c92 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -43,14 +43,19 @@ static inline void pm_restore_console(void) {}
  * @prepare: prepare system for hibernation
  * @enter: shut down system after state has been saved to disk
  * @finish: finish/clean up after state has been reloaded
+ * @pre_restore: prepare system for the restoration from a hibernation image
+ * @restore_cleanup: clean up after a failing image restoration
  */
 struct hibernation_ops {
 	int (*prepare)(void);
 	int (*enter)(void);
 	void (*finish)(void);
+	int (*pre_restore)(void);
+	void (*restore_cleanup)(void);
 };
 
-#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
+#ifdef CONFIG_PM
+#ifdef CONFIG_SOFTWARE_SUSPEND
 /* kernel/power/snapshot.c */
 extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
 static inline void register_nosave_region(unsigned long b, unsigned long e)
@@ -68,16 +73,14 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
 
 extern void hibernation_set_ops(struct hibernation_ops *ops);
 extern int hibernate(void);
-#else
-static inline void register_nosave_region(unsigned long b, unsigned long e) {}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
+#else /* CONFIG_SOFTWARE_SUSPEND */
 static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
 static inline void swsusp_set_page_free(struct page *p) {}
 static inline void swsusp_unset_page_free(struct page *p) {}
 
 static inline void hibernation_set_ops(struct hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
-#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */
+#endif /* CONFIG_SOFTWARE_SUSPEND */
 
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -85,4 +88,43 @@ struct saved_context;
 void __save_processor_state(struct saved_context *ctxt);
 void __restore_processor_state(struct saved_context *ctxt);
 
+/* kernel/power/main.c */
+extern struct blocking_notifier_head pm_chain_head;
+
+static inline int register_pm_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&pm_chain_head, nb);
+}
+
+static inline int unregister_pm_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
+}
+
+#define pm_notifier(fn, pri) {					\
+	static struct notifier_block fn##_nb =			\
+		{ .notifier_call = fn, .priority = pri };	\
+	register_pm_notifier(&fn##_nb);				\
+}
+#else /* CONFIG_PM */
+
+static inline int register_pm_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int unregister_pm_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
+#endif /* CONFIG_PM */
+
+#if !defined CONFIG_SOFTWARE_SUSPEND || !defined(CONFIG_PM)
+static inline void register_nosave_region(unsigned long b, unsigned long e)
+{
+}
+#endif
+
 #endif /* _LINUX_SWSUSP_H */
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index bb320573bb9e..1101b0ce878f 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -49,7 +49,7 @@ static inline struct user_namespace *copy_user_ns(int flags,
 	if (flags & CLONE_NEWUSER)
 		return ERR_PTR(-EINVAL);
 
-	return NULL;
+	return old_ns;
 }
 
 static inline void put_user_ns(struct user_namespace *ns)