Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/aio.h                |  12
-rw-r--r--  include/linux/bit_spinlock.h       |  26
-rw-r--r--  include/linux/capability.h         |   4
-rw-r--r--  include/linux/clocksource.h        |   5
-rw-r--r--  include/linux/compiler-gcc.h       |  12
-rw-r--r--  include/linux/compiler.h           |  14
-rw-r--r--  include/linux/console.h            |   7
-rw-r--r--  include/linux/cpu.h                |   5
-rw-r--r--  include/linux/cyclades.h           |  27
-rw-r--r--  include/linux/dma-mapping.h        |  30
-rw-r--r--  include/linux/ext3_fs.h            |   4
-rw-r--r--  include/linux/ext4_fs.h            | 103
-rw-r--r--  include/linux/ext4_fs_extents.h    |   4
-rw-r--r--  include/linux/ext4_fs_i.h          |   5
-rw-r--r--  include/linux/ext4_fs_sb.h         |   3
-rw-r--r--  include/linux/ext4_jbd2.h          |   6
-rw-r--r--  include/linux/filter.h             |   1
-rw-r--r--  include/linux/freezer.h            |  38
-rw-r--r--  include/linux/fs.h                 |   1
-rw-r--r--  include/linux/fuse.h               |  65
-rw-r--r--  include/linux/hrtimer.h            |   2
-rw-r--r--  include/linux/ide.h                |  58
-rw-r--r--  include/linux/ipmi.h               |  10
-rw-r--r--  include/linux/ipmi_smi.h           |  36
-rw-r--r--  include/linux/jbd.h                |  17
-rw-r--r--  include/linux/jbd2.h               |  49
-rw-r--r--  include/linux/kernel_stat.h        |   2
-rw-r--r--  include/linux/libata.h             |  12
-rw-r--r--  include/linux/netdevice.h          |   2
-rw-r--r--  include/linux/netfilter/xt_sctp.h  |  13
-rw-r--r--  include/linux/of_platform.h        |   4
-rw-r--r--  include/linux/parport.h            |   1
-rw-r--r--  include/linux/pm.h                 |  98
-rw-r--r--  include/linux/poison.h             |   3
-rw-r--r--  include/linux/sched.h              |  51
-rw-r--r--  include/linux/security.h           |   7
-rw-r--r--  include/linux/suspend.h            | 169
-rw-r--r--  include/linux/sysctl.h             |  58
-rw-r--r--  include/linux/taskstats.h          |   7
39 files changed, 563 insertions(+), 408 deletions(-)
diff --git a/include/linux/aio.h b/include/linux/aio.h
index d10e608f232d..7ef8de662001 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -232,18 +232,6 @@ int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	__put_ioctx(kioctx);						\
 } while (0)
 
-#define in_aio()	(unlikely(!is_sync_wait(current->io_wait)))
-
-/* may be used for debugging */
-#define warn_if_async()						\
-do {								\
-	if (in_aio()) {						\
-		printk(KERN_ERR "%s(%s:%d) called in async context!\n", \
-			__FUNCTION__, __FILE__, __LINE__);	\
-		dump_stack();					\
-	}							\
-} while (0)
-
 #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
 
 #include <linux/aio_abi.h>
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 6b20af0bbb79..7113a32a86ea 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -18,7 +18,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
 	 */
 	preempt_disable();
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	while (test_and_set_bit(bitnum, addr)) {
+	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
 		while (test_bit(bitnum, addr)) {
 			preempt_enable();
 			cpu_relax();
@@ -36,7 +36,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
 {
 	preempt_disable();
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	if (test_and_set_bit(bitnum, addr)) {
+	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
 		preempt_enable();
 		return 0;
 	}
@@ -50,10 +50,28 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
  */
 static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(!test_bit(bitnum, addr));
+#endif
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	clear_bit_unlock(bitnum, addr);
+#endif
+	preempt_enable();
+	__release(bitlock);
+}
+
+/*
+ * bit-based spin_unlock()
+ * non-atomic version, which can be used eg. if the bit lock itself is
+ * protecting the rest of the flags in the word.
+ */
+static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
-	smp_mb__before_clear_bit();
-	clear_bit(bitnum, addr);
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit_unlock(bitnum, addr);
 #endif
 	preempt_enable();
 	__release(bitlock);
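
For orientation, a minimal caller sketch — not part of this patch, and the structure, field and lock-bit number below are hypothetical — showing the bit_spin_lock()/bit_spin_unlock() pair that this hunk converts to the new test_and_set_bit_lock()/clear_bit_unlock() primitives:

#include <linux/bit_spinlock.h>
#include <linux/bitops.h>

#define MY_LOCK_BIT	0	/* hypothetical: bit 0 of 'flags' is the lock */

struct my_obj {
	unsigned long flags;	/* lock bit plus assorted state bits */
};

static void my_obj_mark(struct my_obj *obj, int state_bit)
{
	bit_spin_lock(MY_LOCK_BIT, &obj->flags);   /* spins; preemption disabled */
	__set_bit(state_bit, &obj->flags);         /* non-atomic update is safe under the bit lock */
	bit_spin_unlock(MY_LOCK_BIT, &obj->flags); /* clear_bit_unlock() + preempt_enable() */
}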
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 8961e7fb755c..7a8d7ade28a0 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -310,10 +310,6 @@ typedef __u32 kernel_cap_t;
 #define CAP_SETFCAP	     31
 
 #ifdef __KERNEL__
-/*
- * Bounding set
- */
-extern kernel_cap_t cap_bset;
 
 /*
  * Internal kernel functions only
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 16ea3374dddf..107787aacb64 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -221,10 +221,15 @@ extern void clocksource_resume(void);
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
+extern void update_vsyscall_tz(void);
 #else
 static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
 {
 }
+
+static inline void update_vsyscall_tz(void)
+{
+}
 #endif
 
 #endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index acd583384bd9..fe23792f05c1 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -36,10 +36,20 @@
 #define __weak		__attribute__((weak))
 #define __naked		__attribute__((naked))
 #define __noreturn	__attribute__((noreturn))
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions have no effects except the return value and their
+ * return value depends only on the parameters and/or global
+ * variables. Such a function can be subject to common subexpression
+ * elimination and loop optimization just as an arithmetic operator
+ * would be.
+ * [...]
+ */
 #define __pure		__attribute__((pure))
 #define __aligned(x)	__attribute__((aligned(x)))
 #define __printf(a,b)	__attribute__((format(printf,a,b)))
 #define noinline	__attribute__((noinline))
-#define __attribute_pure__	__attribute__((pure))
 #define __attribute_const__	__attribute__((__const__))
 #define __maybe_unused	__attribute__((unused))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 86f9a3a6137d..c811c8b979ac 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -132,20 +132,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __maybe_unused		/* unimplemented */
 #endif
 
-/*
- * From the GCC manual:
- *
- * Many functions have no effects except the return value and their
- * return value depends only on the parameters and/or global
- * variables. Such a function can be subject to common subexpression
- * elimination and loop optimization just as an arithmetic operator
- * would be.
- * [...]
- */
-#ifndef __attribute_pure__
-# define __attribute_pure__	/* unimplemented */
-#endif
-
 #ifndef noinline
 #define noinline
 #endif
diff --git a/include/linux/console.h b/include/linux/console.h
index 0a4542ddb73d..a5f88a6a259d 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -122,14 +122,11 @@ extern void console_stop(struct console *);
 extern void console_start(struct console *);
 extern int is_console_locked(void);
 
-#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND
+extern int console_suspend_enabled;
+
 /* Suspend and resume console messages over PM events */
 extern void suspend_console(void);
 extern void resume_console(void);
-#else
-static inline void suspend_console(void) {}
-static inline void resume_console(void) {}
-#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */
 
 int mda_console_init(void);
 void prom_con_init(void);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 0ad72c4cf312..b79c57569367 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -119,8 +119,9 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 #define lock_cpu_hotplug()	do { } while (0)
 #define unlock_cpu_hotplug()	do { } while (0)
 #define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-#define register_hotcpu_notifier(nb)	do { (void)(nb); } while (0)
-#define unregister_hotcpu_notifier(nb)	do { (void)(nb); } while (0)
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
+#define unregister_hotcpu_notifier(nb)	({ (void)(nb); })
 
 /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
 static inline int cpu_is_offline(int cpu) { return 0; }
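
The switch to a `({ ... 0; })` statement expression matters because callers assign the result of register_hotcpu_notifier(); a hedged sketch of such a caller (the notifier and function names are made up, only the hotcpu API comes from this header):

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int my_cpu_callback(struct notifier_block *nb, unsigned long action,
			   void *hcpu)
{
	/* hypothetical: react to CPU_ONLINE / CPU_DEAD etc. here */
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb = {
	.notifier_call = my_cpu_callback,
};

static int __init my_init(void)
{
	/* evaluates to 0 when CONFIG_HOTPLUG_CPU is off, so this still builds */
	return register_hotcpu_notifier(&my_cpu_nb);
}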
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 72aa00cc4b2d..8f3dcd30828f 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -512,11 +512,11 @@ struct cyclades_card {
 	void __iomem *base_addr;
 	void __iomem *ctl_addr;
 	int irq;
-	int num_chips;	/* 0 if card absent, -1 if Z/PCI, else Y */
-	int first_line;	/* minor number of first channel on card */
-	int nports;	/* Number of ports in the card */
+	unsigned int num_chips;	/* 0 if card absent, -1 if Z/PCI, else Y */
+	unsigned int first_line;	/* minor number of first channel on card */
+	unsigned int nports;	/* Number of ports in the card */
 	int bus_index;	/* address shift - 0 for ISA, 1 for PCI */
 	int intr_enabled;	/* FW Interrupt flag - 0 disabled, 1 enabled */
 	spinlock_t card_lock;
 	struct cyclades_port *ports;
 };
@@ -566,10 +566,9 @@ struct cyclades_port {
 	int rtsdtr_inv;
 	int chip_rev;
 	int custom_divisor;
-	int x_char;	/* to be pushed out ASAP */
+	u8 x_char;	/* to be pushed out ASAP */
 	int close_delay;
 	unsigned short closing_wait;
-	unsigned long event;
 	int count;	/* # of fd on device */
 	int breakon;
 	int breakoff;
@@ -584,7 +583,6 @@ struct cyclades_port {
 	struct cyclades_monitor mon;
 	struct cyclades_idle_stats idle_stats;
 	struct cyclades_icount icount;
-	struct work_struct tqueue;
 	wait_queue_head_t open_wait;
 	wait_queue_head_t close_wait;
 	struct completion shutdown_wait;
@@ -592,19 +590,6 @@ struct cyclades_port {
 	int throttle;
 };
 
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at cy interrupt time.
- */
-#define Cy_EVENT_READ_PROCESS		0
-#define Cy_EVENT_WRITE_WAKEUP		1
-#define Cy_EVENT_HANGUP			2
-#define Cy_EVENT_BREAK			3
-#define Cy_EVENT_OPEN_WAKEUP		4
-#define Cy_EVENT_SHUTDOWN_WAKEUP	5
-#define Cy_EVENT_DELTA_WAKEUP		6
-#define Cy_EVENT_Z_RX_FULL		7
-
 #define CLOSING_WAIT_DELAY	30*HZ
 #define CY_CLOSING_WAIT_NONE	65535
 #define CY_CLOSING_WAIT_INF	0
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 0ebfafbd338c..101a2d4636be 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -13,16 +13,26 @@ enum dma_data_direction {
 	DMA_NONE = 3,
 };
 
-#define DMA_64BIT_MASK	0xffffffffffffffffULL
-#define DMA_48BIT_MASK	0x0000ffffffffffffULL
-#define DMA_40BIT_MASK	0x000000ffffffffffULL
-#define DMA_39BIT_MASK	0x0000007fffffffffULL
-#define DMA_32BIT_MASK	0x00000000ffffffffULL
-#define DMA_31BIT_MASK	0x000000007fffffffULL
-#define DMA_30BIT_MASK	0x000000003fffffffULL
-#define DMA_29BIT_MASK	0x000000001fffffffULL
-#define DMA_28BIT_MASK	0x000000000fffffffULL
-#define DMA_24BIT_MASK	0x0000000000ffffffULL
+#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+/*
+ * NOTE: do not use the below macros in new code and do not add new definitions
+ * here.
+ *
+ * Instead, just open-code DMA_BIT_MASK(n) within your driver
+ */
+#define DMA_64BIT_MASK	DMA_BIT_MASK(64)
+#define DMA_48BIT_MASK	DMA_BIT_MASK(48)
+#define DMA_47BIT_MASK	DMA_BIT_MASK(47)
+#define DMA_40BIT_MASK	DMA_BIT_MASK(40)
+#define DMA_39BIT_MASK	DMA_BIT_MASK(39)
+#define DMA_35BIT_MASK	DMA_BIT_MASK(35)
+#define DMA_32BIT_MASK	DMA_BIT_MASK(32)
+#define DMA_31BIT_MASK	DMA_BIT_MASK(31)
+#define DMA_30BIT_MASK	DMA_BIT_MASK(30)
+#define DMA_29BIT_MASK	DMA_BIT_MASK(29)
+#define DMA_28BIT_MASK	DMA_BIT_MASK(28)
+#define DMA_24BIT_MASK	DMA_BIT_MASK(24)
 
 #define DMA_MASK_NONE	0x0ULL
 
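
As the NOTE added above suggests, new code should open-code DMA_BIT_MASK(n) rather than use the legacy named constants; a minimal, hypothetical probe-path sketch (dma_set_mask() is the existing API, everything else below is an assumption for illustration):

#include <linux/dma-mapping.h>

static int my_driver_setup_dma(struct device *dev)
{
	/* announce that this device can only address 32 bits of DMA */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	return 0;
}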
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 589b0b355d84..64134456ed8c 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -72,8 +72,8 @@
  * Macro-instructions used to manage several block sizes
  */
 #define EXT3_MIN_BLOCK_SIZE		1024
-#define EXT3_MAX_BLOCK_SIZE		4096
+#define EXT3_MAX_BLOCK_SIZE		65536
 #define EXT3_MIN_BLOCK_LOG_SIZE		10
 #ifdef __KERNEL__
 # define EXT3_BLOCK_SIZE(s)		((s)->s_blocksize)
 #else
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h
index cdee7aaa57aa..97dd409d5f4a 100644
--- a/include/linux/ext4_fs.h
+++ b/include/linux/ext4_fs.h
@@ -36,10 +36,6 @@
 /*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
 #define EXT4_MAX_RESERVE_BLOCKS		1027
 #define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
-/*
- * Always enable hashed directories
- */
-#define CONFIG_EXT4_INDEX
 
 /*
  * Debug code
@@ -105,37 +101,29 @@
 #define EXT4_BLOCK_ALIGN(size, blkbits)	ALIGN((size), (1 << (blkbits)))
 
 /*
- * Macro-instructions used to manage fragments
- */
-#define EXT4_MIN_FRAG_SIZE		1024
-#define EXT4_MAX_FRAG_SIZE		4096
-#define EXT4_MIN_FRAG_LOG_SIZE		10
-#ifdef __KERNEL__
-# define EXT4_FRAG_SIZE(s)		(EXT4_SB(s)->s_frag_size)
-# define EXT4_FRAGS_PER_BLOCK(s)	(EXT4_SB(s)->s_frags_per_block)
-#else
-# define EXT4_FRAG_SIZE(s)		(EXT4_MIN_FRAG_SIZE << (s)->s_log_frag_size)
-# define EXT4_FRAGS_PER_BLOCK(s)	(EXT4_BLOCK_SIZE(s) / EXT4_FRAG_SIZE(s))
-#endif
-
-/*
  * Structure of a blocks group descriptor
  */
 struct ext4_group_desc
 {
-	__le32	bg_block_bitmap;	/* Blocks bitmap block */
-	__le32	bg_inode_bitmap;	/* Inodes bitmap block */
-	__le32	bg_inode_table;		/* Inodes table block */
+	__le32	bg_block_bitmap_lo;	/* Blocks bitmap block */
+	__le32	bg_inode_bitmap_lo;	/* Inodes bitmap block */
+	__le32	bg_inode_table_lo;	/* Inodes table block */
 	__le16	bg_free_blocks_count;	/* Free blocks count */
 	__le16	bg_free_inodes_count;	/* Free inodes count */
 	__le16	bg_used_dirs_count;	/* Directories count */
-	__u16	bg_flags;
-	__u32	bg_reserved[3];
+	__le16	bg_flags;		/* EXT4_BG_flags (INODE_UNINIT, etc) */
+	__u32	bg_reserved[2];		/* Likely block/inode bitmap checksum */
+	__le16	bg_itable_unused;	/* Unused inodes count */
+	__le16	bg_checksum;		/* crc16(sb_uuid+group+desc) */
 	__le32	bg_block_bitmap_hi;	/* Blocks bitmap block MSB */
 	__le32	bg_inode_bitmap_hi;	/* Inodes bitmap block MSB */
 	__le32	bg_inode_table_hi;	/* Inodes table block MSB */
 };
 
+#define EXT4_BG_INODE_UNINIT	0x0001 /* Inode table/bitmap not in use */
+#define EXT4_BG_BLOCK_UNINIT	0x0002 /* Block bitmap not in use */
+#define EXT4_BG_INODE_ZEROED	0x0004 /* On-disk itable initialized to zero */
+
 #ifdef __KERNEL__
 #include <linux/ext4_fs_i.h>
 #include <linux/ext4_fs_sb.h>
@@ -311,27 +299,24 @@ struct ext4_inode {
 	__le32	i_generation;	/* File version (for NFS) */
 	__le32	i_file_acl;	/* File ACL */
 	__le32	i_dir_acl;	/* Directory ACL */
-	__le32	i_faddr;	/* Fragment address */
+	__le32	i_obso_faddr;	/* Obsoleted fragment address */
 	union {
 		struct {
-			__u8	l_i_frag;	/* Fragment number */
-			__u8	l_i_fsize;	/* Fragment size */
+			__le16	l_i_reserved1;	/* Obsoleted fragment number/size which are removed in ext4 */
 			__le16	l_i_file_acl_high;
 			__le16	l_i_uid_high;	/* these 2 fields */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
 			__u32	l_i_reserved2;
 		} linux2;
 		struct {
-			__u8	h_i_frag;	/* Fragment number */
-			__u8	h_i_fsize;	/* Fragment size */
+			__le16	h_i_reserved1;	/* Obsoleted fragment number/size which are removed in ext4 */
 			__u16	h_i_mode_high;
 			__u16	h_i_uid_high;
 			__u16	h_i_gid_high;
 			__u32	h_i_author;
 		} hurd2;
 		struct {
-			__u8	m_i_frag;	/* Fragment number */
-			__u8	m_i_fsize;	/* Fragment size */
+			__le16	h_i_reserved1;	/* Obsoleted fragment number/size which are removed in ext4 */
 			__le16	m_i_file_acl_high;
 			__u32	m_i_reserved2[2];
 		} masix2;
@@ -419,8 +404,6 @@ do { \
 
 #if defined(__KERNEL__) || defined(__linux__)
 #define i_reserved1	osd1.linux1.l_i_reserved1
-#define i_frag		osd2.linux2.l_i_frag
-#define i_fsize		osd2.linux2.l_i_fsize
 #define i_file_acl_high	osd2.linux2.l_i_file_acl_high
 #define i_uid_low	i_uid
 #define i_gid_low	i_gid
@@ -431,8 +414,6 @@ do { \
 #elif defined(__GNU__)
 
 #define i_translator	osd1.hurd1.h_i_translator
-#define i_frag		osd2.hurd2.h_i_frag;
-#define i_fsize		osd2.hurd2.h_i_fsize;
 #define i_uid_high	osd2.hurd2.h_i_uid_high
 #define i_gid_high	osd2.hurd2.h_i_gid_high
 #define i_author	osd2.hurd2.h_i_author
@@ -440,8 +421,6 @@ do { \
 #elif defined(__masix__)
 
 #define i_reserved1	osd1.masix1.m_i_reserved1
-#define i_frag		osd2.masix2.m_i_frag
-#define i_fsize		osd2.masix2.m_i_fsize
 #define i_file_acl_high	osd2.masix2.m_i_file_acl_high
 #define i_reserved2	osd2.masix2.m_i_reserved2
 
@@ -522,15 +501,15 @@ do { \
  */
 struct ext4_super_block {
 /*00*/	__le32	s_inodes_count;		/* Inodes count */
-	__le32	s_blocks_count;		/* Blocks count */
-	__le32	s_r_blocks_count;	/* Reserved blocks count */
-	__le32	s_free_blocks_count;	/* Free blocks count */
+	__le32	s_blocks_count_lo;	/* Blocks count */
+	__le32	s_r_blocks_count_lo;	/* Reserved blocks count */
+	__le32	s_free_blocks_count_lo;	/* Free blocks count */
 /*10*/	__le32	s_free_inodes_count;	/* Free inodes count */
 	__le32	s_first_data_block;	/* First Data Block */
 	__le32	s_log_block_size;	/* Block size */
-	__le32	s_log_frag_size;	/* Fragment size */
+	__le32	s_obso_log_frag_size;	/* Obsoleted fragment size */
 /*20*/	__le32	s_blocks_per_group;	/* # Blocks per group */
-	__le32	s_frags_per_group;	/* # Fragments per group */
+	__le32	s_obso_frags_per_group;	/* Obsoleted fragments per group */
 	__le32	s_inodes_per_group;	/* # Inodes per group */
 	__le32	s_mtime;		/* Mount time */
 /*30*/	__le32	s_wtime;		/* Write time */
@@ -595,13 +574,13 @@ struct ext4_super_block {
 /*150*/	__le32	s_blocks_count_hi;	/* Blocks count */
 	__le32	s_r_blocks_count_hi;	/* Reserved blocks count */
 	__le32	s_free_blocks_count_hi;	/* Free blocks count */
-	__u16	s_min_extra_isize;	/* All inodes have at least # bytes */
-	__u16	s_want_extra_isize;	/* New inodes should reserve # bytes */
-	__u32	s_flags;		/* Miscellaneous flags */
-	__u16	s_raid_stride;		/* RAID stride */
-	__u16	s_mmp_interval;		/* # seconds to wait in MMP checking */
-	__u64	s_mmp_block;		/* Block for multi-mount protection */
-	__u32	s_raid_stripe_width;	/* blocks on all data disks (N*stride)*/
+	__le16	s_min_extra_isize;	/* All inodes have at least # bytes */
+	__le16	s_want_extra_isize;	/* New inodes should reserve # bytes */
+	__le32	s_flags;		/* Miscellaneous flags */
+	__le16	s_raid_stride;		/* RAID stride */
+	__le16	s_mmp_interval;		/* # seconds to wait in MMP checking */
+	__le64	s_mmp_block;		/* Block for multi-mount protection */
+	__le32	s_raid_stripe_width;	/* blocks on all data disks (N*stride)*/
 	__u32	s_reserved[163];	/* Padding to the end of the block */
 };
 
@@ -692,6 +671,7 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
 #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
 #define EXT4_FEATURE_RO_COMPAT_BTREE_DIR	0x0004
+#define EXT4_FEATURE_RO_COMPAT_GDT_CSUM		0x0010
 #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK	0x0020
 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE	0x0040
 
@@ -702,15 +682,18 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 #define EXT4_FEATURE_INCOMPAT_META_BG		0x0010
 #define EXT4_FEATURE_INCOMPAT_EXTENTS		0x0040 /* extents support */
 #define EXT4_FEATURE_INCOMPAT_64BIT		0x0080
+#define EXT4_FEATURE_INCOMPAT_FLEX_BG		0x0200
 
 #define EXT4_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
 #define EXT4_FEATURE_INCOMPAT_SUPP	(EXT4_FEATURE_INCOMPAT_FILETYPE| \
 					 EXT4_FEATURE_INCOMPAT_RECOVER| \
 					 EXT4_FEATURE_INCOMPAT_META_BG| \
 					 EXT4_FEATURE_INCOMPAT_EXTENTS| \
-					 EXT4_FEATURE_INCOMPAT_64BIT)
+					 EXT4_FEATURE_INCOMPAT_64BIT| \
+					 EXT4_FEATURE_INCOMPAT_FLEX_BG)
 #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+					 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
 					 EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
 					 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
 					 EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
@@ -789,17 +772,11 @@ struct ext4_dir_entry_2 {
  * (c) Daniel Phillips, 2001
  */
 
-#ifdef CONFIG_EXT4_INDEX
-  #define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
-				      EXT4_FEATURE_COMPAT_DIR_INDEX) && \
+#define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
+		      EXT4_FEATURE_COMPAT_DIR_INDEX) && \
 		    (EXT4_I(dir)->i_flags & EXT4_INDEX_FL))
 #define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX)
 #define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
-#else
-  #define is_dx(dir) 0
-#define EXT4_DIR_LINK_MAX(dir) ((dir)->i_nlink >= EXT4_LINK_MAX)
-#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2)
-#endif
 
 /* Legal values for the dx_root hash_version field: */
 
@@ -1004,39 +981,39 @@ extern void ext4_inode_table_set(struct super_block *sb,
 static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
 {
 	return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
-		le32_to_cpu(es->s_blocks_count);
+		le32_to_cpu(es->s_blocks_count_lo);
 }
 
 static inline ext4_fsblk_t ext4_r_blocks_count(struct ext4_super_block *es)
 {
 	return ((ext4_fsblk_t)le32_to_cpu(es->s_r_blocks_count_hi) << 32) |
-		le32_to_cpu(es->s_r_blocks_count);
+		le32_to_cpu(es->s_r_blocks_count_lo);
 }
 
 static inline ext4_fsblk_t ext4_free_blocks_count(struct ext4_super_block *es)
 {
 	return ((ext4_fsblk_t)le32_to_cpu(es->s_free_blocks_count_hi) << 32) |
-		le32_to_cpu(es->s_free_blocks_count);
+		le32_to_cpu(es->s_free_blocks_count_lo);
 }
 
 static inline void ext4_blocks_count_set(struct ext4_super_block *es,
 					 ext4_fsblk_t blk)
 {
-	es->s_blocks_count = cpu_to_le32((u32)blk);
+	es->s_blocks_count_lo = cpu_to_le32((u32)blk);
 	es->s_blocks_count_hi = cpu_to_le32(blk >> 32);
 }
 
 static inline void ext4_free_blocks_count_set(struct ext4_super_block *es,
 					      ext4_fsblk_t blk)
 {
-	es->s_free_blocks_count = cpu_to_le32((u32)blk);
+	es->s_free_blocks_count_lo = cpu_to_le32((u32)blk);
 	es->s_free_blocks_count_hi = cpu_to_le32(blk >> 32);
 }
 
 static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
 					   ext4_fsblk_t blk)
 {
-	es->s_r_blocks_count = cpu_to_le32((u32)blk);
+	es->s_r_blocks_count_lo = cpu_to_le32((u32)blk);
 	es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
 }
 
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h
index 81406f3655d4..d2045a26195d 100644
--- a/include/linux/ext4_fs_extents.h
+++ b/include/linux/ext4_fs_extents.h
@@ -74,7 +74,7 @@ struct ext4_extent {
 	__le32	ee_block;	/* first logical block extent covers */
 	__le16	ee_len;		/* number of blocks covered by extent */
 	__le16	ee_start_hi;	/* high 16 bits of physical block */
-	__le32	ee_start;	/* low 32 bits of physical block */
+	__le32	ee_start_lo;	/* low 32 bits of physical block */
 };
 
 /*
@@ -83,7 +83,7 @@ struct ext4_extent {
  */
 struct ext4_extent_idx {
 	__le32	ei_block;	/* index covers logical blocks from 'block' */
-	__le32	ei_leaf;	/* pointer to the physical block of the next *
+	__le32	ei_leaf_lo;	/* pointer to the physical block of the next *
 				 * level. leaf or next index could be there */
 	__le16	ei_leaf_hi;	/* high 16 bits of physical block */
 	__u16	ei_unused;
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h
index 1a511e9905aa..86ddfe2089f3 100644
--- a/include/linux/ext4_fs_i.h
+++ b/include/linux/ext4_fs_i.h
@@ -78,11 +78,6 @@ struct ext4_ext_cache {
 struct ext4_inode_info {
 	__le32	i_data[15];	/* unconverted */
 	__u32	i_flags;
-#ifdef EXT4_FRAGMENTS
-	__u32	i_faddr;
-	__u8	i_frag_no;
-	__u8	i_frag_size;
-#endif
 	ext4_fsblk_t	i_file_acl;
 	__u32	i_dir_acl;
 	__u32	i_dtime;
diff --git a/include/linux/ext4_fs_sb.h b/include/linux/ext4_fs_sb.h
index 0a8e47d47c91..b40e827cd495 100644
--- a/include/linux/ext4_fs_sb.h
+++ b/include/linux/ext4_fs_sb.h
@@ -28,11 +28,8 @@
  * third extended-fs super-block data in memory
  */
 struct ext4_sb_info {
-	unsigned long s_frag_size;	/* Size of a fragment in bytes */
 	unsigned long s_desc_size;	/* Size of a group descriptor in bytes */
-	unsigned long s_frags_per_block;/* Number of fragments per block */
 	unsigned long s_inodes_per_block;/* Number of inodes per block */
-	unsigned long s_frags_per_group;/* Number of fragments in a group */
 	unsigned long s_blocks_per_group;/* Number of blocks in a group */
 	unsigned long s_inodes_per_group;/* Number of inodes in a group */
 	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
diff --git a/include/linux/ext4_jbd2.h b/include/linux/ext4_jbd2.h
index d716e6392cf6..38c71d3c8dbf 100644
--- a/include/linux/ext4_jbd2.h
+++ b/include/linux/ext4_jbd2.h
@@ -12,8 +12,8 @@
  * Ext4-specific journaling extensions.
  */
 
-#ifndef _LINUX_EXT4_JBD_H
-#define _LINUX_EXT4_JBD_H
+#ifndef _LINUX_EXT4_JBD2_H
+#define _LINUX_EXT4_JBD2_H
 
 #include <linux/fs.h>
 #include <linux/jbd2.h>
@@ -228,4 +228,4 @@ static inline int ext4_should_writeback_data(struct inode *inode)
 	return 0;
 }
 
-#endif	/* _LINUX_EXT4_JBD_H */
+#endif	/* _LINUX_EXT4_JBD2_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 91b2e3b9251e..ddfa0372a3b7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -146,6 +146,7 @@ struct sock;
 
 extern unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, int flen);
 #endif /* __KERNEL__ */
 
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index efded00ad08c..08934995c7ab 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -4,6 +4,7 @@
 #define FREEZER_H_INCLUDED
 
 #include <linux/sched.h>
+#include <linux/wait.h>
 
 #ifdef CONFIG_PM_SLEEP
 /*
@@ -126,6 +127,36 @@ static inline void set_freezable(void)
 	current->flags &= ~PF_NOFREEZE;
 }
 
+/*
+ * Freezer-friendly wrappers around wait_event_interruptible() and
+ * wait_event_interruptible_timeout(), originally defined in <linux/wait.h>
+ */
+
+#define wait_event_freezable(wq, condition)				\
+({									\
+	int __retval;							\
+	do {								\
+		__retval = wait_event_interruptible(wq,			\
+				(condition) || freezing(current));	\
+		if (__retval && !freezing(current))			\
+			break;						\
+		else if (!(condition))					\
+			__retval = -ERESTARTSYS;			\
+	} while (try_to_freeze());					\
+	__retval;							\
+})
+
+
+#define wait_event_freezable_timeout(wq, condition, timeout)		\
+({									\
+	long __retval = timeout;					\
+	do {								\
+		__retval = wait_event_interruptible_timeout(wq,		\
+				(condition) || freezing(current),	\
+				__retval);				\
+	} while (try_to_freeze());					\
+	__retval;							\
+})
 #else /* !CONFIG_PM_SLEEP */
 static inline int frozen(struct task_struct *p) { return 0; }
 static inline int freezing(struct task_struct *p) { return 0; }
@@ -143,6 +174,13 @@ static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
+
+#define wait_event_freezable(wq, condition)				\
+		wait_event_interruptible(wq, condition)
+
+#define wait_event_freezable_timeout(wq, condition, timeout)		\
+		wait_event_interruptible_timeout(wq, condition, timeout)
+
 #endif /* !CONFIG_PM_SLEEP */
 
 #endif /* FREEZER_H_INCLUDED */
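
A sketch of the intended use of the new wrapper: a freezer-friendly kernel thread that sleeps on a waitqueue yet still enters the refrigerator during suspend. Everything named my_* below is hypothetical; only set_freezable(), wait_event_freezable() and the kthread helpers are existing APIs:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

struct my_ctx {
	wait_queue_head_t wq;
	bool have_work;
};

static int my_thread_fn(void *data)
{
	struct my_ctx *ctx = data;

	set_freezable();
	while (!kthread_should_stop()) {
		/* wakes up for work, stop requests, or to be frozen */
		wait_event_freezable(ctx->wq,
				     ctx->have_work || kthread_should_stop());
		if (ctx->have_work) {
			/* ... consume the work ... */
			ctx->have_work = false;
		}
	}
	return 0;
}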
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e3fc5dbb2246..6a4d170ad9a5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -330,6 +330,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define ATTR_KILL_SGID	4096
 #define ATTR_FILE	8192
 #define ATTR_KILL_PRIV	16384
+#define ATTR_OPEN	32768	/* Truncating from open(O_TRUNC) */
 
 /*
  * This is the Inode Attributes structure, used for notify_change().  It
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9fbe9d258e22..d0c437028c80 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -6,7 +6,17 @@
    See the file COPYING.
 */
 
-/* This file defines the kernel interface of FUSE */
+/*
+ * This file defines the kernel interface of FUSE
+ *
+ * Protocol changelog:
+ *
+ * 7.9:
+ *  - new fuse_getattr_in input argument of GETATTR
+ *  - add lk_flags in fuse_lk_in
+ *  - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in
+ *  - add blksize field to fuse_attr
+ */
 
 #include <asm/types.h>
 #include <linux/major.h>
@@ -15,7 +25,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 8
+#define FUSE_KERNEL_MINOR_VERSION 9
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -44,6 +54,8 @@ struct fuse_attr {
 	__u32	uid;
 	__u32	gid;
 	__u32	rdev;
+	__u32	blksize;
+	__u32	padding;
 };
 
 struct fuse_kstatfs {
@@ -76,6 +88,9 @@ struct fuse_file_lock {
 #define FATTR_ATIME	(1 << 4)
 #define FATTR_MTIME	(1 << 5)
 #define FATTR_FH	(1 << 6)
+#define FATTR_ATIME_NOW	(1 << 7)
+#define FATTR_MTIME_NOW	(1 << 8)
+#define FATTR_LOCKOWNER	(1 << 9)
 
 /**
  * Flags returned by the OPEN request
@@ -91,12 +106,38 @@ struct fuse_file_lock {
  */
 #define FUSE_ASYNC_READ		(1 << 0)
 #define FUSE_POSIX_LOCKS	(1 << 1)
+#define FUSE_FILE_OPS		(1 << 2)
+#define FUSE_ATOMIC_O_TRUNC	(1 << 3)
 
 /**
  * Release flags
  */
 #define FUSE_RELEASE_FLUSH	(1 << 0)
 
+/**
+ * Getattr flags
+ */
+#define FUSE_GETATTR_FH		(1 << 0)
+
+/**
+ * Lock flags
+ */
+#define FUSE_LK_FLOCK		(1 << 0)
+
+/**
+ * WRITE flags
+ *
+ * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed
+ * FUSE_WRITE_LOCKOWNER: lock_owner field is valid
+ */
+#define FUSE_WRITE_CACHE	(1 << 0)
+#define FUSE_WRITE_LOCKOWNER	(1 << 1)
+
+/**
+ * Read flags
+ */
+#define FUSE_READ_LOCKOWNER	(1 << 1)
+
 enum fuse_opcode {
 	FUSE_LOOKUP	   = 1,
 	FUSE_FORGET	   = 2,  /* no reply */
@@ -139,6 +180,8 @@ enum fuse_opcode {
 /* The read buffer is required to be at least 8k, but may be much larger */
 #define FUSE_MIN_READ_BUFFER 8192
 
+#define FUSE_COMPAT_ENTRY_OUT_SIZE 120
+
 struct fuse_entry_out {
 	__u64	nodeid;		/* Inode ID */
 	__u64	generation;	/* Inode generation: nodeid:gen must
@@ -154,6 +197,14 @@ struct fuse_forget_in {
 	__u64	nlookup;
 };
 
+struct fuse_getattr_in {
+	__u32	getattr_flags;
+	__u32	dummy;
+	__u64	fh;
+};
+
+#define FUSE_COMPAT_ATTR_OUT_SIZE 96
+
 struct fuse_attr_out {
 	__u64	attr_valid;	/* Cache timeout for the attributes */
 	__u32	attr_valid_nsec;
@@ -184,7 +235,7 @@ struct fuse_setattr_in {
 	__u32	padding;
 	__u64	fh;
 	__u64	size;
-	__u64	unused1;
+	__u64	lock_owner;
 	__u64	atime;
 	__u64	mtime;
 	__u64	unused2;
@@ -227,14 +278,18 @@ struct fuse_read_in {
 	__u64	fh;
 	__u64	offset;
 	__u32	size;
-	__u32	padding;
+	__u32	read_flags;
+	__u64	lock_owner;
 };
 
+#define FUSE_COMPAT_WRITE_IN_SIZE 24
+
 struct fuse_write_in {
 	__u64	fh;
 	__u64	offset;
 	__u32	size;
 	__u32	write_flags;
+	__u64	lock_owner;
 };
 
 struct fuse_write_out {
@@ -273,6 +328,8 @@ struct fuse_lk_in {
 	__u64	fh;
 	__u64	owner;
 	struct fuse_file_lock lk;
+	__u32	lk_flags;
+	__u32	padding;
 };
 
 struct fuse_lk_out {
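
One practical consequence of the 7.9 bump and the new FUSE_COMPAT_*_SIZE constants is that message sizes depend on the minor version negotiated at INIT time. A hedged userspace-daemon sketch (the helper and its use are hypothetical; only the constant and struct come from this header):

#include <linux/fuse.h>
#include <stddef.h>

/* size of a GETATTR reply body for a connection negotiated at 'minor' */
static size_t attr_out_size(unsigned int minor)
{
	/* pre-7.9 peers know nothing of fuse_attr.blksize, so keep the old layout */
	return minor < 9 ? FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(struct fuse_attr_out);
}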
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 540799bc85f8..7a9398e19704 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -300,7 +300,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
 
 /* Precise sleep: */
 extern long hrtimer_nanosleep(struct timespec *rqtp,
-			      struct timespec __user *rmtp,
+			      struct timespec *rmtp,
 			      const enum hrtimer_mode mode,
 			      const clockid_t clockid);
 extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e39ee2fa2607..19db0a4ae447 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -685,7 +685,6 @@ typedef struct hwif_s {
 
 	u8	pio_mask;
 
-	u8	atapi_dma;	/* host supports atapi_dma */
 	u8	ultra_mask;
 	u8	mwdma_mask;
 	u8	swdma_mask;
@@ -797,12 +796,9 @@ typedef struct hwif_s {
 	unsigned	serialized : 1;	/* serialized all channel operation */
 	unsigned	sharing_irq: 1;	/* 1 = sharing irq with another hwif */
 	unsigned	reset      : 1;	/* reset after probe */
-	unsigned	no_lba48   : 1;	/* 1 = cannot do LBA48 */
-	unsigned	no_lba48_dma : 1;	/* 1 = cannot do LBA48 DMA */
 	unsigned	auto_poll  : 1;	/* supports nop auto-poll */
 	unsigned	sg_mapped  : 1;	/* sg_table and sg_nents are ready */
 	unsigned	no_io_32bit : 1;	/* 1 = can not do 32-bit IO ops */
-	unsigned	err_stops_fifo : 1;	/* 1=data FIFO is cleared by an error */
 	unsigned	mmio       : 1;	/* host uses MMIO */
 
 	struct device	gendev;
@@ -1211,19 +1207,6 @@ extern void default_hwif_iops(ide_hwif_t *);
 extern void default_hwif_iops(ide_hwif_t *);
 extern void default_hwif_mmiops(ide_hwif_t *);
 extern void default_hwif_transport(ide_hwif_t *);
 
-#define ON_BOARD		1
-#define NEVER_BOARD		0
-
-#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define OFF_BOARD		ON_BOARD
-#else /* CONFIG_BLK_DEV_OFFBOARD */
-# define OFF_BOARD		NEVER_BOARD
-#endif /* CONFIG_BLK_DEV_OFFBOARD */
-
-#define NODMA 0
-#define NOAUTODMA 1
-#define AUTODMA 2
-
 typedef struct ide_pci_enablebit_s {
 	u8	reg;	/* byte pci reg holding the enable-bit */
 	u8	mask;	/* mask to isolate the enable-bit */
@@ -1258,24 +1241,48 @@ enum {
 	IDE_HFLAG_TRUST_BIOS_FOR_DMA	= (1 << 10),
 	/* host uses VDMA */
 	IDE_HFLAG_VDMA			= (1 << 11),
+	/* ATAPI DMA is unsupported */
+	IDE_HFLAG_NO_ATAPI_DMA		= (1 << 12),
+	/* set if host is a "bootable" controller */
+	IDE_HFLAG_BOOTABLE		= (1 << 13),
+	/* host doesn't support DMA */
+	IDE_HFLAG_NO_DMA		= (1 << 14),
+	/* check if host is PCI IDE device before allowing DMA */
+	IDE_HFLAG_NO_AUTODMA		= (1 << 15),
+	/* host is CS5510/CS5520 */
+	IDE_HFLAG_CS5520		= (1 << 16),
+	/* no LBA48 */
+	IDE_HFLAG_NO_LBA48		= (1 << 17),
+	/* no LBA48 DMA */
+	IDE_HFLAG_NO_LBA48_DMA		= (1 << 18),
+	/* data FIFO is cleared by an error */
+	IDE_HFLAG_ERROR_STOPS_FIFO	= (1 << 19),
+	/* serialize ports */
+	IDE_HFLAG_SERIALIZE		= (1 << 20),
+	/* use legacy IRQs */
+	IDE_HFLAG_LEGACY_IRQS		= (1 << 21),
 };
 
+#ifdef CONFIG_BLK_DEV_OFFBOARD
+# define IDE_HFLAG_OFF_BOARD	IDE_HFLAG_BOOTABLE
+#else
+# define IDE_HFLAG_OFF_BOARD	0
+#endif
+
 typedef struct ide_pci_device_s {
 	char			*name;
-	int			(*init_setup)(struct pci_dev *, struct ide_pci_device_s *);
-	void			(*init_setup_dma)(struct pci_dev *, struct ide_pci_device_s *, ide_hwif_t *);
 	unsigned int		(*init_chipset)(struct pci_dev *, const char *);
 	void			(*init_iops)(ide_hwif_t *);
 	void			(*init_hwif)(ide_hwif_t *);
 	void			(*init_dma)(ide_hwif_t *, unsigned long);
 	void			(*fixup)(ide_hwif_t *);
-	u8			autodma;
 	ide_pci_enablebit_t	enablebits[2];
-	u8			bootable;
 	unsigned int		extra;
 	struct ide_pci_device_s	*next;
-	u16			host_flags;
+	u32			host_flags;
 	u8			pio_mask;
+	u8			swdma_mask;
+	u8			mwdma_mask;
 	u8			udma_mask;
 } ide_pci_device_t;
 
@@ -1454,4 +1461,11 @@ static inline int hwif_to_node(ide_hwif_t *hwif)
 	return dev ? pcibus_to_node(dev->bus) : -1;
 }
 
+static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif = HWIF(drive);
+
+	return &hwif->drives[(drive->dn ^ 1) & 1];
+}
+
 #endif /* _IDE_H */
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 7a9db390c56a..c5bd28b69aec 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -365,6 +365,16 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
 			     int                  priority);
 
 /*
+ * Poll the IPMI interface for the user.  This causes the IPMI code to
+ * do an immediate check for information from the driver and handle
+ * anything that is immediately pending.  This will not block in any
+ * way.  This is useful if you need to implement polling from the user
+ * for things like modifying the watchdog timeout when a panic occurs
+ * or disabling the watchdog timer on a reboot.
+ */
+void ipmi_poll_interface(ipmi_user_t user);
+
+/*
  * When commands come in to the SMS, the user can register to receive
  * them.  Only one user can be listening on a specific netfn/cmd/chan tuple
  * at a time, you will get an EBUSY error if the command is already
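
A hedged sketch of the polling use case the new comment describes (watchdog/panic paths where interrupts may be unusable); the helper and its completion flag are hypothetical, ipmi_poll_interface() is the call added here:

#include <linux/ipmi.h>

static void my_wait_for_reply(ipmi_user_t user, volatile int *done)
{
	/* busy-poll the interface until the pending message has been handled */
	while (!*done)
		ipmi_poll_interface(user);
}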
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index c0633108d05d..efa292a52e7e 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -148,26 +148,46 @@ struct ipmi_device_id {
 
 /* Take a pointer to a raw data buffer and a length and extract device
    id information from it.  The first byte of data must point to the
-   byte from the get device id response after the completion code.
-   The caller is responsible for making sure the length is at least
-   11 and the command completed without error. */
-static inline void ipmi_demangle_device_id(unsigned char *data,
+   netfn << 2, the data should be of the format:
+     netfn << 2, cmd, completion code, data
+   as normally comes from a device interface. */
+static inline int ipmi_demangle_device_id(const unsigned char *data,
 					   unsigned int data_len,
 					   struct ipmi_device_id *id)
 {
+	if (data_len < 9)
+		return -EINVAL;
+	if (data[0] != IPMI_NETFN_APP_RESPONSE << 2 ||
+	    data[1] != IPMI_GET_DEVICE_ID_CMD)
+		/* Strange, didn't get the response we expected. */
+		return -EINVAL;
+	if (data[2] != 0)
+		/* That's odd, it shouldn't be able to fail. */
+		return -EINVAL;
+
+	data += 3;
+	data_len -= 3;
 	id->device_id = data[0];
 	id->device_revision = data[1];
 	id->firmware_revision_1 = data[2];
 	id->firmware_revision_2 = data[3];
 	id->ipmi_version = data[4];
 	id->additional_device_support = data[5];
-	id->manufacturer_id = data[6] | (data[7] << 8) | (data[8] << 16);
-	id->product_id = data[9] | (data[10] << 8);
+	if (data_len >= 6) {
+		id->manufacturer_id = (data[6] | (data[7] << 8) |
+				       (data[8] << 16));
+		id->product_id = data[9] | (data[10] << 8);
+	} else {
+		id->manufacturer_id = 0;
+		id->product_id = 0;
+	}
 	if (data_len >= 15) {
 		memcpy(id->aux_firmware_revision, data+11, 4);
 		id->aux_firmware_revision_set = 1;
 	} else
 		id->aux_firmware_revision_set = 0;
+
+	return 0;
 }
 
 /* Add a low-level interface to the IPMI driver.  Note that if the
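
Since ipmi_demangle_device_id() now validates the raw response itself and returns an error code, callers are expected to check it; a minimal, hypothetical caller sketch (only the helper and struct fields shown above are real):

#include <linux/ipmi_smi.h>
#include <linux/kernel.h>

static int my_parse_devid(const unsigned char *msg, unsigned int len)
{
	struct ipmi_device_id id;
	int rv;

	rv = ipmi_demangle_device_id(msg, len, &id);	/* msg[0] must be netfn << 2 */
	if (rv)
		return rv;	/* short or malformed get-device-id response */

	printk(KERN_INFO "IPMI device id 0x%x, IPMI version 0x%x\n",
	       id.device_id, id.ipmi_version);
	return 0;
}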
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 72f522372924..a3abf51e488f 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -72,14 +72,15 @@ extern int journal_enable_debug;
 #define jbd_debug(f, a...)	/**/
 #endif
 
-extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd_slab_alloc(size_t size, gfp_t flags);
-extern void jbd_slab_free(void *ptr, size_t size);
-
-#define jbd_kmalloc(size, flags) \
-	__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
-#define jbd_rep_kmalloc(size, flags) \
-	__jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
+static inline void *jbd_alloc(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags, get_order(size));
+}
+
+static inline void jbd_free(void *ptr, size_t size)
+{
+	free_pages((unsigned long)ptr, get_order(size));
+};
 
 #define JFS_MIN_JOURNAL_BLOCKS 1024
 
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 260d6d76c5f3..06ef11457051 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -13,8 +13,8 @@
  * filesystem journaling support.
  */
 
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
+#ifndef _LINUX_JBD2_H
+#define _LINUX_JBD2_H
 
 /* Allow this file to be included directly into e2fsprogs */
 #ifndef __KERNEL__
@@ -37,26 +37,26 @@
 #define journal_oom_retry 1
 
 /*
- * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds
+ * Define JBD2_PARANIOD_IOFAIL to cause a kernel BUG() if ext4 finds
  * certain classes of error which can occur due to failed IOs.  Under
- * normal use we want ext3 to continue after such errors, because
+ * normal use we want ext4 to continue after such errors, because
  * hardware _can_ fail, but for debugging purposes when running tests on
  * known-good hardware we may want to trap these errors.
  */
-#undef JBD_PARANOID_IOFAIL
+#undef JBD2_PARANOID_IOFAIL
 
 /*
  * The default maximum commit age, in seconds.
  */
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
+#define JBD2_DEFAULT_MAX_COMMIT_AGE 5
 
 #ifdef CONFIG_JBD2_DEBUG
 /*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
+ * Define JBD2_EXPENSIVE_CHECKING to enable more expensive internal
  * consistency checks.  By default we don't do this unless
  * CONFIG_JBD2_DEBUG is on.
  */
-#define JBD_EXPENSIVE_CHECKING
+#define JBD2_EXPENSIVE_CHECKING
 extern u8 jbd2_journal_enable_debug;
 
 #define jbd_debug(n, f, a...)						\
@@ -71,14 +71,15 @@ extern u8 jbd2_journal_enable_debug;
 #define jbd_debug(f, a...)	/**/
 #endif
 
-extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
-extern void jbd2_slab_free(void *ptr, size_t size);
-
-#define jbd_kmalloc(size, flags) \
-	__jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
-#define jbd_rep_kmalloc(size, flags) \
-	__jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
+static inline void *jbd2_alloc(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags, get_order(size));
+}
+
+static inline void jbd2_free(void *ptr, size_t size)
+{
+	free_pages((unsigned long)ptr, get_order(size));
+};
 
 #define JBD2_MIN_JOURNAL_BLOCKS 1024
 
@@ -162,8 +163,8 @@ typedef struct journal_block_tag_s
 	__be32		t_blocknr_high; /* most-significant high 32bits. */
 } journal_block_tag_t;
 
-#define JBD_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
-#define JBD_TAG_SIZE64 (sizeof(journal_block_tag_t))
+#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
+#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
 
 /*
  * The revoke descriptor: used on disk to describe a series of blocks to
@@ -255,8 +256,8 @@ typedef struct journal_superblock_s
 #include <linux/fs.h>
 #include <linux/sched.h>
 
-#define JBD_ASSERTIONS
-#ifdef JBD_ASSERTIONS
+#define JBD2_ASSERTIONS
+#ifdef JBD2_ASSERTIONS
 #define J_ASSERT(assert)						\
 do {									\
 	if (!(assert)) {						\
@@ -283,9 +284,9 @@ void buffer_assertion_failure(struct buffer_head *bh);
 
 #else
 #define J_ASSERT(assert)	do { } while (0)
-#endif		/* JBD_ASSERTIONS */
+#endif		/* JBD2_ASSERTIONS */
 
-#if defined(JBD_PARANOID_IOFAIL)
+#if defined(JBD2_PARANOID_IOFAIL)
 #define J_EXPECT(expr, why...)		J_ASSERT(expr)
 #define J_EXPECT_BH(bh, expr, why...)	J_ASSERT_BH(bh, expr)
 #define J_EXPECT_JH(jh, expr, why...)	J_ASSERT_JH(jh, expr)
@@ -959,12 +960,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
  */
 extern struct kmem_cache *jbd2_handle_cache;
 
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
+static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
 {
 	return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
 }
 
-static inline void jbd_free_handle(handle_t *handle)
+static inline void jbd2_free_handle(handle_t *handle)
 {
 	kmem_cache_free(jbd2_handle_cache, handle);
 }
@@ -1103,4 +1104,4 @@ extern int jbd_blocks_per_page(struct inode *inode);
1103 1104
1104#endif /* __KERNEL__ */ 1105#endif /* __KERNEL__ */
1105 1106
1106#endif /* _LINUX_JBD_H */ 1107#endif /* _LINUX_JBD2_H */
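
Besides mirroring the jbd.h allocator change, the jbd2.h hunk renames the handle helpers from jbd_alloc_handle()/jbd_free_handle() to jbd2_alloc_handle()/jbd2_free_handle(); the kmem_cache behind them is unchanged. A hedged sketch of a caller follows (new_handle() in fs/jbd2/transaction.c is the real one; the initialisation here is abbreviated):

#include <linux/jbd2.h>
#include <linux/string.h>

/* Illustrative only: build a handle with the renamed cache helpers. */
static handle_t *example_new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);

	if (!handle)
		return NULL;
	memset(handle, 0, sizeof(*handle));
	handle->h_buffer_credits = nblocks;	/* blocks reserved in the log */
	handle->h_ref = 1;
	return handle;
}
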
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 12bf44f083f5..e8ffce898bf9 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -53,7 +53,9 @@ static inline int kstat_irqs(int irq)
53} 53}
54 54
55extern void account_user_time(struct task_struct *, cputime_t); 55extern void account_user_time(struct task_struct *, cputime_t);
56extern void account_user_time_scaled(struct task_struct *, cputime_t);
56extern void account_system_time(struct task_struct *, int, cputime_t); 57extern void account_system_time(struct task_struct *, int, cputime_t);
58extern void account_system_time_scaled(struct task_struct *, cputime_t);
57extern void account_steal_time(struct task_struct *, cputime_t); 59extern void account_steal_time(struct task_struct *, cputime_t);
58 60
59#endif /* _LINUX_KERNEL_STAT_H */ 61#endif /* _LINUX_KERNEL_STAT_H */
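
The two new declarations add "scaled" variants of the user and system time accounting hooks, for architectures whose hardware can scale ticks by the current frequency. A hedged sketch of how a per-tick accounting path might feed both hooks (the helper name is invented, and no real scaling is applied; on most architectures the scaled value simply equals the raw one):

#include <linux/kernel_stat.h>
#include <linux/hardirq.h>
#include <linux/sched.h>

/* Illustrative only: charge one tick to a task, both raw and scaled. */
static void example_account_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_tick = jiffies_to_cputime(1);

	if (user_tick) {
		account_user_time(p, one_tick);
		account_user_time_scaled(p, one_tick);	/* no scaling here */
	} else {
		account_system_time(p, HARDIRQ_OFFSET, one_tick);
		account_system_time_scaled(p, one_tick);
	}
}
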
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 377e6d4d9be3..bc3b6fc7b98d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1037,18 +1037,6 @@ extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
1037/* 1037/*
1038 * qc helpers 1038 * qc helpers
1039 */ 1039 */
1040static inline int
1041ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
1042{
1043 if (sg == &qc->pad_sgent)
1044 return 1;
1045 if (qc->pad_len)
1046 return 0;
1047 if (qc->n_iter == qc->n_elem)
1048 return 1;
1049 return 0;
1050}
1051
1052static inline struct scatterlist * 1040static inline struct scatterlist *
1053ata_qc_first_sg(struct ata_queued_cmd *qc) 1041ata_qc_first_sg(struct ata_queued_cmd *qc)
1054{ 1042{
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a5e2dc1f0d98..6f85db3535e2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -845,7 +845,7 @@ static inline int dev_parse_header(const struct sk_buff *skb,
845{ 845{
846 const struct net_device *dev = skb->dev; 846 const struct net_device *dev = skb->dev;
847 847
848 if (!dev->header_ops->parse) 848 if (!dev->header_ops || !dev->header_ops->parse)
849 return 0; 849 return 0;
850 return dev->header_ops->parse(skb, haddr); 850 return dev->header_ops->parse(skb, haddr);
851} 851}
diff --git a/include/linux/netfilter/xt_sctp.h b/include/linux/netfilter/xt_sctp.h
index b157897e7792..dd5a4fd4cfd3 100644
--- a/include/linux/netfilter/xt_sctp.h
+++ b/include/linux/netfilter/xt_sctp.h
@@ -7,9 +7,6 @@
7 7
8#define XT_SCTP_VALID_FLAGS 0x07 8#define XT_SCTP_VALID_FLAGS 0x07
9 9
10#define ELEMCOUNT(x) (sizeof(x)/sizeof(x[0]))
11
12
13struct xt_sctp_flag_info { 10struct xt_sctp_flag_info {
14 u_int8_t chunktype; 11 u_int8_t chunktype;
15 u_int8_t flag; 12 u_int8_t flag;
@@ -59,21 +56,21 @@ struct xt_sctp_info {
59#define SCTP_CHUNKMAP_RESET(chunkmap) \ 56#define SCTP_CHUNKMAP_RESET(chunkmap) \
60 do { \ 57 do { \
61 int i; \ 58 int i; \
62 for (i = 0; i < ELEMCOUNT(chunkmap); i++) \ 59 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
63 chunkmap[i] = 0; \ 60 chunkmap[i] = 0; \
64 } while (0) 61 } while (0)
65 62
66#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \ 63#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
67 do { \ 64 do { \
68 int i; \ 65 int i; \
69 for (i = 0; i < ELEMCOUNT(chunkmap); i++) \ 66 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
70 chunkmap[i] = ~0; \ 67 chunkmap[i] = ~0; \
71 } while (0) 68 } while (0)
72 69
73#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \ 70#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
74 do { \ 71 do { \
75 int i; \ 72 int i; \
76 for (i = 0; i < ELEMCOUNT(chunkmap); i++) \ 73 for (i = 0; i < ARRAY_SIZE(srcmap); i++) \
77 destmap[i] = srcmap[i]; \ 74 destmap[i] = srcmap[i]; \
78 } while (0) 75 } while (0)
79 76
@@ -81,7 +78,7 @@ struct xt_sctp_info {
81({ \ 78({ \
82 int i; \ 79 int i; \
83 int flag = 1; \ 80 int flag = 1; \
84 for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \ 81 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
85 if (chunkmap[i]) { \ 82 if (chunkmap[i]) { \
86 flag = 0; \ 83 flag = 0; \
87 break; \ 84 break; \
@@ -94,7 +91,7 @@ struct xt_sctp_info {
94({ \ 91({ \
95 int i; \ 92 int i; \
96 int flag = 1; \ 93 int flag = 1; \
97 for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \ 94 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
98 if (chunkmap[i] != ~0) { \ 95 if (chunkmap[i] != ~0) { \
99 flag = 0; \ 96 flag = 0; \
100 break; \ 97 break; \
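
This hunk drops the private ELEMCOUNT() macro in favour of the kernel-wide ARRAY_SIZE() from <linux/kernel.h>; as a side effect, SCTP_CHUNKMAP_COPY() now iterates over ARRAY_SIZE(srcmap) instead of the stray "chunkmap" name, which was not one of its parameters. A hedged, user-space sketch of the same idiom (ARRAY_SIZE is open-coded here because <linux/kernel.h> is not available outside the kernel):

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
	/* a bitmap of SCTP chunk types, one bit per type */
	unsigned long chunkmap[256 / (sizeof(unsigned long) * 8)];
	size_t i;

	/* SCTP_CHUNKMAP_SET_ALL(), open-coded */
	for (i = 0; i < ARRAY_SIZE(chunkmap); i++)
		chunkmap[i] = ~0UL;

	printf("chunkmap words: %zu\n", ARRAY_SIZE(chunkmap));
	return 0;
}
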
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 448f70b30a0c..a8efcfeea732 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -48,6 +48,10 @@ struct of_platform_driver
48#define to_of_platform_driver(drv) \ 48#define to_of_platform_driver(drv) \
49 container_of(drv,struct of_platform_driver, driver) 49 container_of(drv,struct of_platform_driver, driver)
50 50
51extern int of_register_driver(struct of_platform_driver *drv,
52 struct bus_type *bus);
53extern void of_unregister_driver(struct of_platform_driver *drv);
54
51#include <asm/of_platform.h> 55#include <asm/of_platform.h>
52 56
53extern struct of_device *of_find_device_by_node(struct device_node *np); 57extern struct of_device *of_find_device_by_node(struct device_node *np);
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 9cdd6943e01b..ec3f76598327 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -510,7 +510,6 @@ extern struct pardevice *parport_open (int devnum, const char *name,
510 int flags, void *handle); 510 int flags, void *handle);
511extern void parport_close (struct pardevice *dev); 511extern void parport_close (struct pardevice *dev);
512extern ssize_t parport_device_id (int devnum, char *buffer, size_t len); 512extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
513extern int parport_device_num (int parport, int mux, int daisy);
514extern void parport_daisy_deselect_all (struct parport *port); 513extern void parport_daisy_deselect_all (struct parport *port);
515extern int parport_daisy_select (struct parport *port, int daisy, int mode); 514extern int parport_daisy_select (struct parport *port, int daisy, int mode);
516 515
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 48b71badfb4c..09a309b7b5d2 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -104,104 +104,6 @@ extern void (*pm_idle)(void);
104extern void (*pm_power_off)(void); 104extern void (*pm_power_off)(void);
105extern void (*pm_power_off_prepare)(void); 105extern void (*pm_power_off_prepare)(void);
106 106
107typedef int __bitwise suspend_state_t;
108
109#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
110#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
111#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
112#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
113
114/**
115 * struct pm_ops - Callbacks for managing platform dependent system sleep
116 * states.
117 *
118 * @valid: Callback to determine if given system sleep state is supported by
119 * the platform.
120 * Valid (ie. supported) states are advertised in /sys/power/state. Note
121 * that it still may be impossible to enter given system sleep state if the
122 * conditions aren't right.
123 * There is the %pm_valid_only_mem function available that can be assigned
124 * to this if the platform only supports mem sleep.
125 *
126 * @set_target: Tell the platform which system sleep state is going to be
127 * entered.
128 * @set_target() is executed right prior to suspending devices. The
129 * information conveyed to the platform code by @set_target() should be
130 * disregarded by the platform as soon as @finish() is executed and if
131 * @prepare() fails. If @set_target() fails (ie. returns nonzero),
132 * @prepare(), @enter() and @finish() will not be called by the PM core.
133 * This callback is optional. However, if it is implemented, the argument
134 * passed to @prepare(), @enter() and @finish() is meaningless and should
135 * be ignored.
136 *
137 * @prepare: Prepare the platform for entering the system sleep state indicated
138 * by @set_target() or represented by the argument if @set_target() is not
139 * implemented.
140 * @prepare() is called right after devices have been suspended (ie. the
141 * appropriate .suspend() method has been executed for each device) and
142 * before the nonboot CPUs are disabled (it is executed with IRQs enabled).
143 * This callback is optional. It returns 0 on success or a negative
144 * error code otherwise, in which case the system cannot enter the desired
145 * sleep state (@enter() and @finish() will not be called in that case).
146 *
147 * @enter: Enter the system sleep state indicated by @set_target() or
148 * represented by the argument if @set_target() is not implemented.
149 * This callback is mandatory. It returns 0 on success or a negative
150 * error code otherwise, in which case the system cannot enter the desired
151 * sleep state.
152 *
153 * @finish: Called when the system has just left a sleep state, right after
154 * the nonboot CPUs have been enabled and before devices are resumed (it is
155 * executed with IRQs enabled). If @set_target() is not implemented, the
156 * argument represents the sleep state being left.
157 * This callback is optional, but should be implemented by the platforms
158 * that implement @prepare(). If implemented, it is always called after
159 * @enter() (even if @enter() fails).
160 */
161struct pm_ops {
162 int (*valid)(suspend_state_t state);
163 int (*set_target)(suspend_state_t state);
164 int (*prepare)(suspend_state_t state);
165 int (*enter)(suspend_state_t state);
166 int (*finish)(suspend_state_t state);
167};
168
169#ifdef CONFIG_SUSPEND
170extern struct pm_ops *pm_ops;
171
172/**
173 * pm_set_ops - set platform dependent power management ops
174 * @pm_ops: The new power management operations to set.
175 */
176extern void pm_set_ops(struct pm_ops *pm_ops);
177extern int pm_valid_only_mem(suspend_state_t state);
178
179/**
180 * arch_suspend_disable_irqs - disable IRQs for suspend
181 *
182 * Disables IRQs (in the default case). This is a weak symbol in the common
183 * code and thus allows architectures to override it if more needs to be
184 * done. Not called for suspend to disk.
185 */
186extern void arch_suspend_disable_irqs(void);
187
188/**
189 * arch_suspend_enable_irqs - enable IRQs after suspend
190 *
191 * Enables IRQs (in the default case). This is a weak symbol in the common
192 * code and thus allows architectures to override it if more needs to be
193 * done. Not called for suspend to disk.
194 */
195extern void arch_suspend_enable_irqs(void);
196
197extern int pm_suspend(suspend_state_t state);
198#else /* !CONFIG_SUSPEND */
199#define suspend_valid_only_mem NULL
200
201static inline void pm_set_ops(struct pm_ops *pm_ops) {}
202static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
203#endif /* !CONFIG_SUSPEND */
204
205/* 107/*
206 * Device power management 108 * Device power management
207 */ 109 */
diff --git a/include/linux/poison.h b/include/linux/poison.h
index d93c300a3449..a9c31be7052c 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -36,7 +36,8 @@
36 */ 36 */
37 37
38/********** fs/jbd/journal.c **********/ 38/********** fs/jbd/journal.c **********/
39#define JBD_POISON_FREE 0x5b 39#define JBD_POISON_FREE 0x5b
40#define JBD2_POISON_FREE 0x5c
40 41
41/********** drivers/base/dmapool.c **********/ 42/********** drivers/base/dmapool.c **********/
42#define POOL_POISON_FREED 0xa7 /* !inuse */ 43#define POOL_POISON_FREED 0xa7 /* !inuse */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c204ab0d4df1..10a83d8d5775 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -569,7 +569,7 @@ struct sched_info {
569 last_queued; /* when we were last queued to run */ 569 last_queued; /* when we were last queued to run */
570#ifdef CONFIG_SCHEDSTATS 570#ifdef CONFIG_SCHEDSTATS
571 /* BKL stats */ 571 /* BKL stats */
572 unsigned long bkl_count; 572 unsigned int bkl_count;
573#endif 573#endif
574}; 574};
575#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ 575#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -705,34 +705,34 @@ struct sched_domain {
705 705
706#ifdef CONFIG_SCHEDSTATS 706#ifdef CONFIG_SCHEDSTATS
707 /* load_balance() stats */ 707 /* load_balance() stats */
708 unsigned long lb_count[CPU_MAX_IDLE_TYPES]; 708 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
709 unsigned long lb_failed[CPU_MAX_IDLE_TYPES]; 709 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
710 unsigned long lb_balanced[CPU_MAX_IDLE_TYPES]; 710 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
711 unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES]; 711 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
712 unsigned long lb_gained[CPU_MAX_IDLE_TYPES]; 712 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
713 unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES]; 713 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
714 unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES]; 714 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
715 unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES]; 715 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
716 716
717 /* Active load balancing */ 717 /* Active load balancing */
718 unsigned long alb_count; 718 unsigned int alb_count;
719 unsigned long alb_failed; 719 unsigned int alb_failed;
720 unsigned long alb_pushed; 720 unsigned int alb_pushed;
721 721
722 /* SD_BALANCE_EXEC stats */ 722 /* SD_BALANCE_EXEC stats */
723 unsigned long sbe_count; 723 unsigned int sbe_count;
724 unsigned long sbe_balanced; 724 unsigned int sbe_balanced;
725 unsigned long sbe_pushed; 725 unsigned int sbe_pushed;
726 726
727 /* SD_BALANCE_FORK stats */ 727 /* SD_BALANCE_FORK stats */
728 unsigned long sbf_count; 728 unsigned int sbf_count;
729 unsigned long sbf_balanced; 729 unsigned int sbf_balanced;
730 unsigned long sbf_pushed; 730 unsigned int sbf_pushed;
731 731
732 /* try_to_wake_up() stats */ 732 /* try_to_wake_up() stats */
733 unsigned long ttwu_wake_remote; 733 unsigned int ttwu_wake_remote;
734 unsigned long ttwu_move_affine; 734 unsigned int ttwu_move_affine;
735 unsigned long ttwu_move_balance; 735 unsigned int ttwu_move_balance;
736#endif 736#endif
737}; 737};
738 738
@@ -991,7 +991,7 @@ struct task_struct {
991 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ 991 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
992 992
993 unsigned int rt_priority; 993 unsigned int rt_priority;
994 cputime_t utime, stime; 994 cputime_t utime, stime, utimescaled, stimescaled;
995 cputime_t gtime; 995 cputime_t gtime;
996 unsigned long nvcsw, nivcsw; /* context switch counts */ 996 unsigned long nvcsw, nivcsw; /* context switch counts */
997 struct timespec start_time; /* monotonic time */ 997 struct timespec start_time; /* monotonic time */
@@ -1110,13 +1110,6 @@ struct task_struct {
1110 1110
1111 unsigned long ptrace_message; 1111 unsigned long ptrace_message;
1112 siginfo_t *last_siginfo; /* For ptrace use. */ 1112 siginfo_t *last_siginfo; /* For ptrace use. */
1113/*
1114 * current io wait handle: wait queue entry to use for io waits
1115 * If this thread is processing aio, this points at the waitqueue
1116 * inside the currently handled kiocb. It may be NULL (i.e. default
1117 * to a stack based synchronous wait) if its doing sync IO.
1118 */
1119 wait_queue_t *io_wait;
1120#ifdef CONFIG_TASK_XACCT 1113#ifdef CONFIG_TASK_XACCT
1121/* i/o counters(bytes read/written, #syscalls */ 1114/* i/o counters(bytes read/written, #syscalls */
1122 u64 rchar, wchar, syscr, syscw; 1115 u64 rchar, wchar, syscr, syscw;
diff --git a/include/linux/security.h b/include/linux/security.h
index 9b0b63c50f44..ff3f857f6957 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -34,6 +34,13 @@
34#include <linux/xfrm.h> 34#include <linux/xfrm.h>
35#include <net/flow.h> 35#include <net/flow.h>
36 36
37/*
38 * Bounding set
39 */
40extern kernel_cap_t cap_bset;
41
42extern unsigned securebits;
43
37struct ctl_table; 44struct ctl_table;
38 45
39/* 46/*
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 388cace9751f..4360e0816956 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -1,5 +1,5 @@
1#ifndef _LINUX_SWSUSP_H 1#ifndef _LINUX_SUSPEND_H
2#define _LINUX_SWSUSP_H 2#define _LINUX_SUSPEND_H
3 3
4#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64) 4#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
5#include <asm/suspend.h> 5#include <asm/suspend.h>
@@ -9,6 +9,108 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/pm.h> 10#include <linux/pm.h>
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <asm/errno.h>
13
14#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
15extern int pm_prepare_console(void);
16extern void pm_restore_console(void);
17#else
18static inline int pm_prepare_console(void) { return 0; }
19static inline void pm_restore_console(void) {}
20#endif
21
22typedef int __bitwise suspend_state_t;
23
24#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
25#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
26#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
27#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
28
29/**
30 * struct platform_suspend_ops - Callbacks for managing platform dependent
31 * system sleep states.
32 *
33 * @valid: Callback to determine if given system sleep state is supported by
34 * the platform.
35 * Valid (ie. supported) states are advertised in /sys/power/state. Note
36 * that it still may be impossible to enter given system sleep state if the
37 * conditions aren't right.
38 * There is the %suspend_valid_only_mem function available that can be
39 * assigned to this if the platform only supports mem sleep.
40 *
41 * @set_target: Tell the platform which system sleep state is going to be
42 * entered.
43 * @set_target() is executed right prior to suspending devices. The
44 * information conveyed to the platform code by @set_target() should be
45 * disregarded by the platform as soon as @finish() is executed and if
46 * @prepare() fails. If @set_target() fails (ie. returns nonzero),
47 * @prepare(), @enter() and @finish() will not be called by the PM core.
48 * This callback is optional. However, if it is implemented, the argument
49 * passed to @enter() is meaningless and should be ignored.
50 *
51 * @prepare: Prepare the platform for entering the system sleep state indicated
52 * by @set_target().
53 * @prepare() is called right after devices have been suspended (ie. the
54 * appropriate .suspend() method has been executed for each device) and
55 * before the nonboot CPUs are disabled (it is executed with IRQs enabled).
56 * This callback is optional. It returns 0 on success or a negative
57 * error code otherwise, in which case the system cannot enter the desired
58 * sleep state (@enter() and @finish() will not be called in that case).
59 *
60 * @enter: Enter the system sleep state indicated by @set_target() or
61 * represented by the argument if @set_target() is not implemented.
62 * This callback is mandatory. It returns 0 on success or a negative
63 * error code otherwise, in which case the system cannot enter the desired
64 * sleep state.
65 *
66 * @finish: Called when the system has just left a sleep state, right after
67 * the nonboot CPUs have been enabled and before devices are resumed (it is
68 * executed with IRQs enabled).
69 * This callback is optional, but should be implemented by the platforms
70 * that implement @prepare(). If implemented, it is always called after
71 * @enter() (even if @enter() fails).
72 */
73struct platform_suspend_ops {
74 int (*valid)(suspend_state_t state);
75 int (*set_target)(suspend_state_t state);
76 int (*prepare)(void);
77 int (*enter)(suspend_state_t state);
78 void (*finish)(void);
79};
80
81#ifdef CONFIG_SUSPEND
82/**
83 * suspend_set_ops - set platform dependent suspend operations
84 * @ops: The new suspend operations to set.
85 */
86extern void suspend_set_ops(struct platform_suspend_ops *ops);
87extern int suspend_valid_only_mem(suspend_state_t state);
88
89/**
90 * arch_suspend_disable_irqs - disable IRQs for suspend
91 *
92 * Disables IRQs (in the default case). This is a weak symbol in the common
93 * code and thus allows architectures to override it if more needs to be
94 * done. Not called for suspend to disk.
95 */
96extern void arch_suspend_disable_irqs(void);
97
98/**
99 * arch_suspend_enable_irqs - enable IRQs after suspend
100 *
101 * Enables IRQs (in the default case). This is a weak symbol in the common
102 * code and thus allows architectures to override it if more needs to be
103 * done. Not called for suspend to disk.
104 */
105extern void arch_suspend_enable_irqs(void);
106
107extern int pm_suspend(suspend_state_t state);
108#else /* !CONFIG_SUSPEND */
109#define suspend_valid_only_mem NULL
110
111static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
112static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
113#endif /* !CONFIG_SUSPEND */
12 114
13/* struct pbe is used for creating lists of pages that should be restored 115/* struct pbe is used for creating lists of pages that should be restored
14 * atomically during the resume from disk, because the page frames they have 116 * atomically during the resume from disk, because the page frames they have
@@ -24,32 +126,57 @@ struct pbe {
24extern void drain_local_pages(void); 126extern void drain_local_pages(void);
25extern void mark_free_pages(struct zone *zone); 127extern void mark_free_pages(struct zone *zone);
26 128
27#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
28extern int pm_prepare_console(void);
29extern void pm_restore_console(void);
30#else
31static inline int pm_prepare_console(void) { return 0; }
32static inline void pm_restore_console(void) {}
33#endif
34
35/** 129/**
36 * struct hibernation_ops - hibernation platform support 130 * struct platform_hibernation_ops - hibernation platform support
37 * 131 *
38 * The methods in this structure allow a platform to override the default 132 * The methods in this structure allow a platform to override the default
39 * mechanism of shutting down the machine during a hibernation transition. 133 * mechanism of shutting down the machine during a hibernation transition.
40 * 134 *
41 * All three methods must be assigned. 135 * All three methods must be assigned.
42 * 136 *
43 * @prepare: prepare system for hibernation 137 * @start: Tell the platform driver that we're starting hibernation.
44 * @enter: shut down system after state has been saved to disk 138 * Called right after shrinking memory and before freezing devices.
45 * @finish: finish/clean up after state has been reloaded 139 *
46 * @pre_restore: prepare system for the restoration from a hibernation image 140 * @pre_snapshot: Prepare the platform for creating the hibernation image.
47 * @restore_cleanup: clean up after a failing image restoration 141 * Called right after devices have been frozen and before the nonboot
142 * CPUs are disabled (runs with IRQs on).
143 *
144 * @finish: Restore the previous state of the platform after the hibernation
145 * image has been created *or* put the platform into the normal operation
146 * mode after the hibernation (the same method is executed in both cases).
147 * Called right after the nonboot CPUs have been enabled and before
148 * thawing devices (runs with IRQs on).
149 *
150 * @prepare: Prepare the platform for entering the low power state.
151 * Called right after the hibernation image has been saved and before
152 * devices are prepared for entering the low power state.
153 *
154 * @enter: Put the system into the low power state after the hibernation image
155 * has been saved to disk.
156 * Called after the nonboot CPUs have been disabled and all of the low
157 * level devices have been shut down (runs with IRQs off).
158 *
159 * @leave: Perform the first stage of the cleanup after the system sleep state
160 * indicated by @set_target() has been left.
161 * Called right after the control has been passed from the boot kernel to
162 * the image kernel, before the nonboot CPUs are enabled and before devices
163 * are resumed. Executed with interrupts disabled.
164 *
165 * @pre_restore: Prepare system for the restoration from a hibernation image.
166 * Called right after devices have been frozen and before the nonboot
167 * CPUs are disabled (runs with IRQs on).
168 *
169 * @restore_cleanup: Clean up after a failing image restoration.
170 * Called right after the nonboot CPUs have been enabled and before
171 * thawing devices (runs with IRQs on).
48 */ 172 */
49struct hibernation_ops { 173struct platform_hibernation_ops {
174 int (*start)(void);
175 int (*pre_snapshot)(void);
176 void (*finish)(void);
50 int (*prepare)(void); 177 int (*prepare)(void);
51 int (*enter)(void); 178 int (*enter)(void);
52 void (*finish)(void); 179 void (*leave)(void);
53 int (*pre_restore)(void); 180 int (*pre_restore)(void);
54 void (*restore_cleanup)(void); 181 void (*restore_cleanup)(void);
55}; 182};
@@ -70,14 +197,14 @@ extern void swsusp_set_page_free(struct page *);
70extern void swsusp_unset_page_free(struct page *); 197extern void swsusp_unset_page_free(struct page *);
71extern unsigned long get_safe_page(gfp_t gfp_mask); 198extern unsigned long get_safe_page(gfp_t gfp_mask);
72 199
73extern void hibernation_set_ops(struct hibernation_ops *ops); 200extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
74extern int hibernate(void); 201extern int hibernate(void);
75#else /* CONFIG_HIBERNATION */ 202#else /* CONFIG_HIBERNATION */
76static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } 203static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
77static inline void swsusp_set_page_free(struct page *p) {} 204static inline void swsusp_set_page_free(struct page *p) {}
78static inline void swsusp_unset_page_free(struct page *p) {} 205static inline void swsusp_unset_page_free(struct page *p) {}
79 206
80static inline void hibernation_set_ops(struct hibernation_ops *ops) {} 207static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
81static inline int hibernate(void) { return -ENOSYS; } 208static inline int hibernate(void) { return -ENOSYS; }
82#endif /* CONFIG_HIBERNATION */ 209#endif /* CONFIG_HIBERNATION */
83 210
@@ -130,4 +257,4 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e)
130} 257}
131#endif 258#endif
132 259
133#endif /* _LINUX_SWSUSP_H */ 260#endif /* _LINUX_SUSPEND_H */
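
Taken together with the pm.h hunk above, this moves suspend_state_t and the platform sleep callbacks from pm.h into suspend.h and renames struct pm_ops/pm_set_ops() to struct platform_suspend_ops/suspend_set_ops(), with @prepare losing its argument and @finish becoming void. A hedged sketch of a platform registering against the new names; the board name and callback bodies are invented:

#include <linux/suspend.h>
#include <linux/init.h>

static int exampleboard_suspend_prepare(void)
{
	return 0;				/* nothing extra to set up */
}

static int exampleboard_suspend_enter(suspend_state_t state)
{
	/* program the SoC sleep registers for PM_SUSPEND_MEM here */
	return 0;
}

static void exampleboard_suspend_finish(void)
{
	/* undo whatever prepare() did */
}

static struct platform_suspend_ops exampleboard_suspend_ops = {
	.valid   = suspend_valid_only_mem,	/* only "mem" is supported */
	.prepare = exampleboard_suspend_prepare,
	.enter   = exampleboard_suspend_enter,
	.finish  = exampleboard_suspend_finish,
};

static int __init exampleboard_pm_init(void)
{
	suspend_set_ops(&exampleboard_suspend_ops);
	return 0;
}
late_initcall(exampleboard_pm_init);
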
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 483050c924c3..e99171f01b4c 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -238,6 +238,7 @@ enum
238 NET_LLC=18, 238 NET_LLC=18,
239 NET_NETFILTER=19, 239 NET_NETFILTER=19,
240 NET_DCCP=20, 240 NET_DCCP=20,
241 NET_IRDA=412,
241}; 242};
242 243
243/* /proc/sys/kernel/random */ 244/* /proc/sys/kernel/random */
@@ -795,6 +796,25 @@ enum {
795 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, 796 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
796}; 797};
797 798
799/* proc/sys/net/irda */
800enum {
801 NET_IRDA_DISCOVERY=1,
802 NET_IRDA_DEVNAME=2,
803 NET_IRDA_DEBUG=3,
804 NET_IRDA_FAST_POLL=4,
805 NET_IRDA_DISCOVERY_SLOTS=5,
806 NET_IRDA_DISCOVERY_TIMEOUT=6,
807 NET_IRDA_SLOT_TIMEOUT=7,
808 NET_IRDA_MAX_BAUD_RATE=8,
809 NET_IRDA_MIN_TX_TURN_TIME=9,
810 NET_IRDA_MAX_TX_DATA_SIZE=10,
811 NET_IRDA_MAX_TX_WINDOW=11,
812 NET_IRDA_MAX_NOREPLY_TIME=12,
813 NET_IRDA_WARN_NOREPLY_TIME=13,
814 NET_IRDA_LAP_KEEPALIVE_TIME=14,
815};
816
817
798/* CTL_FS names: */ 818/* CTL_FS names: */
799enum 819enum
800{ 820{
@@ -937,41 +957,42 @@ extern int sysctl_perm(struct ctl_table *table, int op);
937 957
938typedef struct ctl_table ctl_table; 958typedef struct ctl_table ctl_table;
939 959
940typedef int ctl_handler (ctl_table *table, int __user *name, int nlen, 960typedef int ctl_handler (struct ctl_table *table, int __user *name, int nlen,
941 void __user *oldval, size_t __user *oldlenp, 961 void __user *oldval, size_t __user *oldlenp,
942 void __user *newval, size_t newlen); 962 void __user *newval, size_t newlen);
943 963
944typedef int proc_handler (ctl_table *ctl, int write, struct file * filp, 964typedef int proc_handler (struct ctl_table *ctl, int write, struct file * filp,
945 void __user *buffer, size_t *lenp, loff_t *ppos); 965 void __user *buffer, size_t *lenp, loff_t *ppos);
946 966
947extern int proc_dostring(ctl_table *, int, struct file *, 967extern int proc_dostring(struct ctl_table *, int, struct file *,
948 void __user *, size_t *, loff_t *); 968 void __user *, size_t *, loff_t *);
949extern int proc_dointvec(ctl_table *, int, struct file *, 969extern int proc_dointvec(struct ctl_table *, int, struct file *,
950 void __user *, size_t *, loff_t *); 970 void __user *, size_t *, loff_t *);
951extern int proc_dointvec_bset(ctl_table *, int, struct file *, 971extern int proc_dointvec_bset(struct ctl_table *, int, struct file *,
952 void __user *, size_t *, loff_t *); 972 void __user *, size_t *, loff_t *);
953extern int proc_dointvec_minmax(ctl_table *, int, struct file *, 973extern int proc_dointvec_minmax(struct ctl_table *, int, struct file *,
954 void __user *, size_t *, loff_t *); 974 void __user *, size_t *, loff_t *);
955extern int proc_dointvec_jiffies(ctl_table *, int, struct file *, 975extern int proc_dointvec_jiffies(struct ctl_table *, int, struct file *,
956 void __user *, size_t *, loff_t *); 976 void __user *, size_t *, loff_t *);
957extern int proc_dointvec_userhz_jiffies(ctl_table *, int, struct file *, 977extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, struct file *,
958 void __user *, size_t *, loff_t *); 978 void __user *, size_t *, loff_t *);
959extern int proc_dointvec_ms_jiffies(ctl_table *, int, struct file *, 979extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, struct file *,
960 void __user *, size_t *, loff_t *); 980 void __user *, size_t *, loff_t *);
961extern int proc_doulongvec_minmax(ctl_table *, int, struct file *, 981extern int proc_doulongvec_minmax(struct ctl_table *, int, struct file *,
962 void __user *, size_t *, loff_t *); 982 void __user *, size_t *, loff_t *);
963extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int, 983extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
964 struct file *, void __user *, size_t *, loff_t *); 984 struct file *, void __user *, size_t *, loff_t *);
965 985
966extern int do_sysctl (int __user *name, int nlen, 986extern int do_sysctl (int __user *name, int nlen,
967 void __user *oldval, size_t __user *oldlenp, 987 void __user *oldval, size_t __user *oldlenp,
968 void __user *newval, size_t newlen); 988 void __user *newval, size_t newlen);
969 989
970extern int do_sysctl_strategy (ctl_table *table, 990extern int do_sysctl_strategy (struct ctl_table *table,
971 int __user *name, int nlen, 991 int __user *name, int nlen,
972 void __user *oldval, size_t __user *oldlenp, 992 void __user *oldval, size_t __user *oldlenp,
973 void __user *newval, size_t newlen); 993 void __user *newval, size_t newlen);
974 994
995extern ctl_handler sysctl_data;
975extern ctl_handler sysctl_string; 996extern ctl_handler sysctl_string;
976extern ctl_handler sysctl_intvec; 997extern ctl_handler sysctl_intvec;
977extern ctl_handler sysctl_jiffies; 998extern ctl_handler sysctl_jiffies;
@@ -980,7 +1001,7 @@ extern ctl_handler sysctl_ms_jiffies;
980 1001
981/* 1002/*
982 * Register a set of sysctl names by calling register_sysctl_table 1003 * Register a set of sysctl names by calling register_sysctl_table
983 * with an initialised array of ctl_table's. An entry with zero 1004 * with an initialised array of struct ctl_table's. An entry with zero
984 * ctl_name and NULL procname terminates the table. table->de will be 1005 * ctl_name and NULL procname terminates the table. table->de will be
985 * set up by the registration and need not be initialised in advance. 1006 * set up by the registration and need not be initialised in advance.
986 * 1007 *
@@ -1026,8 +1047,8 @@ struct ctl_table
1026 void *data; 1047 void *data;
1027 int maxlen; 1048 int maxlen;
1028 mode_t mode; 1049 mode_t mode;
1029 ctl_table *child; 1050 struct ctl_table *child;
1030 ctl_table *parent; /* Automatically set */ 1051 struct ctl_table *parent; /* Automatically set */
1031 proc_handler *proc_handler; /* Callback for text formatting */ 1052 proc_handler *proc_handler; /* Callback for text formatting */
1032 ctl_handler *strategy; /* Callback function for all r/w */ 1053 ctl_handler *strategy; /* Callback function for all r/w */
1033 void *extra1; 1054 void *extra1;
@@ -1035,18 +1056,19 @@ struct ctl_table
1035}; 1056};
1036 1057
1037/* struct ctl_table_header is used to maintain dynamic lists of 1058/* struct ctl_table_header is used to maintain dynamic lists of
1038 ctl_table trees. */ 1059 struct ctl_table trees. */
1039struct ctl_table_header 1060struct ctl_table_header
1040{ 1061{
1041 ctl_table *ctl_table; 1062 struct ctl_table *ctl_table;
1042 struct list_head ctl_entry; 1063 struct list_head ctl_entry;
1043 int used; 1064 int used;
1044 struct completion *unregistering; 1065 struct completion *unregistering;
1045}; 1066};
1046 1067
1047struct ctl_table_header * register_sysctl_table(ctl_table * table); 1068struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
1048 1069
1049void unregister_sysctl_table(struct ctl_table_header * table); 1070void unregister_sysctl_table(struct ctl_table_header * table);
1071int sysctl_check_table(struct ctl_table *table);
1050 1072
1051#else /* __KERNEL__ */ 1073#else /* __KERNEL__ */
1052 1074
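
The sysctl.h changes mostly spell out struct ctl_table instead of the bare ctl_table typedef, and add the sysctl_data strategy handler plus sysctl_check_table(). The registration comment further up describes a zero-terminated table; a hedged sketch of that pattern in the new spelling, with an invented knob name:

#include <linux/sysctl.h>
#include <linux/errno.h>
#include <linux/init.h>

static int example_value;

static struct ctl_table example_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,	/* no binary name */
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }				/* terminator */
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_init(void)
{
	example_header = register_sysctl_table(example_table);
	return example_header ? 0 : -ENOMEM;
}
__initcall(example_sysctl_init);
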
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
index dce1ed204972..5d69c0744fff 100644
--- a/include/linux/taskstats.h
+++ b/include/linux/taskstats.h
@@ -31,7 +31,7 @@
31 */ 31 */
32 32
33 33
34#define TASKSTATS_VERSION 5 34#define TASKSTATS_VERSION 6
35#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN 35#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
36 * in linux/sched.h */ 36 * in linux/sched.h */
37 37
@@ -152,6 +152,11 @@ struct taskstats {
152 152
153 __u64 nvcsw; /* voluntary_ctxt_switches */ 153 __u64 nvcsw; /* voluntary_ctxt_switches */
154 __u64 nivcsw; /* nonvoluntary_ctxt_switches */ 154 __u64 nivcsw; /* nonvoluntary_ctxt_switches */
155
156 /* time accounting for SMT machines */
157 __u64 ac_utimescaled; /* utime scaled on frequency etc */
158 __u64 ac_stimescaled; /* stime scaled on frequency etc */
159 __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */
155}; 160};
156 161
157 162
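
The taskstats bump to version 6 only appends fields, so existing consumers keep working. A hedged, user-space sketch of reading the new scaled counters from an already-received sample; the netlink plumbing that fills the struct is unchanged and omitted, and the units follow the unscaled ac_utime/ac_stime and cpu_run_real_total counterparts:

#include <stdio.h>
#include <linux/taskstats.h>

static void print_scaled_times(const struct taskstats *ts)
{
	if (ts->version < 6)
		return;			/* kernel predates the scaled fields */

	printf("utime scaled:    %llu\n",
	       (unsigned long long)ts->ac_utimescaled);
	printf("stime scaled:    %llu\n",
	       (unsigned long long)ts->ac_stimescaled);
	printf("scaled run time: %llu\n",
	       (unsigned long long)ts->cpu_scaled_run_real_total);
}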