author    Linus Torvalds <torvalds@linux-foundation.org>  2009-06-16 22:50:13 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-06-16 22:50:13 -0400
commit    517d08699b250021303f9a7cf0d758b6dc0748ed (patch)
tree      5e5b0134c3fffb78fe9d8b1641a64ff28fdd7bbc /include/linux
parent    8eeee4e2f04fc551f50c9d9847da2d73d7d33728 (diff)
parent    a34601c5d84134055782ee031d58d82f5440e918 (diff)
Merge branch 'akpm'
* akpm: (182 commits)
fbdev: bf54x-lq043fb: use kzalloc over kmalloc/memset
fbdev: *bfin*: fix __dev{init,exit} markings
fbdev: *bfin*: drop unnecessary calls to memset
fbdev: bfin-t350mcqb-fb: drop unused local variables
fbdev: blackfin has __raw I/O accessors, so use them in fb.h
fbdev: s1d13xxxfb: add accelerated bitblt functions
tcx: use standard fields for framebuffer physical address and length
fbdev: add support for handoff from firmware to hw framebuffers
intelfb: fix a bug when changing video timing
fbdev: use framebuffer_release() for freeing fb_info structures
radeon: P2G2CLK_ALWAYS_ONb tested twice, should 2nd be P2G2CLK_DAC_ALWAYS_ONb?
s3c-fb: CPUFREQ frequency scaling support
s3c-fb: fix resource releasing on error during probing
carminefb: fix possible access beyond end of carmine_modedb[]
acornfb: remove fb_mmap function
mb862xxfb: use CONFIG_OF instead of CONFIG_PPC_OF
mb862xxfb: restrict compliation of platform driver to PPC
Samsung SoC Framebuffer driver: add Alpha Channel support
atmel-lcdc: fix pixclock upper bound detection
offb: use framebuffer_alloc() to allocate fb_info struct
...
Manually fix up conflicts due to kmemcheck in mm/slab.c
Diffstat (limited to 'include/linux')
32 files changed, 488 insertions, 194 deletions
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 54398d2c6d8d..d276b5510c83 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -1,7 +1,6 @@
 #ifndef _LINUX_BUG_H
 #define _LINUX_BUG_H
 
-#include <linux/module.h>
 #include <asm/bug.h>
 
 enum bug_trap_type {
@@ -24,10 +23,6 @@ const struct bug_entry *find_bug(unsigned long bugaddr);
 
 enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
 
-int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
-			struct module *);
-void module_bug_cleanup(struct module *);
-
 /* These are defined by the architecture */
 int is_valid_bugaddr(unsigned long addr);
 
@@ -38,13 +33,6 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
 {
 	return BUG_TRAP_TYPE_BUG;
 }
-static inline int module_bug_finalize(const Elf_Ehdr *hdr,
-				      const Elf_Shdr *sechdrs,
-				      struct module *mod)
-{
-	return 0;
-}
-static inline void module_bug_cleanup(struct module *mod) {}
 
 #endif	/* CONFIG_GENERIC_BUG */
 #endif	/* _LINUX_BUG_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 05ea1dd7d681..a5740fc4d04b 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -18,7 +18,6 @@
 
 extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 
-extern int cpuset_init_early(void);
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
@@ -27,7 +26,6 @@ extern void cpuset_cpus_allowed_locked(struct task_struct *p,
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
-void cpuset_update_task_memory_state(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
 extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
@@ -92,9 +90,13 @@ extern void rebuild_sched_domains(void);
 
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
+static inline void set_mems_allowed(nodemask_t nodemask)
+{
+	current->mems_allowed = nodemask;
+}
+
 #else /* !CONFIG_CPUSETS */
 
-static inline int cpuset_init_early(void) { return 0; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
@@ -116,7 +118,6 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 
 #define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
 static inline void cpuset_init_current_mems_allowed(void) {}
-static inline void cpuset_update_task_memory_state(void) {}
 
 static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 {
@@ -188,6 +189,10 @@ static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
 {
 }
 
+static inline void set_mems_allowed(nodemask_t nodemask)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
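For illustration only (not part of this merge), a minimal sketch of how the new set_mems_allowed() helper is meant to be used: with CONFIG_CPUSETS it overwrites current->mems_allowed, and without it the call compiles away to the empty stub added above. The helper name below is hypothetical.

	#include <linux/cpuset.h>
	#include <linux/nodemask.h>

	/* Hypothetical helper: let the current kernel thread allocate from
	 * any node that has memory. */
	static void example_relax_mems_allowed(void)
	{
		set_mems_allowed(node_states[N_HIGH_MEMORY]);
	}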
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 330c4b1bfcaa..dd68358996b7 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -677,6 +677,9 @@ struct fb_ops {
 	/* get capability given var */
 	void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
 			    struct fb_var_screeninfo *var);
+
+	/* teardown any resources to do with this framebuffer */
+	void (*fb_destroy)(struct fb_info *info);
 };
 
 #ifdef CONFIG_FB_TILEBLITTING
@@ -786,6 +789,8 @@ struct fb_tile_ops {
 #define FBINFO_MISC_USEREVENT          0x10000 /* event request
 						  from userspace */
 #define FBINFO_MISC_TILEBLITTING       0x20000 /* use tile blitting */
+#define FBINFO_MISC_FIRMWARE           0x40000 /* a replaceable firmware
+						  inited framebuffer */
 
 /* A driver may set this flag to indicate that it does want a set_par to be
  * called every time when fbcon_switch is executed. The advantage is that with
@@ -854,7 +859,12 @@ struct fb_info {
 	u32 state;			/* Hardware state i.e suspend */
 	void *fbcon_par;		/* fbcon use-only private area */
 	/* From here on everything is device dependent */
 	void *par;
+	/* we need the PCI or similiar aperture base/size not
+	   smem_start/size as smem_start may just be an object
+	   allocated inside the aperture so may not actually overlap */
+	resource_size_t aperture_base;
+	resource_size_t aperture_size;
 };
 
 #ifdef MODULE
@@ -893,7 +903,7 @@ struct fb_info {
 #define fb_writeq sbus_writeq
 #define fb_memset sbus_memset_io
 
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
 
 #define fb_readb __raw_readb
 #define fb_readw __raw_readw
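The new FBINFO_MISC_FIRMWARE flag, the aperture_base/aperture_size fields and the fb_destroy hook exist so that a firmware-initialized framebuffer (vesafb/offb style) can be handed off when a native driver claims the same aperture. A hedged sketch of how such a firmware driver might fill these in; the function names are made up and this is not code from the merge:

	#include <linux/fb.h>
	#include <linux/io.h>

	/* Sketch: probe-time setup for a firmware framebuffer driver. */
	static int example_fwfb_register(struct fb_info *info,
					 unsigned long phys_base,
					 unsigned long size)
	{
		info->flags |= FBINFO_MISC_FIRMWARE;	/* replaceable by a real driver */
		info->aperture_base = phys_base;	/* whole aperture, not just smem */
		info->aperture_size = size;
		return register_framebuffer(info);
	}

	/* Sketch: wired up as fb_ops.fb_destroy, called when a hardware driver
	 * takes over the aperture and the firmware fb_info is released. */
	static void example_fwfb_destroy(struct fb_info *info)
	{
		if (info->screen_base)
			iounmap(info->screen_base);
		framebuffer_release(info);
	}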
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
index cca686b39123..875451f1373a 100644
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -24,21 +24,17 @@
  */
 #ifdef CONFIG_FIRMWARE_MEMMAP
 
-int firmware_map_add(resource_size_t start, resource_size_t end,
-		     const char *type);
-int firmware_map_add_early(resource_size_t start, resource_size_t end,
-			   const char *type);
+int firmware_map_add(u64 start, u64 end, const char *type);
+int firmware_map_add_early(u64 start, u64 end, const char *type);
 
 #else /* CONFIG_FIRMWARE_MEMMAP */
 
-static inline int firmware_map_add(resource_size_t start, resource_size_t end,
-				   const char *type)
+static inline int firmware_map_add(u64 start, u64 end, const char *type)
 {
 	return 0;
 }
 
-static inline int firmware_map_add_early(resource_size_t start,
-					 resource_size_t end, const char *type)
+static inline int firmware_map_add_early(u64 start, u64 end, const char *type)
 {
 	return 0;
 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6d12174fbe11..74a57938c880 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -879,7 +879,7 @@ struct file_ra_state {
 					   there are only # of pages ahead */
 
 	unsigned int ra_pages;		/* Maximum readahead window */
-	int mmap_miss;			/* Cache miss stat for mmap accesses */
+	unsigned int mmap_miss;		/* Cache miss stat for mmap accesses */
 	loff_t prev_pos;		/* Cache last read() position */
 };
 
@@ -2037,9 +2037,6 @@ extern int __invalidate_device(struct block_device *);
 extern int invalidate_partition(struct gendisk *, int);
 #endif
 extern int invalidate_inodes(struct super_block *);
-unsigned long __invalidate_mapping_pages(struct address_space *mapping,
-					 pgoff_t start, pgoff_t end,
-					 bool be_atomic);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
 					pgoff_t start, pgoff_t end);
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 80e14b8c2e78..cfdb35d71bca 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -5,6 +5,7 @@
 #include <linux/stddef.h>
 #include <linux/linkage.h>
 #include <linux/topology.h>
+#include <linux/mmdebug.h>
 
 struct vm_area_struct;
 
@@ -20,7 +21,8 @@ struct vm_area_struct;
 #define __GFP_DMA	((__force gfp_t)0x01u)
 #define __GFP_HIGHMEM	((__force gfp_t)0x02u)
 #define __GFP_DMA32	((__force gfp_t)0x04u)
-
+#define __GFP_MOVABLE	((__force gfp_t)0x08u)  /* Page is movable */
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 /*
  * Action modifiers - doesn't change the zoning
  *
@@ -50,7 +52,6 @@ struct vm_area_struct;
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-#define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
 
 #ifdef CONFIG_KMEMCHECK
 #define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
@@ -127,24 +128,105 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-static inline enum zone_type gfp_zone(gfp_t flags)
-{
+#ifdef CONFIG_HIGHMEM
+#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
+#else
+#define OPT_ZONE_HIGHMEM ZONE_NORMAL
+#endif
+
 #ifdef CONFIG_ZONE_DMA
-	if (flags & __GFP_DMA)
-		return ZONE_DMA;
+#define OPT_ZONE_DMA ZONE_DMA
+#else
+#define OPT_ZONE_DMA ZONE_NORMAL
 #endif
+
 #ifdef CONFIG_ZONE_DMA32
-	if (flags & __GFP_DMA32)
-		return ZONE_DMA32;
+#define OPT_ZONE_DMA32 ZONE_DMA32
+#else
+#define OPT_ZONE_DMA32 ZONE_NORMAL
 #endif
-	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
-			(__GFP_HIGHMEM | __GFP_MOVABLE))
-		return ZONE_MOVABLE;
-#ifdef CONFIG_HIGHMEM
-	if (flags & __GFP_HIGHMEM)
-		return ZONE_HIGHMEM;
+
+/*
+ * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
+ * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
+ * and there are 16 of them to cover all possible combinations of
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM
+ *
+ * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
+ * But GFP_MOVABLE is not only a zone specifier but also an allocation
+ * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
+ * Only 1bit of the lowest 3 bit (DMA,DMA32,HIGHMEM) can be set to "1".
+ *
+ *       bit       result
+ *       =================
+ *       0x0    => NORMAL
+ *       0x1    => DMA or NORMAL
+ *       0x2    => HIGHMEM or NORMAL
+ *       0x3    => BAD (DMA+HIGHMEM)
+ *       0x4    => DMA32 or DMA or NORMAL
+ *       0x5    => BAD (DMA+DMA32)
+ *       0x6    => BAD (HIGHMEM+DMA32)
+ *       0x7    => BAD (HIGHMEM+DMA32+DMA)
+ *       0x8    => NORMAL (MOVABLE+0)
+ *       0x9    => DMA or NORMAL (MOVABLE+DMA)
+ *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
+ *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
+ *       0xc    => DMA32 (MOVABLE+HIGHMEM+DMA32)
+ *       0xd    => BAD (MOVABLE+DMA32+DMA)
+ *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
+ *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
+ *
+ *       ZONES_SHIFT must be <= 2 on 32 bit platforms.
+ */
+
+#if 16 * ZONES_SHIFT > BITS_PER_LONG
+#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
+#endif
+
+#define GFP_ZONE_TABLE ( \
+	(ZONE_NORMAL << 0 * ZONES_SHIFT)				\
+	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			\
+	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)		\
+	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)			\
+	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)			\
+	| (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)	\
+	| (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
+	| (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
+)
+
+/*
+ * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32
+ * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
+ * entry starting with bit 0. Bit is set if the combination is not
+ * allowed.
+ */
+#define GFP_ZONE_BAD ( \
+	1 << (__GFP_DMA | __GFP_HIGHMEM)				\
+	| 1 << (__GFP_DMA | __GFP_DMA32)				\
+	| 1 << (__GFP_DMA32 | __GFP_HIGHMEM)				\
+	| 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)		\
+	| 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)		\
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)		\
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)		\
+	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
+)
+
+static inline enum zone_type gfp_zone(gfp_t flags)
+{
+	enum zone_type z;
+	int bit = flags & GFP_ZONEMASK;
+
+	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
+					 ((1 << ZONES_SHIFT) - 1);
+
+	if (__builtin_constant_p(bit))
+		BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+	else {
+#ifdef CONFIG_DEBUG_VM
+		BUG_ON((GFP_ZONE_BAD >> bit) & 1);
 #endif
-	return ZONE_NORMAL;
+	}
+	return z;
 }
 
 /*
@@ -184,30 +266,19 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 #endif
 
 struct page *
-__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		       struct zonelist *zonelist, nodemask_t *nodemask);
 
 static inline struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
-	return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
+	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
 }
 
-static inline struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, nodemask_t *nodemask)
-{
-	return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
-
-
 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 						unsigned int order)
 {
-	if (unlikely(order >= MAX_ORDER))
-		return NULL;
-
 	/* Unknown node is current node */
 	if (nid < 0)
 		nid = numa_node_id();
@@ -215,15 +286,20 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
+static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+						unsigned int order)
+{
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+
+	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
 #ifdef CONFIG_NUMA
 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
 
 static inline struct page *
 alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
-	if (unlikely(order >= MAX_ORDER))
-		return NULL;
-
 	return alloc_pages_current(gfp_mask, order);
 }
 extern struct page *alloc_page_vma(gfp_t gfp_mask,
@@ -260,4 +336,16 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(void);
 void drain_local_pages(void *dummy);
 
+extern bool oom_killer_disabled;
+
+static inline void oom_killer_disable(void)
+{
+	oom_killer_disabled = true;
+}
+
+static inline void oom_killer_enable(void)
+{
+	oom_killer_disabled = false;
+}
+
 #endif /* __LINUX_GFP_H */
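The rewritten gfp_zone() replaces the old if/else chain with a constant table indexed by the low four GFP bits; each entry is ZONES_SHIFT bits wide, so the whole table fits in one word. As a standalone illustration of the same lookup idea, here is a sketch with hypothetical constants standing in for the kernel's zone numbering (this is not the kernel's code):

	#include <assert.h>

	/* Hypothetical stand-ins for the kernel's zone bits and zone ids. */
	enum { DMA = 1, HIGHMEM = 2, DMA32 = 4, MOVABLE = 8 };        /* gfp bits */
	enum { Z_DMA = 0, Z_DMA32 = 1, Z_NORMAL = 2, Z_HIGHMEM = 3 }; /* zone ids */
	#define SHIFT 2	/* bits per table entry, like ZONES_SHIFT */

	static const unsigned long table =
		  (Z_NORMAL  << (0 * SHIFT))
		| (Z_DMA     << (DMA * SHIFT))
		| (Z_HIGHMEM << (HIGHMEM * SHIFT))
		| (Z_DMA32   << (DMA32 * SHIFT))
		| (Z_NORMAL  << (MOVABLE * SHIFT))
		| (Z_DMA     << ((MOVABLE | DMA) * SHIFT))
		| (Z_HIGHMEM << ((MOVABLE | HIGHMEM) * SHIFT)) /* kernel: ZONE_MOVABLE */
		| (Z_DMA32   << ((MOVABLE | DMA32) * SHIFT));

	static int pick_zone(int flags)
	{
		return (table >> ((flags & 0xf) * SHIFT)) & ((1 << SHIFT) - 1);
	}

	int main(void)
	{
		assert(pick_zone(0) == Z_NORMAL);
		assert(pick_zone(DMA) == Z_DMA);
		assert(pick_zone(MOVABLE | HIGHMEM) == Z_HIGHMEM);
		return 0;
	}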
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 1fcb7126a01f..211ff4497269 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -55,7 +55,9 @@ static inline void *kmap(struct page *page)
 	return page_address(page);
 }
 
-#define kunmap(page) do { (void) (page); } while (0)
+static inline void kunmap(struct page *page)
+{
+}
 
 static inline void *kmap_atomic(struct page *page, enum km_type idx)
 {
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 03be7f29ca01..a05a5ef33391 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -11,6 +11,8 @@
 
 struct ctl_table;
 
+int PageHuge(struct page *page);
+
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
 	return vma->vm_flags & VM_HUGETLB;
@@ -61,6 +63,11 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
+static inline int PageHuge(struct page *page)
+{
+	return 0;
+}
+
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
 	return 0;
diff --git a/include/linux/init.h b/include/linux/init.h
index b2189803f19a..8c2c9989626d 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -29,7 +29,7 @@
  * sign followed by value, e.g.:
  *
  * static int init_variable __initdata = 0;
- * static char linux_logo[] __initdata  = { 0x32, 0x36, ... };
+ * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
  *
  * Don't forget to initialize data not at file scope, i.e. within a function,
  * as gcc otherwise puts the data into the bss section and not into the init
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 28b1f30601b5..5368fbdc7801 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,18 +15,6 @@
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
-#define INIT_MM(name) \
-{			 				\
-	.mm_rb		= RB_ROOT,			\
-	.pgd		= swapper_pg_dir, 		\
-	.mm_users	= ATOMIC_INIT(2), 		\
-	.mm_count	= ATOMIC_INIT(1), 		\
-	.mmap_sem	= __RWSEM_INITIALIZER(name.mmap_sem), \
-	.page_table_lock =  __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
-	.mmlist		= LIST_HEAD_INIT(name.mmlist),	\
-	.cpu_vm_mask	= CPU_MASK_ALL,			\
-}
-
 #define INIT_SIGNALS(sig) {						\
 	.count		= ATOMIC_INIT(1), 				\
 	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index 08a92969c76e..ca5bd91d12e1 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -32,6 +32,22 @@ struct linux_logo {
 	const unsigned char *data;
 };
 
+extern const struct linux_logo logo_linux_mono;
+extern const struct linux_logo logo_linux_vga16;
+extern const struct linux_logo logo_linux_clut224;
+extern const struct linux_logo logo_blackfin_vga16;
+extern const struct linux_logo logo_blackfin_clut224;
+extern const struct linux_logo logo_dec_clut224;
+extern const struct linux_logo logo_mac_clut224;
+extern const struct linux_logo logo_parisc_clut224;
+extern const struct linux_logo logo_sgi_clut224;
+extern const struct linux_logo logo_sun_clut224;
+extern const struct linux_logo logo_superh_mono;
+extern const struct linux_logo logo_superh_vga16;
+extern const struct linux_logo logo_superh_clut224;
+extern const struct linux_logo logo_m32r_clut224;
+extern const struct linux_logo logo_spe_clut224;
+
 extern const struct linux_logo *fb_find_logo(int depth);
 #ifdef CONFIG_FB_LOGO_EXTRA
 extern void fb_append_extra_logo(const struct linux_logo *logo,
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
new file mode 100644
index 000000000000..ad651f4e45ac
--- /dev/null
+++ b/include/linux/lis3lv02d.h
@@ -0,0 +1,39 @@
+#ifndef __LIS3LV02D_H_
+#define __LIS3LV02D_H_
+
+struct lis3lv02d_platform_data {
+	/* please note: the 'click' feature is only supported for
+	 * LIS[32]02DL variants of the chip and will be ignored for
+	 * others */
+#define LIS3_CLICK_SINGLE_X	(1 << 0)
+#define LIS3_CLICK_DOUBLE_X	(1 << 1)
+#define LIS3_CLICK_SINGLE_Y	(1 << 2)
+#define LIS3_CLICK_DOUBLE_Y	(1 << 3)
+#define LIS3_CLICK_SINGLE_Z	(1 << 4)
+#define LIS3_CLICK_DOUBLE_Z	(1 << 5)
+	unsigned char click_flags;
+	unsigned char click_thresh_x;
+	unsigned char click_thresh_y;
+	unsigned char click_thresh_z;
+	unsigned char click_time_limit;
+	unsigned char click_latency;
+	unsigned char click_window;
+
+#define LIS3_IRQ1_DISABLE	(0 << 0)
+#define LIS3_IRQ1_FF_WU_1	(1 << 0)
+#define LIS3_IRQ1_FF_WU_2	(2 << 0)
+#define LIS3_IRQ1_FF_WU_12	(3 << 0)
+#define LIS3_IRQ1_DATA_READY	(4 << 0)
+#define LIS3_IRQ1_CLICK		(7 << 0)
+#define LIS3_IRQ2_DISABLE	(0 << 3)
+#define LIS3_IRQ2_FF_WU_1	(1 << 3)
+#define LIS3_IRQ2_FF_WU_2	(2 << 3)
+#define LIS3_IRQ2_FF_WU_12	(3 << 3)
+#define LIS3_IRQ2_DATA_READY	(4 << 3)
+#define LIS3_IRQ2_CLICK		(7 << 3)
+#define LIS3_IRQ_OPEN_DRAIN	(1 << 6)
+#define LIS3_IRQ_ACTIVE_HIGH	(1 << 7)
+	unsigned char irq_cfg;
+};
+
+#endif /* __LIS3LV02D_H_ */
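A hedged sketch of board-support code filling in the new platform data; the values and the variable name are purely illustrative and not taken from this merge:

	#include <linux/lis3lv02d.h>

	/* Hypothetical board file: single-click detection on X and Y,
	 * IRQ1 signals clicks, IRQ2 signals data-ready, lines active high. */
	static struct lis3lv02d_platform_data example_lis3_pdata = {
		.click_flags	= LIS3_CLICK_SINGLE_X | LIS3_CLICK_SINGLE_Y,
		.click_thresh_x	= 8,
		.click_thresh_y	= 8,
		.irq_cfg	= LIS3_IRQ1_CLICK | LIS3_IRQ2_DATA_READY |
				  LIS3_IRQ_ACTIVE_HIGH,
	};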
diff --git a/include/linux/major.h b/include/linux/major.h
index 058ec15dd060..6a8ca98c9a96 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -145,6 +145,7 @@
 #define UNIX98_PTY_MAJOR_COUNT	8
 #define UNIX98_PTY_SLAVE_MAJOR	(UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT)
 
+#define DRBD_MAJOR		147
 #define RTF_MAJOR		150
 #define RAW_MAJOR		162
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 25b9ca93d232..45add35dda1b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -94,6 +94,7 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
 				       struct zone *zone,
 				       enum lru_list lru);
@@ -239,6 +240,12 @@ mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
 	return 1;
 }
 
+static inline int
+mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+	return 1;
+}
+
 static inline unsigned long
 mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
 			 enum lru_list lru)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ad613ed66ab0..d88d6fc530ad 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -7,7 +7,6 @@
 
 #include <linux/gfp.h>
 #include <linux/list.h>
-#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
@@ -725,7 +724,7 @@ static inline int shmem_lock(struct file *file, int lock,
 	return 0;
 }
 #endif
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
 
 int shmem_zero_setup(struct vm_area_struct *);
 
@@ -793,6 +792,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -824,8 +825,11 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-	int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+			unsigned long start, int len, int write, int force,
+			struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -850,19 +854,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 	       unsigned long end, unsigned long newflags);
 
 /*
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm (force=0 and doesn't return any vmas).
- *
- * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
- * can be made about locking. get_user_pages_fast is to be implemented in a
- * way that is advantageous (vs get_user_pages()) when the user memory area is
- * already faulted in and present in ptes. However if the pages have to be
- * faulted in, it may turn out to be slightly slower).
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages);
-
-/*
  * A callback you can register to apply pressure to ageable caches.
  *
  * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
@@ -1061,7 +1052,8 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn);
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
-extern void setup_per_zone_pages_min(void);
+extern void setup_per_zone_wmarks(void);
+extern void calculate_zone_inactive_ratio(struct zone *zone);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(void);
@@ -1178,8 +1170,6 @@ void task_dirty_inc(struct task_struct *tsk);
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
 
@@ -1197,6 +1187,9 @@ void page_cache_async_readahead(struct address_space *mapping,
 			unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+			struct address_space *mapping,
+			struct file *filp);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
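get_user_pages_fast() is now declared unconditionally next to get_user_pages(), with its long descriptive comment moved out of the header. A hedged sketch of the usual pin/use/release pattern against the signature shown above; the helper name is hypothetical:

	#include <linux/errno.h>
	#include <linux/mm.h>

	/* Hypothetical example: pin up to 'nr' user pages at 'uaddr' for write
	 * access, then drop the references. Runs in process context. */
	static int example_pin_user_buffer(unsigned long uaddr, int nr,
					   struct page **pages)
	{
		int i, pinned;

		pinned = get_user_pages_fast(uaddr, nr, 1 /* write */, pages);
		if (pinned <= 0)
			return pinned ? pinned : -EFAULT;

		/* ... use the pages here ... */

		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		return 0;
	}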
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0042090a4d70..7acc8439d9b3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -240,6 +240,8 @@ struct mm_struct {
 
 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
+	s8 oom_adj;	/* OOM kill score adjustment (bit shift) */
+
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a47c879e1304..889598537370 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -50,9 +50,6 @@ extern int page_group_by_mobility_disabled;
 
 static inline int get_pageblock_migratetype(struct page *page)
 {
-	if (unlikely(page_group_by_mobility_disabled))
-		return MIGRATE_UNMOVABLE;
-
 	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
 }
 
@@ -86,13 +83,8 @@ enum zone_stat_item {
 	NR_ACTIVE_ANON,		/* " " " " " */
 	NR_INACTIVE_FILE,	/* " " " " " */
 	NR_ACTIVE_FILE,		/* " " " " " */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	NR_UNEVICTABLE,		/* " " " " " */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
-#else
-	NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
-	NR_MLOCK = NR_ACTIVE_FILE,
-#endif
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
@@ -135,11 +127,7 @@ enum lru_list {
 	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
 	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
 	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-#ifdef CONFIG_UNEVICTABLE_LRU
 	LRU_UNEVICTABLE,
-#else
-	LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
-#endif
 	NR_LRU_LISTS
 };
 
@@ -159,13 +147,20 @@
 
 static inline int is_unevictable_lru(enum lru_list l)
 {
-#ifdef CONFIG_UNEVICTABLE_LRU
 	return (l == LRU_UNEVICTABLE);
-#else
-	return 0;
-#endif
 }
 
+enum zone_watermarks {
+	WMARK_MIN,
+	WMARK_LOW,
+	WMARK_HIGH,
+	NR_WMARK
+};
+
+#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
+#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
+#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
@@ -278,7 +273,10 @@
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
-	unsigned long		pages_min, pages_low, pages_high;
+
+	/* zone watermarks, access with *_wmark_pages(zone) macros */
+	unsigned long watermark[NR_WMARK];
+
 	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
 	 * or/and it will be released eventually, so to avoid totally wasting several
@@ -323,9 +321,9 @@
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	struct {
+	struct zone_lru {
 		struct list_head list;
-		unsigned long nr_scan;
+		unsigned long nr_saved_scan;	/* accumulated for batching */
 	} lru[NR_LRU_LISTS];
 
 	struct zone_reclaim_stat reclaim_stat;
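The three per-zone watermarks now live in a single array indexed by enum zone_watermarks, so call sites that read zone->pages_min/pages_low/pages_high switch to the *_wmark_pages() accessors. A hedged illustration of the pattern (the helper is made up, not code from this merge):

	#include <linux/mmzone.h>
	#include <linux/vmstat.h>

	/* Hypothetical check that previously read zone->pages_low directly;
	 * the same value is now reached through low_wmark_pages(zone). */
	static int example_zone_below_low_wmark(struct zone *zone)
	{
		return zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone);
	}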
diff --git a/include/linux/module.h b/include/linux/module.h
index a7bc6e7b43a7..505f20dcc1c7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -697,4 +697,21 @@ static inline void module_remove_modinfo_attrs(struct module *mod)
 
 #define __MODULE_STRING(x) __stringify(x)
 
+
+#ifdef CONFIG_GENERIC_BUG
+int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+			struct module *);
+void module_bug_cleanup(struct module *);
+
+#else	/* !CONFIG_GENERIC_BUG */
+
+static inline int module_bug_finalize(const Elf_Ehdr *hdr,
+				      const Elf_Shdr *sechdrs,
+				      struct module *mod)
+{
+	return 0;
+}
+static inline void module_bug_cleanup(struct module *mod) {}
+#endif	/* CONFIG_GENERIC_BUG */
+
 #endif /* _LINUX_MODULE_H */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 848025cd7087..829b94b156f2 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -408,6 +408,19 @@ static inline int num_node_state(enum node_states state)
 #define next_online_node(nid)	next_node((nid), node_states[N_ONLINE])
 
 extern int nr_node_ids;
+extern int nr_online_nodes;
+
+static inline void node_set_online(int nid)
+{
+	node_set_state(nid, N_ONLINE);
+	nr_online_nodes = num_node_state(N_ONLINE);
+}
+
+static inline void node_set_offline(int nid)
+{
+	node_clear_state(nid, N_ONLINE);
+	nr_online_nodes = num_node_state(N_ONLINE);
+}
 #else
 
 static inline int node_state(int node, enum node_states state)
@@ -434,7 +447,10 @@ static inline int num_node_state(enum node_states state)
 #define first_online_node	0
 #define next_online_node(nid)	(MAX_NUMNODES)
 #define nr_node_ids		1
+#define nr_online_nodes		1
 
+#define node_set_online(node)	   node_set_state((node), N_ONLINE)
+#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)
 #endif
 
 #define node_online_map 	node_states[N_ONLINE]
@@ -454,9 +470,6 @@ static inline int num_node_state(enum node_states state)
 #define node_online(node)	node_state((node), N_ONLINE)
 #define node_possible(node)	node_state((node), N_POSSIBLE)
 
-#define node_set_online(node)	   node_set_state((node), N_ONLINE)
-#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)
-
 #define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
 #define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
 
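node_set_online()/node_set_offline() now also refresh the new nr_online_nodes counter, so hot paths can test a cached value instead of recounting the online map. A minimal hedged sketch (the helper name is hypothetical):

	#include <linux/nodemask.h>

	/* Hypothetical fast-path check: more than one memory node online?
	 * nr_online_nodes is kept current by node_set_online()/_offline(). */
	static inline int example_multi_node_system(void)
	{
		return nr_online_nodes > 1;
	}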
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 62214c7d2d93..d6792f88a176 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -95,9 +95,7 @@ enum pageflags {
 	PG_reclaim,		/* To be reclaimed asap */
 	PG_buddy,		/* Page is free, on buddy lists */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	PG_unevictable,		/* Page is "unevictable" */
-#endif
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
@@ -248,14 +246,8 @@ PAGEFLAG_FALSE(SwapCache)
 	SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 	TESTCLEARFLAG(Unevictable, unevictable)
-#else
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
-	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
-	__CLEARPAGEFLAG_NOOP(Unevictable)
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define MLOCK_PAGES 1
@@ -382,12 +374,6 @@ static inline void __ClearPageTail(struct page *page)
 
 #endif /* !PAGEFLAGS_EXTENDED */
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-#define __PG_UNEVICTABLE	(1 << PG_unevictable)
-#else
-#define __PG_UNEVICTABLE	0
-#endif
-
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define __PG_MLOCKED		(1 << PG_mlocked)
 #else
@@ -403,7 +389,7 @@ static inline void __ClearPageTail(struct page *page)
 	 1 << PG_private | 1 << PG_private_2 | \
 	 1 << PG_buddy	 | 1 << PG_writeback | 1 << PG_reserved | \
 	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
-	 __PG_UNEVICTABLE | __PG_MLOCKED)
+	 1 << PG_unevictable | __PG_MLOCKED)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 34da5230faab..aec3252afcf5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -22,9 +22,7 @@ enum mapping_flags {
 	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
 	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
 	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
-#endif
 };
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -37,8 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
 	}
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-
 static inline void mapping_set_unevictable(struct address_space *mapping)
 {
 	set_bit(AS_UNEVICTABLE, &mapping->flags);
@@ -55,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
 		return test_bit(AS_UNEVICTABLE, &mapping->flags);
 	return !!mapping;
 }
-#else
-static inline void mapping_set_unevictable(struct address_space *mapping) { }
-static inline void mapping_clear_unevictable(struct address_space *mapping) { }
-static inline int mapping_unevictable(struct address_space *mapping)
-{
-	return 0;
-}
-#endif
 
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
diff --git a/include/linux/poll.h b/include/linux/poll.h index 8c24ef8d9976..fa287f25138d 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h | |||
| @@ -32,6 +32,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_ | |||
| 32 | 32 | ||
| 33 | typedef struct poll_table_struct { | 33 | typedef struct poll_table_struct { |
| 34 | poll_queue_proc qproc; | 34 | poll_queue_proc qproc; |
| 35 | unsigned long key; | ||
| 35 | } poll_table; | 36 | } poll_table; |
| 36 | 37 | ||
| 37 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) | 38 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) |
| @@ -43,10 +44,12 @@ static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_addres | |||
| 43 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) | 44 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) |
| 44 | { | 45 | { |
| 45 | pt->qproc = qproc; | 46 | pt->qproc = qproc; |
| 47 | pt->key = ~0UL; /* all events enabled */ | ||
| 46 | } | 48 | } |
| 47 | 49 | ||
| 48 | struct poll_table_entry { | 50 | struct poll_table_entry { |
| 49 | struct file *filp; | 51 | struct file *filp; |
| 52 | unsigned long key; | ||
| 50 | wait_queue_t wait; | 53 | wait_queue_t wait; |
| 51 | wait_queue_head_t *wait_address; | 54 | wait_queue_head_t *wait_address; |
| 52 | }; | 55 | }; |
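Editorial note: the new poll_table "key" field carries an event mask for keyed wakeups; init_poll_funcptr() initialises it to ~0UL, i.e. all events enabled. A hedged sketch of a caller that narrows the mask before invoking a file's ->poll() (the helper name and the "interested" mask are assumptions; the real select/poll core derives the mask from the user's fd sets):

    #include <linux/poll.h>
    #include <linux/fs.h>

    static unsigned int example_poll_one(struct file *file, poll_table *pt,
                                         unsigned long interested)
    {
            pt->key = interested;   /* default from init_poll_funcptr() is ~0UL */
            return file->f_op->poll(file, pt);
    }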
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 355f6e80db0d..c5da74918096 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -167,6 +167,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
| 167 | unsigned long first_index, unsigned int max_items); | 167 | unsigned long first_index, unsigned int max_items); |
| 168 | unsigned long radix_tree_next_hole(struct radix_tree_root *root, | 168 | unsigned long radix_tree_next_hole(struct radix_tree_root *root, |
| 169 | unsigned long index, unsigned long max_scan); | 169 | unsigned long index, unsigned long max_scan); |
| 170 | unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | ||
| 171 | unsigned long index, unsigned long max_scan); | ||
| 170 | int radix_tree_preload(gfp_t gfp_mask); | 172 | int radix_tree_preload(gfp_t gfp_mask); |
| 171 | void radix_tree_init(void); | 173 | void radix_tree_init(void); |
| 172 | void *radix_tree_tag_set(struct radix_tree_root *root, | 174 | void *radix_tree_tag_set(struct radix_tree_root *root, |
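Editorial note: radix_tree_prev_hole() is the backward-scanning counterpart of radix_tree_next_hole(). A sketch of the kind of use it enables in readahead heuristics, counting how many pages are contiguously present in the page cache ending at @index (hypothetical helper; assumes index > 0 and that the caller holds rcu_read_lock() or the mapping's tree_lock for the lookup):

    #include <linux/radix-tree.h>
    #include <linux/pagemap.h>

    static unsigned long example_history_pages(struct address_space *mapping,
                                               pgoff_t index, unsigned long max)
    {
            pgoff_t head = radix_tree_prev_hole(&mapping->page_tree,
                                                index - 1, max);

            /* slots in (head, index) are all occupied */
            return index - 1 - head;
    }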
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b35bc0e19cd9..216d024f830d 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
| @@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, | |||
| 83 | /* | 83 | /* |
| 84 | * Called from mm/vmscan.c to handle paging out | 84 | * Called from mm/vmscan.c to handle paging out |
| 85 | */ | 85 | */ |
| 86 | int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt); | 86 | int page_referenced(struct page *, int is_locked, |
| 87 | struct mem_cgroup *cnt, unsigned long *vm_flags); | ||
| 87 | int try_to_unmap(struct page *, int ignore_refs); | 88 | int try_to_unmap(struct page *, int ignore_refs); |
| 88 | 89 | ||
| 89 | /* | 90 | /* |
| @@ -105,18 +106,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); | |||
| 105 | */ | 106 | */ |
| 106 | int page_mkclean(struct page *); | 107 | int page_mkclean(struct page *); |
| 107 | 108 | ||
| 108 | #ifdef CONFIG_UNEVICTABLE_LRU | ||
| 109 | /* | 109 | /* |
| 110 | * called in munlock()/munmap() path to check for other vmas holding | 110 | * called in munlock()/munmap() path to check for other vmas holding |
| 111 | * the page mlocked. | 111 | * the page mlocked. |
| 112 | */ | 112 | */ |
| 113 | int try_to_munlock(struct page *); | 113 | int try_to_munlock(struct page *); |
| 114 | #else | ||
| 115 | static inline int try_to_munlock(struct page *page) | ||
| 116 | { | ||
| 117 | return 0; /* a.k.a. SWAP_SUCCESS */ | ||
| 118 | } | ||
| 119 | #endif | ||
| 120 | 114 | ||
| 121 | #else /* !CONFIG_MMU */ | 115 | #else /* !CONFIG_MMU */ |
| 122 | 116 | ||
| @@ -124,7 +118,7 @@ static inline int try_to_munlock(struct page *page) | |||
| 124 | #define anon_vma_prepare(vma) (0) | 118 | #define anon_vma_prepare(vma) (0) |
| 125 | #define anon_vma_link(vma) do {} while (0) | 119 | #define anon_vma_link(vma) do {} while (0) |
| 126 | 120 | ||
| 127 | #define page_referenced(page,l,cnt) TestClearPageReferenced(page) | 121 | #define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page) |
| 128 | #define try_to_unmap(page, refs) SWAP_FAIL | 122 | #define try_to_unmap(page, refs) SWAP_FAIL |
| 129 | 123 | ||
| 130 | static inline int page_mkclean(struct page *page) | 124 | static inline int page_mkclean(struct page *page) |
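Editorial note: page_referenced() now reports, through the extra vm_flags argument, the VM_* flags of the vmas that referenced the page, so callers such as reclaim can treat e.g. executable mappings specially. A hedged sketch of the updated call (hypothetical helper; NULL means the caller is not scanning on behalf of a memory cgroup):

    #include <linux/rmap.h>
    #include <linux/mm.h>

    static int example_check_referenced(struct page *page)
    {
            unsigned long vm_flags = 0;
            int referenced = page_referenced(page, 0, NULL, &vm_flags);

            if (referenced && (vm_flags & VM_EXEC))
                    return 1;       /* recently used executable page */
            return referenced;
    }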
diff --git a/include/linux/sched.h b/include/linux/sched.h index 7531b1c28201..02042e7f2196 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1178,7 +1178,6 @@ struct task_struct { | |||
| 1178 | * a short time | 1178 | * a short time |
| 1179 | */ | 1179 | */ |
| 1180 | unsigned char fpu_counter; | 1180 | unsigned char fpu_counter; |
| 1181 | s8 oomkilladj; /* OOM kill score adjustment (bit shift). */ | ||
| 1182 | #ifdef CONFIG_BLK_DEV_IO_TRACE | 1181 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
| 1183 | unsigned int btrace_seq; | 1182 | unsigned int btrace_seq; |
| 1184 | #endif | 1183 | #endif |
| @@ -1318,7 +1317,8 @@ struct task_struct { | |||
| 1318 | /* Thread group tracking */ | 1317 | /* Thread group tracking */ |
| 1319 | u32 parent_exec_id; | 1318 | u32 parent_exec_id; |
| 1320 | u32 self_exec_id; | 1319 | u32 self_exec_id; |
| 1321 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ | 1320 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, |
| 1321 | * mempolicy */ | ||
| 1322 | spinlock_t alloc_lock; | 1322 | spinlock_t alloc_lock; |
| 1323 | 1323 | ||
| 1324 | #ifdef CONFIG_GENERIC_HARDIRQS | 1324 | #ifdef CONFIG_GENERIC_HARDIRQS |
| @@ -1386,8 +1386,7 @@ struct task_struct { | |||
| 1386 | cputime_t acct_timexpd; /* stime + utime since last update */ | 1386 | cputime_t acct_timexpd; /* stime + utime since last update */ |
| 1387 | #endif | 1387 | #endif |
| 1388 | #ifdef CONFIG_CPUSETS | 1388 | #ifdef CONFIG_CPUSETS |
| 1389 | nodemask_t mems_allowed; | 1389 | nodemask_t mems_allowed; /* Protected by alloc_lock */ |
| 1390 | int cpuset_mems_generation; | ||
| 1391 | int cpuset_mem_spread_rotor; | 1390 | int cpuset_mem_spread_rotor; |
| 1392 | #endif | 1391 | #endif |
| 1393 | #ifdef CONFIG_CGROUPS | 1392 | #ifdef CONFIG_CGROUPS |
| @@ -1410,7 +1409,7 @@ struct task_struct { | |||
| 1410 | struct list_head perf_counter_list; | 1409 | struct list_head perf_counter_list; |
| 1411 | #endif | 1410 | #endif |
| 1412 | #ifdef CONFIG_NUMA | 1411 | #ifdef CONFIG_NUMA |
| 1413 | struct mempolicy *mempolicy; | 1412 | struct mempolicy *mempolicy; /* Protected by alloc_lock */ |
| 1414 | short il_next; | 1413 | short il_next; |
| 1415 | #endif | 1414 | #endif |
| 1416 | atomic_t fs_excl; /* holding fs exclusive resources */ | 1415 | atomic_t fs_excl; /* holding fs exclusive resources */ |
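Editorial note: the new comments above document that mems_allowed and mempolicy are protected by alloc_lock, i.e. task_lock()/task_unlock(). A minimal sketch of a reader taking a stable snapshot under that lock (hypothetical helper):

    #include <linux/sched.h>
    #include <linux/nodemask.h>

    static void example_snapshot_mems(struct task_struct *tsk, nodemask_t *out)
    {
    #ifdef CONFIG_CPUSETS
            task_lock(tsk);                 /* takes tsk->alloc_lock */
            *out = tsk->mems_allowed;
            task_unlock(tsk);
    #else
            nodes_setall(*out);             /* no cpusets: all nodes allowed */
    #endif
    }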
diff --git a/include/linux/smp.h b/include/linux/smp.h index a69db820eed6..9e3d8af09207 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
| @@ -177,7 +177,6 @@ static inline void init_call_single_data(void) | |||
| 177 | 177 | ||
| 178 | #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) | 178 | #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) |
| 179 | #define put_cpu() preempt_enable() | 179 | #define put_cpu() preempt_enable() |
| 180 | #define put_cpu_no_resched() preempt_enable_no_resched() | ||
| 181 | 180 | ||
| 182 | /* | 181 | /* |
| 183 | * Callback to arch code if there's nosmp or maxcpus=0 on the | 182 | * Callback to arch code if there's nosmp or maxcpus=0 on the |
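Editorial note: with put_cpu_no_resched() removed, the remaining pattern is the plain get_cpu()/put_cpu() pair, which brackets a preemption-disabled region (hypothetical helper):

    #include <linux/smp.h>

    static void example_on_this_cpu(void)
    {
            int cpu = get_cpu();    /* preempt_disable() + smp_processor_id() */

            /* ... per-cpu work bound to "cpu" ... */
            (void)cpu;

            put_cpu();              /* preempt_enable(), may reschedule */
    }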
diff --git a/include/linux/swap.h b/include/linux/swap.h index d476aad3ff57..0cedf31af0b0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -129,9 +129,10 @@ enum { | |||
| 129 | 129 | ||
| 130 | #define SWAP_CLUSTER_MAX 32 | 130 | #define SWAP_CLUSTER_MAX 32 |
| 131 | 131 | ||
| 132 | #define SWAP_MAP_MAX 0x7fff | 132 | #define SWAP_MAP_MAX 0x7ffe |
| 133 | #define SWAP_MAP_BAD 0x8000 | 133 | #define SWAP_MAP_BAD 0x7fff |
| 134 | 134 | #define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */ | |
| 135 | #define SWAP_COUNT_MASK (~SWAP_HAS_CACHE) | ||
| 135 | /* | 136 | /* |
| 136 | * The in-memory structure used to track swap areas. | 137 | * The in-memory structure used to track swap areas. |
| 137 | */ | 138 | */ |
| @@ -235,7 +236,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) | |||
| 235 | } | 236 | } |
| 236 | #endif | 237 | #endif |
| 237 | 238 | ||
| 238 | #ifdef CONFIG_UNEVICTABLE_LRU | ||
| 239 | extern int page_evictable(struct page *page, struct vm_area_struct *vma); | 239 | extern int page_evictable(struct page *page, struct vm_area_struct *vma); |
| 240 | extern void scan_mapping_unevictable_pages(struct address_space *); | 240 | extern void scan_mapping_unevictable_pages(struct address_space *); |
| 241 | 241 | ||
| @@ -244,24 +244,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *, | |||
| 244 | void __user *, size_t *, loff_t *); | 244 | void __user *, size_t *, loff_t *); |
| 245 | extern int scan_unevictable_register_node(struct node *node); | 245 | extern int scan_unevictable_register_node(struct node *node); |
| 246 | extern void scan_unevictable_unregister_node(struct node *node); | 246 | extern void scan_unevictable_unregister_node(struct node *node); |
| 247 | #else | ||
| 248 | static inline int page_evictable(struct page *page, | ||
| 249 | struct vm_area_struct *vma) | ||
| 250 | { | ||
| 251 | return 1; | ||
| 252 | } | ||
| 253 | |||
| 254 | static inline void scan_mapping_unevictable_pages(struct address_space *mapping) | ||
| 255 | { | ||
| 256 | } | ||
| 257 | |||
| 258 | static inline int scan_unevictable_register_node(struct node *node) | ||
| 259 | { | ||
| 260 | return 0; | ||
| 261 | } | ||
| 262 | |||
| 263 | static inline void scan_unevictable_unregister_node(struct node *node) { } | ||
| 264 | #endif | ||
| 265 | 247 | ||
| 266 | extern int kswapd_run(int nid); | 248 | extern int kswapd_run(int nid); |
| 267 | 249 | ||
| @@ -274,7 +256,7 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); | |||
| 274 | 256 | ||
| 275 | #ifdef CONFIG_SWAP | 257 | #ifdef CONFIG_SWAP |
| 276 | /* linux/mm/page_io.c */ | 258 | /* linux/mm/page_io.c */ |
| 277 | extern int swap_readpage(struct file *, struct page *); | 259 | extern int swap_readpage(struct page *); |
| 278 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); | 260 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); |
| 279 | extern void end_swap_bio_read(struct bio *bio, int err); | 261 | extern void end_swap_bio_read(struct bio *bio, int err); |
| 280 | 262 | ||
| @@ -300,9 +282,11 @@ extern long total_swap_pages; | |||
| 300 | extern void si_swapinfo(struct sysinfo *); | 282 | extern void si_swapinfo(struct sysinfo *); |
| 301 | extern swp_entry_t get_swap_page(void); | 283 | extern swp_entry_t get_swap_page(void); |
| 302 | extern swp_entry_t get_swap_page_of_type(int); | 284 | extern swp_entry_t get_swap_page_of_type(int); |
| 303 | extern int swap_duplicate(swp_entry_t); | 285 | extern void swap_duplicate(swp_entry_t); |
| 286 | extern int swapcache_prepare(swp_entry_t); | ||
| 304 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | 287 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
| 305 | extern void swap_free(swp_entry_t); | 288 | extern void swap_free(swp_entry_t); |
| 289 | extern void swapcache_free(swp_entry_t, struct page *page); | ||
| 306 | extern int free_swap_and_cache(swp_entry_t); | 290 | extern int free_swap_and_cache(swp_entry_t); |
| 307 | extern int swap_type_of(dev_t, sector_t, struct block_device **); | 291 | extern int swap_type_of(dev_t, sector_t, struct block_device **); |
| 308 | extern unsigned int count_swap_pages(int, int); | 292 | extern unsigned int count_swap_pages(int, int); |
| @@ -370,12 +354,20 @@ static inline void show_swap_cache_info(void) | |||
| 370 | } | 354 | } |
| 371 | 355 | ||
| 372 | #define free_swap_and_cache(swp) is_migration_entry(swp) | 356 | #define free_swap_and_cache(swp) is_migration_entry(swp) |
| 373 | #define swap_duplicate(swp) is_migration_entry(swp) | 357 | #define swapcache_prepare(swp) is_migration_entry(swp) |
| 358 | |||
| 359 | static inline void swap_duplicate(swp_entry_t swp) | ||
| 360 | { | ||
| 361 | } | ||
| 374 | 362 | ||
| 375 | static inline void swap_free(swp_entry_t swp) | 363 | static inline void swap_free(swp_entry_t swp) |
| 376 | { | 364 | { |
| 377 | } | 365 | } |
| 378 | 366 | ||
| 367 | static inline void swapcache_free(swp_entry_t swp, struct page *page) | ||
| 368 | { | ||
| 369 | } | ||
| 370 | |||
| 379 | static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, | 371 | static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, |
| 380 | struct vm_area_struct *vma, unsigned long addr) | 372 | struct vm_area_struct *vma, unsigned long addr) |
| 381 | { | 373 | { |
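Editorial note on the new swap_map encoding above: the top bit (SWAP_HAS_CACHE) records whether a slot has a swap cache page, while the remaining bits hold the reference count, with 0x7fff reserved as SWAP_MAP_BAD; swap_duplicate() becomes void and swapcache_prepare()/swapcache_free() manage the cache bit. A hedged sketch of decoding one entry (hypothetical helpers; the real accounting lives in mm/swapfile.c):

    #include <linux/swap.h>

    static inline int example_swap_count(unsigned short entry)
    {
            unsigned short count = entry & SWAP_COUNT_MASK;

            if (count == SWAP_MAP_BAD)
                    return 0;               /* unusable slot */
            return count;                   /* up to SWAP_MAP_MAX references */
    }

    static inline int example_has_swapcache(unsigned short entry)
    {
            return !!(entry & SWAP_HAS_CACHE);
    }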
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 418d90f5effe..fa4242cdade8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -434,6 +434,7 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); | |||
| 434 | asmlinkage long sys_fcntl64(unsigned int fd, | 434 | asmlinkage long sys_fcntl64(unsigned int fd, |
| 435 | unsigned int cmd, unsigned long arg); | 435 | unsigned int cmd, unsigned long arg); |
| 436 | #endif | 436 | #endif |
| 437 | asmlinkage long sys_pipe(int __user *fildes); | ||
| 437 | asmlinkage long sys_pipe2(int __user *fildes, int flags); | 438 | asmlinkage long sys_pipe2(int __user *fildes, int flags); |
| 438 | asmlinkage long sys_dup(unsigned int fildes); | 439 | asmlinkage long sys_dup(unsigned int fildes); |
| 439 | asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); | 440 | asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); |
| @@ -751,8 +752,6 @@ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, | |||
| 751 | asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, | 752 | asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, |
| 752 | struct timespec __user *, const sigset_t __user *, | 753 | struct timespec __user *, const sigset_t __user *, |
| 753 | size_t); | 754 | size_t); |
| 754 | asmlinkage long sys_pipe2(int __user *, int); | ||
| 755 | asmlinkage long sys_pipe(int __user *); | ||
| 756 | 755 | ||
| 757 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 756 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); |
| 758 | 757 | ||
diff --git a/include/linux/timex.h b/include/linux/timex.h index 9910e3bd5b31..e6967d10d9e5 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h | |||
| @@ -280,6 +280,9 @@ extern int do_adjtimex(struct timex *); | |||
| 280 | 280 | ||
| 281 | int read_current_timer(unsigned long *timer_val); | 281 | int read_current_timer(unsigned long *timer_val); |
| 282 | 282 | ||
| 283 | /* The clock frequency of the i8253/i8254 PIT */ | ||
| 284 | #define PIT_TICK_RATE 1193182ul | ||
| 285 | |||
| 283 | #endif /* KERNEL */ | 286 | #endif /* KERNEL */ |
| 284 | 287 | ||
| 285 | #endif /* LINUX_TIMEX_H */ | 288 | #endif /* LINUX_TIMEX_H */ |
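Editorial note: the i8253/i8254 input clock is now defined centrally as PIT_TICK_RATE. A hedged sketch of the usual rounded-quotient arithmetic it feeds, computing a periodic-mode reload value for a desired interrupt rate (hypothetical helper, mirroring the LATCH-style calculation):

    #include <linux/timex.h>

    static inline unsigned int example_pit_latch(unsigned int hz)
    {
            /* e.g. (1193182 + 50) / 100 == 11932 for hz == 100 */
            return (PIT_TICK_RATE + hz / 2) / hz;
    }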
diff --git a/include/linux/utsname.h b/include/linux/utsname.h index 11232676bfff..3656b300de3a 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h | |||
| @@ -22,12 +22,12 @@ struct old_utsname { | |||
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | struct new_utsname { | 24 | struct new_utsname { |
| 25 | char sysname[65]; | 25 | char sysname[__NEW_UTS_LEN + 1]; |
| 26 | char nodename[65]; | 26 | char nodename[__NEW_UTS_LEN + 1]; |
| 27 | char release[65]; | 27 | char release[__NEW_UTS_LEN + 1]; |
| 28 | char version[65]; | 28 | char version[__NEW_UTS_LEN + 1]; |
| 29 | char machine[65]; | 29 | char machine[__NEW_UTS_LEN + 1]; |
| 30 | char domainname[65]; | 30 | char domainname[__NEW_UTS_LEN + 1]; |
| 31 | }; | 31 | }; |
| 32 | 32 | ||
| 33 | #ifdef __KERNEL__ | 33 | #ifdef __KERNEL__ |
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h new file mode 100644 index 000000000000..8f6a95882b09 --- /dev/null +++ b/include/linux/vlynq.h | |||
| @@ -0,0 +1,161 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org> | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef __VLYNQ_H__ | ||
| 20 | #define __VLYNQ_H__ | ||
| 21 | |||
| 22 | #include <linux/device.h> | ||
| 23 | #include <linux/module.h> | ||
| 24 | #include <linux/types.h> | ||
| 25 | |||
| 26 | #define VLYNQ_NUM_IRQS 32 | ||
| 27 | |||
| 28 | struct vlynq_mapping { | ||
| 29 | u32 size; | ||
| 30 | u32 offset; | ||
| 31 | }; | ||
| 32 | |||
| 33 | enum vlynq_divisor { | ||
| 34 | vlynq_div_auto = 0, | ||
| 35 | vlynq_ldiv1, | ||
| 36 | vlynq_ldiv2, | ||
| 37 | vlynq_ldiv3, | ||
| 38 | vlynq_ldiv4, | ||
| 39 | vlynq_ldiv5, | ||
| 40 | vlynq_ldiv6, | ||
| 41 | vlynq_ldiv7, | ||
| 42 | vlynq_ldiv8, | ||
| 43 | vlynq_rdiv1, | ||
| 44 | vlynq_rdiv2, | ||
| 45 | vlynq_rdiv3, | ||
| 46 | vlynq_rdiv4, | ||
| 47 | vlynq_rdiv5, | ||
| 48 | vlynq_rdiv6, | ||
| 49 | vlynq_rdiv7, | ||
| 50 | vlynq_rdiv8, | ||
| 51 | vlynq_div_external | ||
| 52 | }; | ||
| 53 | |||
| 54 | struct vlynq_device_id { | ||
| 55 | u32 id; | ||
| 56 | enum vlynq_divisor divisor; | ||
| 57 | unsigned long driver_data; | ||
| 58 | }; | ||
| 59 | |||
| 60 | struct vlynq_regs; | ||
| 61 | struct vlynq_device { | ||
| 62 | u32 id, dev_id; | ||
| 63 | int local_irq; | ||
| 64 | int remote_irq; | ||
| 65 | enum vlynq_divisor divisor; | ||
| 66 | u32 regs_start, regs_end; | ||
| 67 | u32 mem_start, mem_end; | ||
| 68 | u32 irq_start, irq_end; | ||
| 69 | int irq; | ||
| 70 | int enabled; | ||
| 71 | struct vlynq_regs *local; | ||
| 72 | struct vlynq_regs *remote; | ||
| 73 | struct device dev; | ||
| 74 | }; | ||
| 75 | |||
| 76 | struct vlynq_driver { | ||
| 77 | char *name; | ||
| 78 | struct vlynq_device_id *id_table; | ||
| 79 | int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id); | ||
| 80 | void (*remove)(struct vlynq_device *dev); | ||
| 81 | struct device_driver driver; | ||
| 82 | }; | ||
| 83 | |||
| 84 | struct plat_vlynq_ops { | ||
| 85 | int (*on)(struct vlynq_device *dev); | ||
| 86 | void (*off)(struct vlynq_device *dev); | ||
| 87 | }; | ||
| 88 | |||
| 89 | static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv) | ||
| 90 | { | ||
| 91 | return container_of(drv, struct vlynq_driver, driver); | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline struct vlynq_device *to_vlynq_device(struct device *device) | ||
| 95 | { | ||
| 96 | return container_of(device, struct vlynq_device, dev); | ||
| 97 | } | ||
| 98 | |||
| 99 | extern struct bus_type vlynq_bus_type; | ||
| 100 | |||
| 101 | extern int __vlynq_register_driver(struct vlynq_driver *driver, | ||
| 102 | struct module *owner); | ||
| 103 | |||
| 104 | static inline int vlynq_register_driver(struct vlynq_driver *driver) | ||
| 105 | { | ||
| 106 | return __vlynq_register_driver(driver, THIS_MODULE); | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline void *vlynq_get_drvdata(struct vlynq_device *dev) | ||
| 110 | { | ||
| 111 | return dev_get_drvdata(&dev->dev); | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data) | ||
| 115 | { | ||
| 116 | dev_set_drvdata(&dev->dev, data); | ||
| 117 | } | ||
| 118 | |||
| 119 | static inline u32 vlynq_mem_start(struct vlynq_device *dev) | ||
| 120 | { | ||
| 121 | return dev->mem_start; | ||
| 122 | } | ||
| 123 | |||
| 124 | static inline u32 vlynq_mem_end(struct vlynq_device *dev) | ||
| 125 | { | ||
| 126 | return dev->mem_end; | ||
| 127 | } | ||
| 128 | |||
| 129 | static inline u32 vlynq_mem_len(struct vlynq_device *dev) | ||
| 130 | { | ||
| 131 | return dev->mem_end - dev->mem_start + 1; | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq) | ||
| 135 | { | ||
| 136 | int irq = dev->irq_start + virq; | ||
| 137 | if ((irq < dev->irq_start) || (irq > dev->irq_end)) | ||
| 138 | return -EINVAL; | ||
| 139 | |||
| 140 | return irq; | ||
| 141 | } | ||
| 142 | |||
| 143 | static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq) | ||
| 144 | { | ||
| 145 | if ((irq < dev->irq_start) || (irq > dev->irq_end)) | ||
| 146 | return -EINVAL; | ||
| 147 | |||
| 148 | return irq - dev->irq_start; | ||
| 149 | } | ||
| 150 | |||
| 151 | extern void vlynq_unregister_driver(struct vlynq_driver *driver); | ||
| 152 | extern int vlynq_enable_device(struct vlynq_device *dev); | ||
| 153 | extern void vlynq_disable_device(struct vlynq_device *dev); | ||
| 154 | extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, | ||
| 155 | struct vlynq_mapping *mapping); | ||
| 156 | extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, | ||
| 157 | struct vlynq_mapping *mapping); | ||
| 158 | extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq); | ||
| 159 | extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq); | ||
| 160 | |||
| 161 | #endif /* __VLYNQ_H__ */ | ||
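Editorial note: the new vlynq.h header defines a small bus API (device ids, driver registration, enable/disable, mapping and IRQ setup). A minimal driver skeleton built only from these declarations, as a hedged sketch; the "example" names and the device id value are hypothetical:

    #include <linux/vlynq.h>

    static struct vlynq_device_id example_ids[] = {
            { .id = 0x0029, .divisor = vlynq_div_auto, .driver_data = 0 },
            { }
    };

    static int example_probe(struct vlynq_device *dev,
                             struct vlynq_device_id *id)
    {
            int err = vlynq_enable_device(dev);

            if (err)
                    return err;
            vlynq_set_drvdata(dev, NULL);   /* stash per-device state here */
            return 0;
    }

    static void example_remove(struct vlynq_device *dev)
    {
            vlynq_disable_device(dev);
    }

    static struct vlynq_driver example_driver = {
            .name           = "example-vlynq",
            .id_table       = example_ids,
            .probe          = example_probe,
            .remove         = example_remove,
    };

    /*
     * Registered from module init with vlynq_register_driver(&example_driver)
     * and torn down with vlynq_unregister_driver(&example_driver).
     */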
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 524cd1b28ecb..81a97cf8f0a0 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
| @@ -36,12 +36,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 36 | FOR_ALL_ZONES(PGSTEAL), | 36 | FOR_ALL_ZONES(PGSTEAL), |
| 37 | FOR_ALL_ZONES(PGSCAN_KSWAPD), | 37 | FOR_ALL_ZONES(PGSCAN_KSWAPD), |
| 38 | FOR_ALL_ZONES(PGSCAN_DIRECT), | 38 | FOR_ALL_ZONES(PGSCAN_DIRECT), |
| 39 | #ifdef CONFIG_NUMA | ||
| 40 | PGSCAN_ZONE_RECLAIM_FAILED, | ||
| 41 | #endif | ||
| 39 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, | 42 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, |
| 40 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, | 43 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, |
| 41 | #ifdef CONFIG_HUGETLB_PAGE | 44 | #ifdef CONFIG_HUGETLB_PAGE |
| 42 | HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, | 45 | HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, |
| 43 | #endif | 46 | #endif |
| 44 | #ifdef CONFIG_UNEVICTABLE_LRU | ||
| 45 | UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ | 47 | UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ |
| 46 | UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ | 48 | UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ |
| 47 | UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ | 49 | UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ |
| @@ -50,7 +52,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 50 | UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ | 52 | UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ |
| 51 | UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ | 53 | UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ |
| 52 | UNEVICTABLE_MLOCKFREED, | 54 | UNEVICTABLE_MLOCKFREED, |
| 53 | #endif | ||
| 54 | NR_VM_EVENT_ITEMS | 55 | NR_VM_EVENT_ITEMS |
| 55 | }; | 56 | }; |
| 56 | 57 | ||
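Editorial note: the UNEVICTABLE_* event counters above become unconditional, and a NUMA-only PGSCAN_ZONE_RECLAIM_FAILED event is added. A hedged sketch of bumping the new counter like any other vm_event_item (hypothetical helper; the real site is the zone-reclaim failure path in mm):

    #include <linux/vmstat.h>

    static void example_account_zone_reclaim_failure(void)
    {
    #ifdef CONFIG_NUMA
            count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
    #endif
    }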
