author		Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:50:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:50:13 -0400
commit		517d08699b250021303f9a7cf0d758b6dc0748ed (patch)
tree		5e5b0134c3fffb78fe9d8b1641a64ff28fdd7bbc
parent		8eeee4e2f04fc551f50c9d9847da2d73d7d33728 (diff)
parent		a34601c5d84134055782ee031d58d82f5440e918 (diff)
Merge branch 'akpm'
* akpm: (182 commits)
  fbdev: bf54x-lq043fb: use kzalloc over kmalloc/memset
  fbdev: *bfin*: fix __dev{init,exit} markings
  fbdev: *bfin*: drop unnecessary calls to memset
  fbdev: bfin-t350mcqb-fb: drop unused local variables
  fbdev: blackfin has __raw I/O accessors, so use them in fb.h
  fbdev: s1d13xxxfb: add accelerated bitblt functions
  tcx: use standard fields for framebuffer physical address and length
  fbdev: add support for handoff from firmware to hw framebuffers
  intelfb: fix a bug when changing video timing
  fbdev: use framebuffer_release() for freeing fb_info structures
  radeon: P2G2CLK_ALWAYS_ONb tested twice, should 2nd be P2G2CLK_DAC_ALWAYS_ONb?
  s3c-fb: CPUFREQ frequency scaling support
  s3c-fb: fix resource releasing on error during probing
  carminefb: fix possible access beyond end of carmine_modedb[]
  acornfb: remove fb_mmap function
  mb862xxfb: use CONFIG_OF instead of CONFIG_PPC_OF
  mb862xxfb: restrict compliation of platform driver to PPC
  Samsung SoC Framebuffer driver: add Alpha Channel support
  atmel-lcdc: fix pixclock upper bound detection
  offb: use framebuffer_alloc() to allocate fb_info struct
  ...

Manually fix up conflicts due to kmemcheck in mm/slab.c
-rw-r--r--  Documentation/accounting/getdelays.c  3
-rw-r--r--  Documentation/atomic_ops.txt  4
-rw-r--r--  Documentation/fb/vesafb.txt  2
-rw-r--r--  Documentation/filesystems/proc.txt  15
-rw-r--r--  Documentation/kernel-parameters.txt  4
-rw-r--r--  Documentation/sysctl/vm.txt  23
-rw-r--r--  Documentation/vm/Makefile  2
-rw-r--r--  Documentation/vm/balance  18
-rw-r--r--  Documentation/vm/page-types.c  698
-rw-r--r--  Documentation/vm/pagemap.txt  68
-rw-r--r--  MAINTAINERS  123
-rw-r--r--  arch/alpha/include/asm/8253pit.h  7
-rw-r--r--  arch/alpha/include/asm/kmap_types.h  24
-rw-r--r--  arch/alpha/kernel/init_task.c  3
-rw-r--r--  arch/alpha/kernel/irq_alpha.c  2
-rw-r--r--  arch/alpha/kernel/irq_i8259.c  2
-rw-r--r--  arch/alpha/kernel/irq_impl.h  2
-rw-r--r--  arch/alpha/kernel/irq_pyxis.c  2
-rw-r--r--  arch/alpha/kernel/irq_srm.c  2
-rw-r--r--  arch/alpha/kernel/setup.c  6
-rw-r--r--  arch/alpha/kernel/sys_alcor.c  2
-rw-r--r--  arch/alpha/kernel/sys_cabriolet.c  2
-rw-r--r--  arch/alpha/kernel/sys_dp264.c  6
-rw-r--r--  arch/alpha/kernel/sys_eb64p.c  2
-rw-r--r--  arch/alpha/kernel/sys_eiger.c  2
-rw-r--r--  arch/alpha/kernel/sys_jensen.c  2
-rw-r--r--  arch/alpha/kernel/sys_marvel.c  10
-rw-r--r--  arch/alpha/kernel/sys_mikasa.c  2
-rw-r--r--  arch/alpha/kernel/sys_noritake.c  2
-rw-r--r--  arch/alpha/kernel/sys_rawhide.c  2
-rw-r--r--  arch/alpha/kernel/sys_ruffian.c  1
-rw-r--r--  arch/alpha/kernel/sys_rx164.c  2
-rw-r--r--  arch/alpha/kernel/sys_sable.c  2
-rw-r--r--  arch/alpha/kernel/sys_takara.c  2
-rw-r--r--  arch/alpha/kernel/sys_titan.c  4
-rw-r--r--  arch/alpha/kernel/sys_wildfire.c  2
-rw-r--r--  arch/alpha/mm/numa.c  6
-rw-r--r--  arch/arm/kernel/init_task.c  4
-rw-r--r--  arch/avr32/kernel/init_task.c  4
-rw-r--r--  arch/blackfin/include/asm/kmap_types.h  17
-rw-r--r--  arch/blackfin/kernel/init_task.c  4
-rw-r--r--  arch/cris/include/asm/kmap_types.h  17
-rw-r--r--  arch/cris/kernel/process.c  4
-rw-r--r--  arch/frv/kernel/init_task.c  4
-rw-r--r--  arch/h8300/include/asm/kmap_types.h  17
-rw-r--r--  arch/h8300/kernel/init_task.c  4
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c  2
-rw-r--r--  arch/ia64/include/asm/kmap_types.h  24
-rw-r--r--  arch/ia64/kernel/init_task.c  4
-rw-r--r--  arch/ia64/kernel/mca.c  3
-rw-r--r--  arch/ia64/kernel/perfmon.c  2
-rw-r--r--  arch/ia64/kernel/uncached.c  3
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c  3
-rw-r--r--  arch/m32r/include/asm/kmap_types.h  23
-rw-r--r--  arch/m32r/kernel/init_task.c  4
-rw-r--r--  arch/m32r/mm/discontig.c  6
-rw-r--r--  arch/m32r/platforms/m32104ut/setup.c  2
-rw-r--r--  arch/m32r/platforms/m32700ut/setup.c  8
-rw-r--r--  arch/m32r/platforms/mappi/setup.c  2
-rw-r--r--  arch/m32r/platforms/mappi2/setup.c  2
-rw-r--r--  arch/m32r/platforms/mappi3/setup.c  2
-rw-r--r--  arch/m32r/platforms/oaks32r/setup.c  2
-rw-r--r--  arch/m32r/platforms/opsput/setup.c  8
-rw-r--r--  arch/m32r/platforms/usrv/setup.c  4
-rw-r--r--  arch/m68k/include/asm/kmap_types.h  17
-rw-r--r--  arch/m68k/kernel/process.c  4
-rw-r--r--  arch/m68knommu/kernel/init_task.c  4
-rw-r--r--  arch/microblaze/include/asm/kmap_types.h  25
-rw-r--r--  arch/mips/include/asm/i8253.h  2
-rw-r--r--  arch/mips/include/asm/kmap_types.h  24
-rw-r--r--  arch/mips/kernel/init_task.c  4
-rw-r--r--  arch/mn10300/include/asm/kmap_types.h  27
-rw-r--r--  arch/mn10300/kernel/init_task.c  3
-rw-r--r--  arch/parisc/include/asm/kmap_types.h  24
-rw-r--r--  arch/parisc/kernel/init_task.c  4
-rw-r--r--  arch/powerpc/include/asm/8253pit.h  7
-rw-r--r--  arch/powerpc/kernel/init_task.c  4
-rw-r--r--  arch/powerpc/kernel/prom_init.c  3
-rw-r--r--  arch/powerpc/platforms/cell/ras.c  4
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c  11
-rw-r--r--  arch/s390/include/asm/kmap_types.h  17
-rw-r--r--  arch/s390/kernel/init_task.c  4
-rw-r--r--  arch/sh/include/asm/kmap_types.h  24
-rw-r--r--  arch/sh/kernel/init_task.c  3
-rw-r--r--  arch/sparc/include/asm/kmap_types.h  17
-rw-r--r--  arch/sparc/kernel/init_task.c  3
-rw-r--r--  arch/um/include/shared/init.h  2
-rw-r--r--  arch/um/include/shared/net_user.h  2
-rw-r--r--  arch/um/kernel/init_task.c  3
-rw-r--r--  arch/um/kernel/irq.c  6
-rw-r--r--  arch/um/sys-i386/stub.S  2
-rw-r--r--  arch/um/sys-x86_64/asm/elf.h  44
-rw-r--r--  arch/um/sys-x86_64/stub.S  2
-rw-r--r--  arch/x86/include/asm/kmap_types.h  23
-rw-r--r--  arch/x86/include/asm/timex.h  4
-rw-r--r--  arch/x86/kernel/cpu/common.c  11
-rw-r--r--  arch/x86/kernel/i8253.c  1
-rw-r--r--  arch/x86/kernel/init_task.c  1
-rw-r--r--  arch/x86/kernel/tsc.c  1
-rw-r--r--  arch/x86/kvm/vmx.c  2
-rw-r--r--  arch/xtensa/include/asm/kmap_types.h  27
-rw-r--r--  arch/xtensa/kernel/init_task.c  4
-rw-r--r--  drivers/Kconfig  2
-rw-r--r--  drivers/Makefile  1
-rw-r--r--  drivers/base/node.c  4
-rw-r--r--  drivers/char/vt.c  13
-rw-r--r--  drivers/clocksource/acpi_pm.c  1
-rw-r--r--  drivers/eisa/eisa.ids  5
-rw-r--r--  drivers/firmware/memmap.c  16
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c  8
-rw-r--r--  drivers/hwmon/Kconfig  2
-rw-r--r--  drivers/hwmon/hp_accel.c  20
-rw-r--r--  drivers/hwmon/lis3lv02d.c  187
-rw-r--r--  drivers/hwmon/lis3lv02d.h  29
-rw-r--r--  drivers/hwmon/lis3lv02d_spi.c  1
-rw-r--r--  drivers/input/joystick/analog.c  2
-rw-r--r--  drivers/input/misc/pcspkr.c  1
-rw-r--r--  drivers/media/video/videobuf-dma-contig.c  94
-rw-r--r--  drivers/misc/sgi-gru/grufile.c  2
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c  2
-rw-r--r--  drivers/pcmcia/pcmcia_ioctl.c  9
-rw-r--r--  drivers/spi/spi_mpc83xx.c  6
-rw-r--r--  drivers/video/Kconfig  1
-rw-r--r--  drivers/video/acornfb.c  38
-rw-r--r--  drivers/video/atmel_lcdfb.c  2
-rw-r--r--  drivers/video/aty/radeon_pm.c  3
-rw-r--r--  drivers/video/bf54x-lq043fb.c  15
-rw-r--r--  drivers/video/bfin-t350mcqb-fb.c  15
-rw-r--r--  drivers/video/carminefb.c  2
-rw-r--r--  drivers/video/chipsfb.c  1
-rw-r--r--  drivers/video/efifb.c  5
-rw-r--r--  drivers/video/fbmem.c  31
-rw-r--r--  drivers/video/igafb.c  8
-rw-r--r--  drivers/video/intelfb/intelfbdrv.c  5
-rw-r--r--  drivers/video/logo/Makefile  12
-rw-r--r--  drivers/video/logo/logo.c  15
-rw-r--r--  drivers/video/mb862xx/mb862xxfb.c  2
-rw-r--r--  drivers/video/modedb.c  8
-rw-r--r--  drivers/video/offb.c  8
-rw-r--r--  drivers/video/pm2fb.c  2
-rw-r--r--  drivers/video/s1d13xxxfb.c  341
-rw-r--r--  drivers/video/s3c-fb.c  53
-rw-r--r--  drivers/video/s3c2410fb.c  67
-rw-r--r--  drivers/video/s3c2410fb.h  5
-rw-r--r--  drivers/video/sis/sis_main.c  4
-rw-r--r--  drivers/video/stifb.c  2
-rw-r--r--  drivers/video/tcx.c  27
-rw-r--r--  drivers/video/vesafb.c  15
-rw-r--r--  drivers/vlynq/Kconfig  20
-rw-r--r--  drivers/vlynq/Makefile  5
-rw-r--r--  drivers/vlynq/vlynq.c  814
-rw-r--r--  fs/Kconfig  14
-rw-r--r--  fs/drop_caches.c  2
-rw-r--r--  fs/fs-writeback.c  6
-rw-r--r--  fs/nfs/iostat.h  6
-rw-r--r--  fs/ntfs/inode.c  3
-rw-r--r--  fs/ntfs/logfile.c  3
-rw-r--r--  fs/proc/base.c  19
-rw-r--r--  fs/proc/meminfo.c  4
-rw-r--r--  fs/proc/page.c  162
-rw-r--r--  fs/select.c  40
-rw-r--r--  include/asm-generic/kmap_types.h  2
-rw-r--r--  include/linux/bug.h  12
-rw-r--r--  include/linux/cpuset.h  13
-rw-r--r--  include/linux/fb.h  14
-rw-r--r--  include/linux/firmware-map.h  12
-rw-r--r--  include/linux/fs.h  5
-rw-r--r--  include/linux/gfp.h  150
-rw-r--r--  include/linux/highmem.h  4
-rw-r--r--  include/linux/hugetlb.h  7
-rw-r--r--  include/linux/init.h  2
-rw-r--r--  include/linux/init_task.h  12
-rw-r--r--  include/linux/linux_logo.h  16
-rw-r--r--  include/linux/lis3lv02d.h  39
-rw-r--r--  include/linux/major.h  1
-rw-r--r--  include/linux/memcontrol.h  7
-rw-r--r--  include/linux/mm.h  33
-rw-r--r--  include/linux/mm_types.h  2
-rw-r--r--  include/linux/mmzone.h  36
-rw-r--r--  include/linux/module.h  17
-rw-r--r--  include/linux/nodemask.h  19
-rw-r--r--  include/linux/page-flags.h  16
-rw-r--r--  include/linux/pagemap.h  12
-rw-r--r--  include/linux/poll.h  3
-rw-r--r--  include/linux/radix-tree.h  2
-rw-r--r--  include/linux/rmap.h  12
-rw-r--r--  include/linux/sched.h  9
-rw-r--r--  include/linux/smp.h  1
-rw-r--r--  include/linux/swap.h  42
-rw-r--r--  include/linux/syscalls.h  3
-rw-r--r--  include/linux/timex.h  3
-rw-r--r--  include/linux/utsname.h  12
-rw-r--r--  include/linux/vlynq.h  161
-rw-r--r--  include/linux/vmstat.h  5
-rw-r--r--  include/video/s1d13xxxfb.h  9
-rw-r--r--  init/main.c  6
-rw-r--r--  kernel/Makefile  1
-rw-r--r--  kernel/cpuset.c  260
-rw-r--r--  kernel/groups.c  288
-rw-r--r--  kernel/kfifo.c  4
-rw-r--r--  kernel/kthread.c  2
-rw-r--r--  kernel/power/process.c  5
-rw-r--r--  kernel/profile.c  8
-rw-r--r--  kernel/slow-work.c  23
-rw-r--r--  kernel/sys.c  283
-rw-r--r--  kernel/sysctl.c  2
-rw-r--r--  lib/dec_and_lock.c  3
-rw-r--r--  lib/genalloc.c  1
-rw-r--r--  lib/hexdump.c  15
-rw-r--r--  lib/radix-tree.c  110
-rw-r--r--  lib/rbtree.c  34
-rw-r--r--  mm/Kconfig  14
-rw-r--r--  mm/Makefile  1
-rw-r--r--  mm/fadvise.c  2
-rw-r--r--  mm/filemap.c  169
-rw-r--r--  mm/hugetlb.c  106
-rw-r--r--  mm/init-mm.c  20
-rw-r--r--  mm/internal.h  33
-rw-r--r--  mm/madvise.c  26
-rw-r--r--  mm/memcontrol.c  11
-rw-r--r--  mm/memory.c  128
-rw-r--r--  mm/memory_hotplug.c  6
-rw-r--r--  mm/mempolicy.c  145
-rw-r--r--  mm/migrate.c  6
-rw-r--r--  mm/mlock.c  22
-rw-r--r--  mm/oom_kill.c  64
-rw-r--r--  mm/page-writeback.c  19
-rw-r--r--  mm/page_alloc.c  754
-rw-r--r--  mm/page_io.c  2
-rw-r--r--  mm/readahead.c  145
-rw-r--r--  mm/rmap.c  40
-rw-r--r--  mm/shmem.c  4
-rw-r--r--  mm/slab.c  11
-rw-r--r--  mm/slob.c  4
-rw-r--r--  mm/slub.c  2
-rw-r--r--  mm/swap_state.c  17
-rw-r--r--  mm/swapfile.c  276
-rw-r--r--  mm/truncate.c  39
-rw-r--r--  mm/util.c  16
-rw-r--r--  mm/vmscan.c  372
-rw-r--r--  mm/vmstat.c  19
-rw-r--r--  net/sunrpc/svc.c  2
-rwxr-xr-x  scripts/get_maintainer.pl  234
-rw-r--r--  scripts/gfp-translate  81
-rw-r--r--  scripts/pnmtologo.c  18
-rw-r--r--  sound/drivers/pcsp/pcsp.h  1
-rw-r--r--  sound/oss/pas2_pcm.c  2
247 files changed, 5666 insertions, 2683 deletions
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index 7ea231172c85..aa73e72fd793 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -246,7 +246,8 @@ void print_ioacct(struct taskstats *t)
 
 int main(int argc, char *argv[])
 {
-	int c, rc, rep_len, aggr_len, len2, cmd_type;
+	int c, rc, rep_len, aggr_len, len2;
+	int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC;
 	__u16 id;
 	__u32 mypid;
 
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 4ef245010457..396bec3b74ed 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -229,10 +229,10 @@ kernel. It is the use of atomic counters to implement reference
 counting, and it works such that once the counter falls to zero it can
 be guaranteed that no other entity can be accessing the object:
 
-static void obj_list_add(struct obj *obj)
+static void obj_list_add(struct obj *obj, struct list_head *head)
 {
 	obj->active = 1;
-	list_add(&obj->list);
+	list_add(&obj->list, head);
 }
 
 static void obj_list_del(struct obj *obj)
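
The reference-counting pattern this hunk documents translates directly to
userspace; as a minimal sketch (using C11 <stdatomic.h> as a stand-in for the
kernel's atomic_t, with obj_alloc()/obj_get()/obj_put() as illustrative names,
not kernel API):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	/* ... payload ... */
};

static struct obj *obj_alloc(void)
{
	struct obj *obj = calloc(1, sizeof(*obj));

	if (obj)
		atomic_init(&obj->refcnt, 1);	/* creator holds one reference */
	return obj;
}

static void obj_get(struct obj *obj)
{
	atomic_fetch_add(&obj->refcnt, 1);
}

static void obj_put(struct obj *obj)
{
	/* atomic_fetch_sub() returns the previous value: if it was 1, the
	 * counter has now fallen to zero and no other entity can be
	 * accessing the object, so freeing it is safe. */
	if (atomic_fetch_sub(&obj->refcnt, 1) == 1)
		free(obj);
}
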
diff --git a/Documentation/fb/vesafb.txt b/Documentation/fb/vesafb.txt
index ee277dd204b0..950d5a658cb3 100644
--- a/Documentation/fb/vesafb.txt
+++ b/Documentation/fb/vesafb.txt
@@ -95,7 +95,7 @@ There is no way to change the vesafb video mode and/or timings after
 booting linux. If you are not happy with the 60 Hz refresh rate, you
 have these options:
 
-  * configure and load the DOS-Tools for your the graphics board (if
+  * configure and load the DOS-Tools for the graphics board (if
     available) and boot linux with loadlin.
   * use a native driver (matroxfb/atyfb) instead if vesafb. If none
     is available, write a new one!
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index cd8717a36271..ebff3c10a07f 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1003,11 +1003,13 @@ CHAPTER 3: PER-PROCESS PARAMETERS
 3.1 /proc/<pid>/oom_adj - Adjust the oom-killer score
 ------------------------------------------------------
 
-This file can be used to adjust the score used to select which processes
-should be killed in an out-of-memory situation. Giving it a high score will
-increase the likelihood of this process being killed by the oom-killer. Valid
-values are in the range -16 to +15, plus the special value -17, which disables
-oom-killing altogether for this process.
+This file can be used to adjust the score used to select which processes should
+be killed in an out-of-memory situation. The oom_adj value is a characteristic
+of the task's mm, so all threads that share an mm with pid will have the same
+oom_adj value. A high value will increase the likelihood of this process being
+killed by the oom-killer. Valid values are in the range -16 to +15 as
+explained below and a special value of -17, which disables oom-killing
+altogether for threads sharing pid's mm.
 
 The process to be killed in an out-of-memory situation is selected among all others
 based on its badness score. This value equals the original memory size of the process
@@ -1021,6 +1023,9 @@ the parent's score if they do not share the same memory. Thus forking servers
 are the prime candidates to be killed. Having only one 'hungry' child will make
 parent less preferable than the child.
 
+/proc/<pid>/oom_adj cannot be changed for kthreads since they are immune from
+oom-killing already.
+
 /proc/<pid>/oom_score shows process' current badness score.
 
 The following heuristics are then applied:
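
The oom_adj knob described above is a plain text file; a minimal sketch of
using it (assuming sufficient privilege, and targeting the calling process via
/proc/self for simplicity):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/oom_adj", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", -17);	/* -17: disable oom-killing entirely */
	return fclose(f) ? 1 : 0;
}
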
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ad3800630772..5578248c18a4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -546,6 +546,10 @@ and is between 256 and 4096 characters. It is defined in the file
 		console=brl,ttyS0
 		For now, only VisioBraille is supported.
 
+	consoleblank=	[KNL] The console blank (screen saver) timeout in
+			seconds. Defaults to 10*60 = 10mins. A value of 0
+			disables the blank timer.
+
 	coredump_filter=
 			[KNL] Change the default value for
 			/proc/<pid>/coredump_filter.
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 6fab2dcbb4d3..c4de6359d440 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -233,8 +233,8 @@ These protections are added to score to judge whether this zone should be used
 for page allocation or should be reclaimed.
 
 In this example, if normal pages (index=2) are required to this DMA zone and
-pages_high is used for watermark, the kernel judges this zone should not be
-used because pages_free(1355) is smaller than watermark + protection[2]
+watermark[WMARK_HIGH] is used for watermark, the kernel judges this zone should
+not be used because pages_free(1355) is smaller than watermark + protection[2]
 (4 + 2004 = 2008). If this protection value is 0, this zone would be used for
 normal page requirement. If requirement is DMA zone(index=0), protection[0]
 (=0) is used.
@@ -280,9 +280,10 @@ The default value is 65536.
 min_free_kbytes:
 
 This is used to force the Linux VM to keep a minimum number
-of kilobytes free. The VM uses this number to compute a pages_min
-value for each lowmem zone in the system. Each lowmem zone gets
-a number of reserved free pages based proportionally on its size.
+of kilobytes free. The VM uses this number to compute a
+watermark[WMARK_MIN] value for each lowmem zone in the system.
+Each lowmem zone gets a number of reserved free pages based
+proportionally on its size.
 
 Some minimal amount of memory is needed to satisfy PF_MEMALLOC
 allocations; if you set this to lower than 1024KB, your system will
@@ -314,10 +315,14 @@ min_unmapped_ratio:
 
 This is available only on NUMA kernels.
 
-A percentage of the total pages in each zone. Zone reclaim will only
-occur if more than this percentage of pages are file backed and unmapped.
-This is to insure that a minimal amount of local pages is still available for
-file I/O even if the node is overallocated.
+This is a percentage of the total pages in each zone. Zone reclaim will
+only occur if more than this percentage of pages are in a state that
+zone_reclaim_mode allows to be reclaimed.
+
+If zone_reclaim_mode has the value 4 OR'd, then the percentage is compared
+against all file-backed unmapped pages including swapcache pages and tmpfs
+files. Otherwise, only unmapped pages backed by normal files but not tmpfs
+files and similar are considered.
 
 The default is 1 percent.
 
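
For illustration, the way min_free_kbytes is spread across zones as
watermark[WMARK_MIN] (described in the hunk above) can be approximated with a
back-of-envelope calculation; this mirrors the proportional-share logic of the
kernel's setup_per_zone_pages_min(), but ignores the highmem special case, and
the zone sizes below are made-up example numbers:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long min_free_kbytes = 5752;		/* example sysctl value */
	unsigned long zone_pages[] = { 4096, 221184 };	/* e.g. DMA, Normal */
	unsigned long lowmem_pages = 0, pages_min, min;
	int i;

	for (i = 0; i < 2; i++)
		lowmem_pages += zone_pages[i];

	/* convert kilobytes to pages */
	pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);

	for (i = 0; i < 2; i++) {
		/* each lowmem zone gets a share proportional to its size */
		min = pages_min * zone_pages[i] / lowmem_pages;
		printf("zone %d: WMARK_MIN=%lu WMARK_LOW=%lu WMARK_HIGH=%lu\n",
		       i, min, min + min / 4, min + min / 2);
	}
	return 0;
}
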
diff --git a/Documentation/vm/Makefile b/Documentation/vm/Makefile
index 6f562f778b28..27479d43a9b0 100644
--- a/Documentation/vm/Makefile
+++ b/Documentation/vm/Makefile
@@ -2,7 +2,7 @@
 obj- := dummy.o
 
 # List of programs to build
-hostprogs-y := slabinfo
+hostprogs-y := slabinfo slqbinfo page-types
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
diff --git a/Documentation/vm/balance b/Documentation/vm/balance
index bd3d31bc4915..c46e68cf9344 100644
--- a/Documentation/vm/balance
+++ b/Documentation/vm/balance
@@ -75,15 +75,15 @@ Page stealing from process memory and shm is done if stealing the page would
 alleviate memory pressure on any zone in the page's node that has fallen below
 its watermark.
 
-pages_min/pages_low/pages_high/low_on_memory/zone_wake_kswapd: These are
-per-zone fields, used to determine when a zone needs to be balanced. When
-the number of pages falls below pages_min, the hysteric field low_on_memory
-gets set. This stays set till the number of free pages becomes pages_high.
-When low_on_memory is set, page allocation requests will try to free some
-pages in the zone (providing GFP_WAIT is set in the request). Orthogonal
-to this, is the decision to poke kswapd to free some zone pages. That
-decision is not hysteresis based, and is done when the number of free
-pages is below pages_low; in which case zone_wake_kswapd is also set.
+watemark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These
+are per-zone fields, used to determine when a zone needs to be balanced. When
+the number of pages falls below watermark[WMARK_MIN], the hysteric field
+low_on_memory gets set. This stays set till the number of free pages becomes
+watermark[WMARK_HIGH]. When low_on_memory is set, page allocation requests will
+try to free some pages in the zone (providing GFP_WAIT is set in the request).
+Orthogonal to this, is the decision to poke kswapd to free some zone pages.
+That decision is not hysteresis based, and is done when the number of free
+pages is below watermark[WMARK_LOW]; in which case zone_wake_kswapd is also set.
 
 
 (Good) Ideas that I have heard:
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
new file mode 100644
index 000000000000..0833f44ba16b
--- /dev/null
+++ b/Documentation/vm/page-types.c
@@ -0,0 +1,698 @@
+/*
+ * page-types: Tool for querying page flags
+ *
+ * Copyright (C) 2009 Intel corporation
+ * Copyright (C) 2009 Wu Fengguang <fengguang.wu@intel.com>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <string.h>
+#include <getopt.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/fcntl.h>
+
+
+/*
+ * kernel page flags
+ */
+
+#define KPF_BYTES		8
+#define PROC_KPAGEFLAGS		"/proc/kpageflags"
+
+/* copied from kpageflags_read() */
+#define KPF_LOCKED		0
+#define KPF_ERROR		1
+#define KPF_REFERENCED		2
+#define KPF_UPTODATE		3
+#define KPF_DIRTY		4
+#define KPF_LRU			5
+#define KPF_ACTIVE		6
+#define KPF_SLAB		7
+#define KPF_WRITEBACK		8
+#define KPF_RECLAIM		9
+#define KPF_BUDDY		10
+
+/* [11-20] new additions in 2.6.31 */
+#define KPF_MMAP		11
+#define KPF_ANON		12
+#define KPF_SWAPCACHE		13
+#define KPF_SWAPBACKED		14
+#define KPF_COMPOUND_HEAD	15
+#define KPF_COMPOUND_TAIL	16
+#define KPF_HUGE		17
+#define KPF_UNEVICTABLE		18
+#define KPF_NOPAGE		20
+
+/* [32-] kernel hacking assistances */
+#define KPF_RESERVED		32
+#define KPF_MLOCKED		33
+#define KPF_MAPPEDTODISK	34
+#define KPF_PRIVATE		35
+#define KPF_PRIVATE_2		36
+#define KPF_OWNER_PRIVATE	37
+#define KPF_ARCH		38
+#define KPF_UNCACHED		39
+
+/* [48-] take some arbitrary free slots for expanding overloaded flags
+ * not part of kernel API
+ */
+#define KPF_READAHEAD		48
+#define KPF_SLOB_FREE		49
+#define KPF_SLUB_FROZEN		50
+#define KPF_SLUB_DEBUG		51
+
+#define KPF_ALL_BITS		((uint64_t)~0ULL)
+#define KPF_HACKERS_BITS	(0xffffULL << 32)
+#define KPF_OVERLOADED_BITS	(0xffffULL << 48)
+#define BIT(name)		(1ULL << KPF_##name)
+#define BITS_COMPOUND		(BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL))
+
+static char *page_flag_names[] = {
+	[KPF_LOCKED]		= "L:locked",
+	[KPF_ERROR]		= "E:error",
+	[KPF_REFERENCED]	= "R:referenced",
+	[KPF_UPTODATE]		= "U:uptodate",
+	[KPF_DIRTY]		= "D:dirty",
+	[KPF_LRU]		= "l:lru",
+	[KPF_ACTIVE]		= "A:active",
+	[KPF_SLAB]		= "S:slab",
+	[KPF_WRITEBACK]		= "W:writeback",
+	[KPF_RECLAIM]		= "I:reclaim",
+	[KPF_BUDDY]		= "B:buddy",
+
+	[KPF_MMAP]		= "M:mmap",
+	[KPF_ANON]		= "a:anonymous",
+	[KPF_SWAPCACHE]		= "s:swapcache",
+	[KPF_SWAPBACKED]	= "b:swapbacked",
+	[KPF_COMPOUND_HEAD]	= "H:compound_head",
+	[KPF_COMPOUND_TAIL]	= "T:compound_tail",
+	[KPF_HUGE]		= "G:huge",
+	[KPF_UNEVICTABLE]	= "u:unevictable",
+	[KPF_NOPAGE]		= "n:nopage",
+
+	[KPF_RESERVED]		= "r:reserved",
+	[KPF_MLOCKED]		= "m:mlocked",
+	[KPF_MAPPEDTODISK]	= "d:mappedtodisk",
+	[KPF_PRIVATE]		= "P:private",
+	[KPF_PRIVATE_2]		= "p:private_2",
+	[KPF_OWNER_PRIVATE]	= "O:owner_private",
+	[KPF_ARCH]		= "h:arch",
+	[KPF_UNCACHED]		= "c:uncached",
+
+	[KPF_READAHEAD]		= "I:readahead",
+	[KPF_SLOB_FREE]		= "P:slob_free",
+	[KPF_SLUB_FROZEN]	= "A:slub_frozen",
+	[KPF_SLUB_DEBUG]	= "E:slub_debug",
+};
+
+
+/*
+ * data structures
+ */
+
+static int		opt_raw;	/* for kernel developers */
+static int		opt_list;	/* list pages (in ranges) */
+static int		opt_no_summary;	/* don't show summary */
+static pid_t		opt_pid;	/* process to walk */
+
+#define MAX_ADDR_RANGES	1024
+static int		nr_addr_ranges;
+static unsigned long	opt_offset[MAX_ADDR_RANGES];
+static unsigned long	opt_size[MAX_ADDR_RANGES];
+
+#define MAX_BIT_FILTERS	64
+static int		nr_bit_filters;
+static uint64_t		opt_mask[MAX_BIT_FILTERS];
+static uint64_t		opt_bits[MAX_BIT_FILTERS];
+
+static int		page_size;
+
+#define PAGES_BATCH	(64 << 10)	/* 64k pages */
+static int		kpageflags_fd;
+static uint64_t		kpageflags_buf[KPF_BYTES * PAGES_BATCH];
+
+#define HASH_SHIFT	13
+#define HASH_SIZE	(1 << HASH_SHIFT)
+#define HASH_MASK	(HASH_SIZE - 1)
+#define HASH_KEY(flags)	(flags & HASH_MASK)
+
+static unsigned long	total_pages;
+static unsigned long	nr_pages[HASH_SIZE];
+static uint64_t		page_flags[HASH_SIZE];
+
+
+/*
+ * helper functions
+ */
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define min_t(type, x, y) ({		\
+	type __min1 = (x);		\
+	type __min2 = (y);		\
+	__min1 < __min2 ? __min1 : __min2; })
+
+unsigned long pages2mb(unsigned long pages)
+{
+	return (pages * page_size) >> 20;
+}
+
+void fatal(const char *x, ...)
+{
+	va_list ap;
+
+	va_start(ap, x);
+	vfprintf(stderr, x, ap);
+	va_end(ap);
+	exit(EXIT_FAILURE);
+}
+
+
+/*
+ * page flag names
+ */
+
+char *page_flag_name(uint64_t flags)
+{
+	static char buf[65];
+	int present;
+	int i, j;
+
+	for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
+		present = (flags >> i) & 1;
+		if (!page_flag_names[i]) {
+			if (present)
+				fatal("unkown flag bit %d\n", i);
+			continue;
+		}
+		buf[j++] = present ? page_flag_names[i][0] : '_';
+	}
+
+	return buf;
+}
+
+char *page_flag_longname(uint64_t flags)
+{
+	static char buf[1024];
+	int i, n;
+
+	for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) {
+		if (!page_flag_names[i])
+			continue;
+		if ((flags >> i) & 1)
+			n += snprintf(buf + n, sizeof(buf) - n, "%s,",
+					page_flag_names[i] + 2);
+	}
+	if (n)
+		n--;
+	buf[n] = '\0';
+
+	return buf;
+}
+
+
+/*
+ * page list and summary
+ */
+
+void show_page_range(unsigned long offset, uint64_t flags)
+{
+	static uint64_t		flags0;
+	static unsigned long	index;
+	static unsigned long	count;
+
+	if (flags == flags0 && offset == index + count) {
+		count++;
+		return;
+	}
+
+	if (count)
+		printf("%lu\t%lu\t%s\n",
+				index, count, page_flag_name(flags0));
+
+	flags0 = flags;
+	index  = offset;
+	count  = 1;
+}
+
+void show_page(unsigned long offset, uint64_t flags)
+{
+	printf("%lu\t%s\n", offset, page_flag_name(flags));
+}
+
+void show_summary(void)
+{
+	int i;
+
+	printf("             flags\tpage-count       MB"
+		"  symbolic-flags\t\t\tlong-symbolic-flags\n");
+
+	for (i = 0; i < ARRAY_SIZE(nr_pages); i++) {
+		if (nr_pages[i])
+			printf("0x%016llx\t%10lu %8lu  %s\t%s\n",
+				(unsigned long long)page_flags[i],
+				nr_pages[i],
+				pages2mb(nr_pages[i]),
+				page_flag_name(page_flags[i]),
+				page_flag_longname(page_flags[i]));
+	}
+
+	printf("             total\t%10lu %8lu\n",
+			total_pages, pages2mb(total_pages));
+}
+
+
+/*
+ * page flag filters
+ */
+
+int bit_mask_ok(uint64_t flags)
+{
+	int i;
+
+	for (i = 0; i < nr_bit_filters; i++) {
+		if (opt_bits[i] == KPF_ALL_BITS) {
+			if ((flags & opt_mask[i]) == 0)
+				return 0;
+		} else {
+			if ((flags & opt_mask[i]) != opt_bits[i])
+				return 0;
+		}
+	}
+
+	return 1;
+}
+
+uint64_t expand_overloaded_flags(uint64_t flags)
+{
+	/* SLOB/SLUB overload several page flags */
+	if (flags & BIT(SLAB)) {
+		if (flags & BIT(PRIVATE))
+			flags ^= BIT(PRIVATE) | BIT(SLOB_FREE);
+		if (flags & BIT(ACTIVE))
+			flags ^= BIT(ACTIVE) | BIT(SLUB_FROZEN);
+		if (flags & BIT(ERROR))
+			flags ^= BIT(ERROR) | BIT(SLUB_DEBUG);
+	}
+
+	/* PG_reclaim is overloaded as PG_readahead in the read path */
+	if ((flags & (BIT(RECLAIM) | BIT(WRITEBACK))) == BIT(RECLAIM))
+		flags ^= BIT(RECLAIM) | BIT(READAHEAD);
+
+	return flags;
+}
+
+uint64_t well_known_flags(uint64_t flags)
+{
+	/* hide flags intended only for kernel hacker */
+	flags &= ~KPF_HACKERS_BITS;
+
+	/* hide non-hugeTLB compound pages */
+	if ((flags & BITS_COMPOUND) && !(flags & BIT(HUGE)))
+		flags &= ~BITS_COMPOUND;
+
+	return flags;
+}
+
+
+/*
+ * page frame walker
+ */
+
+int hash_slot(uint64_t flags)
+{
+	int k = HASH_KEY(flags);
+	int i;
+
+	/* Explicitly reserve slot 0 for flags 0: the following logic
+	 * cannot distinguish an unoccupied slot from slot (flags==0).
+	 */
+	if (flags == 0)
+		return 0;
+
+	/* search through the remaining (HASH_SIZE-1) slots */
+	for (i = 1; i < ARRAY_SIZE(page_flags); i++, k++) {
+		if (!k || k >= ARRAY_SIZE(page_flags))
+			k = 1;
+		if (page_flags[k] == 0) {
+			page_flags[k] = flags;
+			return k;
+		}
+		if (page_flags[k] == flags)
+			return k;
+	}
+
+	fatal("hash table full: bump up HASH_SHIFT?\n");
+	exit(EXIT_FAILURE);
+}
+
+void add_page(unsigned long offset, uint64_t flags)
+{
+	flags = expand_overloaded_flags(flags);
+
+	if (!opt_raw)
+		flags = well_known_flags(flags);
+
+	if (!bit_mask_ok(flags))
+		return;
+
+	if (opt_list == 1)
+		show_page_range(offset, flags);
+	else if (opt_list == 2)
+		show_page(offset, flags);
+
+	nr_pages[hash_slot(flags)]++;
+	total_pages++;
+}
+
+void walk_pfn(unsigned long index, unsigned long count)
+{
+	unsigned long batch;
+	unsigned long n;
+	unsigned long i;
+
+	if (index > ULONG_MAX / KPF_BYTES)
+		fatal("index overflow: %lu\n", index);
+
+	lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET);
+
+	while (count) {
+		batch = min_t(unsigned long, count, PAGES_BATCH);
+		n = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES);
+		if (n == 0)
+			break;
+		if (n < 0) {
+			perror(PROC_KPAGEFLAGS);
+			exit(EXIT_FAILURE);
+		}
+
+		if (n % KPF_BYTES != 0)
+			fatal("partial read: %lu bytes\n", n);
+		n = n / KPF_BYTES;
+
+		for (i = 0; i < n; i++)
+			add_page(index + i, kpageflags_buf[i]);
+
+		index += batch;
+		count -= batch;
+	}
+}
+
+void walk_addr_ranges(void)
+{
+	int i;
+
+	kpageflags_fd = open(PROC_KPAGEFLAGS, O_RDONLY);
+	if (kpageflags_fd < 0) {
+		perror(PROC_KPAGEFLAGS);
+		exit(EXIT_FAILURE);
+	}
+
+	if (!nr_addr_ranges)
+		walk_pfn(0, ULONG_MAX);
+
+	for (i = 0; i < nr_addr_ranges; i++)
+		walk_pfn(opt_offset[i], opt_size[i]);
+
+	close(kpageflags_fd);
+}
+
+
+/*
+ * user interface
+ */
+
+const char *page_flag_type(uint64_t flag)
+{
+	if (flag & KPF_HACKERS_BITS)
+		return "(r)";
+	if (flag & KPF_OVERLOADED_BITS)
+		return "(o)";
+	return "   ";
+}
+
+void usage(void)
+{
+	int i, j;
+
+	printf(
+"page-types [options]\n"
+"            -r|--raw                  Raw mode, for kernel developers\n"
+"            -a|--addr    addr-spec    Walk a range of pages\n"
+"            -b|--bits    bits-spec    Walk pages with specified bits\n"
+#if 0 /* planned features */
+"            -p|--pid     pid          Walk process address space\n"
+"            -f|--file    filename     Walk file address space\n"
+#endif
+"            -l|--list                 Show page details in ranges\n"
+"            -L|--list-each            Show page details one by one\n"
+"            -N|--no-summary           Don't show summay info\n"
+"            -h|--help                 Show this usage message\n"
+"addr-spec:\n"
+"            N                         one page at offset N (unit: pages)\n"
+"            N+M                       pages range from N to N+M-1\n"
+"            N,M                       pages range from N to M-1\n"
+"            N,                        pages range from N to end\n"
+"            ,M                        pages range from 0 to M\n"
+"bits-spec:\n"
+"            bit1,bit2                 (flags & (bit1|bit2)) != 0\n"
+"            bit1,bit2=bit1            (flags & (bit1|bit2)) == bit1\n"
+"            bit1,~bit2                (flags & (bit1|bit2)) == bit1\n"
+"            =bit1,bit2                flags == (bit1|bit2)\n"
+"bit-names:\n"
+	);
+
+	for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
+		if (!page_flag_names[i])
+			continue;
+		printf("%16s%s", page_flag_names[i] + 2,
+			page_flag_type(1ULL << i));
+		if (++j > 3) {
+			j = 0;
+			putchar('\n');
+		}
+	}
+	printf("\n        "
+		"(r) raw mode bits  (o) overloaded bits\n");
+}
+
+unsigned long long parse_number(const char *str)
+{
+	unsigned long long n;
+
+	n = strtoll(str, NULL, 0);
+
+	if (n == 0 && str[0] != '0')
+		fatal("invalid name or number: %s\n", str);
+
+	return n;
+}
+
+void parse_pid(const char *str)
+{
+	opt_pid = parse_number(str);
+}
+
+void parse_file(const char *name)
+{
+}
+
+void add_addr_range(unsigned long offset, unsigned long size)
+{
+	if (nr_addr_ranges >= MAX_ADDR_RANGES)
+		fatal("too much addr ranges\n");
+
+	opt_offset[nr_addr_ranges] = offset;
+	opt_size[nr_addr_ranges] = size;
+	nr_addr_ranges++;
+}
+
+void parse_addr_range(const char *optarg)
+{
+	unsigned long offset;
+	unsigned long size;
+	char *p;
+
+	p = strchr(optarg, ',');
+	if (!p)
+		p = strchr(optarg, '+');
+
+	if (p == optarg) {
+		offset = 0;
+		size = parse_number(p + 1);
+	} else if (p) {
+		offset = parse_number(optarg);
+		if (p[1] == '\0')
+			size = ULONG_MAX;
+		else {
+			size = parse_number(p + 1);
+			if (*p == ',') {
+				if (size < offset)
+					fatal("invalid range: %lu,%lu\n",
+							offset, size);
+				size -= offset;
+			}
+		}
+	} else {
+		offset = parse_number(optarg);
+		size = 1;
+	}
+
+	add_addr_range(offset, size);
+}
+
+void add_bits_filter(uint64_t mask, uint64_t bits)
+{
+	if (nr_bit_filters >= MAX_BIT_FILTERS)
+		fatal("too much bit filters\n");
+
+	opt_mask[nr_bit_filters] = mask;
+	opt_bits[nr_bit_filters] = bits;
+	nr_bit_filters++;
+}
+
+uint64_t parse_flag_name(const char *str, int len)
+{
+	int i;
+
+	if (!*str || !len)
+		return 0;
+
+	if (len <= 8 && !strncmp(str, "compound", len))
+		return BITS_COMPOUND;
+
+	for (i = 0; i < ARRAY_SIZE(page_flag_names); i++) {
+		if (!page_flag_names[i])
+			continue;
+		if (!strncmp(str, page_flag_names[i] + 2, len))
+			return 1ULL << i;
+	}
+
+	return parse_number(str);
+}
+
+uint64_t parse_flag_names(const char *str, int all)
+{
+	const char *p = str;
+	uint64_t flags = 0;
+
+	while (1) {
+		if (*p == ',' || *p == '=' || *p == '\0') {
+			if ((*str != '~') || (*str == '~' && all && *++str))
+				flags |= parse_flag_name(str, p - str);
+			if (*p != ',')
+				break;
+			str = p + 1;
+		}
+		p++;
+	}
+
+	return flags;
+}
+
+void parse_bits_mask(const char *optarg)
+{
+	uint64_t mask;
+	uint64_t bits;
+	const char *p;
+
+	p = strchr(optarg, '=');
+	if (p == optarg) {
+		mask = KPF_ALL_BITS;
+		bits = parse_flag_names(p + 1, 0);
+	} else if (p) {
+		mask = parse_flag_names(optarg, 0);
+		bits = parse_flag_names(p + 1, 0);
+	} else if (strchr(optarg, '~')) {
+		mask = parse_flag_names(optarg, 1);
+		bits = parse_flag_names(optarg, 0);
+	} else {
+		mask = parse_flag_names(optarg, 0);
+		bits = KPF_ALL_BITS;
+	}
+
+	add_bits_filter(mask, bits);
+}
+
+
+struct option opts[] = {
+	{ "raw"       , 0, NULL, 'r' },
+	{ "pid"       , 1, NULL, 'p' },
+	{ "file"      , 1, NULL, 'f' },
+	{ "addr"      , 1, NULL, 'a' },
+	{ "bits"      , 1, NULL, 'b' },
+	{ "list"      , 0, NULL, 'l' },
+	{ "list-each" , 0, NULL, 'L' },
+	{ "no-summary", 0, NULL, 'N' },
+	{ "help"      , 0, NULL, 'h' },
+	{ NULL        , 0, NULL, 0 }
+};
+
+int main(int argc, char *argv[])
+{
+	int c;
+
+	page_size = getpagesize();
+
+	while ((c = getopt_long(argc, argv,
+				"rp:f:a:b:lLNh", opts, NULL)) != -1) {
+		switch (c) {
+		case 'r':
+			opt_raw = 1;
+			break;
+		case 'p':
+			parse_pid(optarg);
+			break;
+		case 'f':
+			parse_file(optarg);
+			break;
+		case 'a':
+			parse_addr_range(optarg);
+			break;
+		case 'b':
+			parse_bits_mask(optarg);
+			break;
+		case 'l':
+			opt_list = 1;
+			break;
+		case 'L':
+			opt_list = 2;
+			break;
+		case 'N':
+			opt_no_summary = 1;
+			break;
+		case 'h':
+			usage();
+			exit(0);
+		default:
+			usage();
+			exit(1);
+		}
+	}
+
+	if (opt_list == 1)
+		printf("offset\tcount\tflags\n");
+	if (opt_list == 2)
+		printf("offset\tflags\n");
+
+	walk_addr_ranges();
+
+	if (opt_list == 1)
+		show_page_range(0, 0);	/* drain the buffer */
+
+	if (opt_no_summary)
+		return 0;
+
+	if (opt_list)
+		printf("\n\n");
+
+	show_summary();
+
+	return 0;
+}
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index ce72c0fe6177..600a304a828c 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -12,9 +12,9 @@ There are three components to pagemap:
    value for each virtual page, containing the following data (from
    fs/proc/task_mmu.c, above pagemap_read):
 
-    * Bits 0-55  page frame number (PFN) if present
+    * Bits 0-54  page frame number (PFN) if present
     * Bits 0-4   swap type if swapped
-    * Bits 5-55  swap offset if swapped
+    * Bits 5-54  swap offset if swapped
     * Bits 55-60 page shift (page size = 1<<page shift)
     * Bit  61    reserved for future use
     * Bit  62    page swapped
@@ -36,7 +36,7 @@ There are three components to pagemap:
  * /proc/kpageflags. This file contains a 64-bit set of flags for each
    page, indexed by PFN.
 
-   The flags are (from fs/proc/proc_misc, above kpageflags_read):
+   The flags are (from fs/proc/page.c, above kpageflags_read):
 
     0. LOCKED
     1. ERROR
@@ -49,6 +49,68 @@ There are three components to pagemap:
     8. WRITEBACK
     9. RECLAIM
    10. BUDDY
+   11. MMAP
+   12. ANON
+   13. SWAPCACHE
+   14. SWAPBACKED
+   15. COMPOUND_HEAD
+   16. COMPOUND_TAIL
+   16. HUGE
+   18. UNEVICTABLE
+   20. NOPAGE
+
+Short descriptions to the page flags:
+
+ 0. LOCKED
+    page is being locked for exclusive access, eg. by undergoing read/write IO
+
+ 7. SLAB
+    page is managed by the SLAB/SLOB/SLUB/SLQB kernel memory allocator
+    When compound page is used, SLUB/SLQB will only set this flag on the head
+    page; SLOB will not flag it at all.
+
+10. BUDDY
+    a free memory block managed by the buddy system allocator
+    The buddy system organizes free memory in blocks of various orders.
+    An order N block has 2^N physically contiguous pages, with the BUDDY flag
+    set for and _only_ for the first page.
+
+15. COMPOUND_HEAD
+16. COMPOUND_TAIL
+    A compound page with order N consists of 2^N physically contiguous pages.
+    A compound page with order 2 takes the form of "HTTT", where H donates its
+    head page and T donates its tail page(s). The major consumers of compound
+    pages are hugeTLB pages (Documentation/vm/hugetlbpage.txt), the SLUB etc.
+    memory allocators and various device drivers. However in this interface,
+    only huge/giga pages are made visible to end users.
+17. HUGE
+    this is an integral part of a HugeTLB page
+
+20. NOPAGE
+    no page frame exists at the requested address
+
+    [IO related page flags]
+ 1. ERROR     IO error occurred
+ 3. UPTODATE  page has up-to-date data
+              ie. for file backed page: (in-memory data revision >= on-disk one)
+ 4. DIRTY     page has been written to, hence contains new data
+              ie. for file backed page: (in-memory data revision > on-disk one)
+ 8. WRITEBACK page is being synced to disk
+
+    [LRU related page flags]
+ 5. LRU       page is in one of the LRU lists
+ 6. ACTIVE    page is in the active LRU list
+18. UNEVICTABLE   page is in the unevictable (non-)LRU list
+                  It is somehow pinned and not a candidate for LRU page reclaims,
+                  eg. ramfs pages, shmctl(SHM_LOCK) and mlock() memory segments
+ 2. REFERENCED    page has been referenced since last LRU list enqueue/requeue
+ 9. RECLAIM       page will be reclaimed soon after its pageout IO completed
+11. MMAP      a memory mapped page
+12. ANON      a memory mapped page that is not part of a file
+13. SWAPCACHE page is mapped to swap space, ie. has an associated swap entry
+14. SWAPBACKED    page is backed by swap/RAM
+
+The page-types tool in this directory can be used to query the above flags.
 
 Using pagemap to do something useful:
 
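
A minimal sketch of consuming the pagemap interface documented above, decoding
a single entry for one virtual address of the calling process (the bit layout
follows the text of this patch: bits 0-54 PFN, bit 62 swapped, bit 63 present;
error handling abbreviated):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	uintptr_t vaddr = (uintptr_t)&psize;	/* any mapped address works */
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	/* one 64-bit entry per virtual page, indexed by vaddr / page size */
	if (fd < 0 || pread(fd, &entry, sizeof(entry),
			    (vaddr / psize) * sizeof(entry)) != sizeof(entry)) {
		perror("pagemap");
		return 1;
	}
	printf("present=%d swapped=%d pfn=0x%llx\n",
	       (int)((entry >> 63) & 1), (int)((entry >> 62) & 1),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	close(fd);
	return 0;
}
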
diff --git a/MAINTAINERS b/MAINTAINERS
index af8ef6527f22..fb94addb34de 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -36,6 +36,12 @@ trivial patch so apply some common sense.
 	(scripts/checkpatch.pl) to catch trival style violations.
 	See Documentation/CodingStyle for guidance here.
 
+	PLEASE CC: the maintainers and mailing lists that are generated
+	by scripts/get_maintainer.pl. The results returned by the
+	script will be best if you have git installed and are making
+	your changes in a branch derived from Linus' latest git tree.
+	See Documentation/SubmittingPatches for details.
+
 	PLEASE try to include any credit lines you want added with the
 	patch. It avoids people being missed off by mistake and makes
 	it easier to know who wants adding and who doesn't.
@@ -489,7 +495,7 @@ AOA (Apple Onboard Audio) ALSA DRIVER
 P:	Johannes Berg
 M:	johannes@sipsolutions.net
 L:	linuxppc-dev@ozlabs.org
-L:	alsa-devel@alsa-project.org (subscribers-only)
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Maintained
 F:	sound/aoa/
 
@@ -912,7 +918,6 @@ P: Dan Williams
 M:	dan.j.williams@intel.com
 P:	Maciej Sosnowski
 M:	maciej.sosnowski@intel.com
-L:	linux-kernel@vger.kernel.org
 W:	http://sourceforge.net/projects/xscaleiop
 S:	Supported
 F:	Documentation/crypto/async-tx-api.txt
@@ -1008,7 +1013,6 @@ F: drivers/mmc/host/at91_mci.c
 ATMEL AT91 / AT32 SERIAL DRIVER
 P:	Haavard Skinnemoen
 M:	hskinnemoen@atmel.com
-L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/serial/atmel_serial.c
 
@@ -1064,7 +1068,6 @@ F: kernel/audit*
 AUXILIARY DISPLAY DRIVERS
 P:	Miguel Ojeda Sandonis
 M:	miguel.ojeda.sandonis@gmail.com
-L:	linux-kernel@vger.kernel.org
 W:	http://miguelojeda.es/auxdisplay.htm
 W:	http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
 S:	Maintained
@@ -1134,7 +1137,6 @@ F: drivers/net/hamradio/baycom*
 BEFS FILE SYSTEM
 P:	Sergey S. Kostyliov
 M:	rathamahata@php4.ru
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/filesystems/befs.txt
 F:	fs/befs/
@@ -1142,7 +1144,6 @@ F: fs/befs/
 BFS FILE SYSTEM
 P:	Tigran A. Aivazian
 M:	tigran@aivazian.fsnet.co.uk
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/filesystems/bfs.txt
 F:	fs/bfs/
@@ -1199,7 +1200,6 @@ F: drivers/i2c/busses/i2c-bfin-twi.c
 BLOCK LAYER
 P:	Jens Axboe
 M:	axboe@kernel.dk
-L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
 S:	Maintained
 F:	block/
@@ -1326,7 +1326,6 @@ P: Muli Ben-Yehuda
 M:	muli@il.ibm.com
 P:	Jon D. Mason
 M:	jdmason@kudzu.us
-L:	linux-kernel@vger.kernel.org
 L:	discuss@x86-64.org
 S:	Maintained
 F:	arch/x86/kernel/pci-calgary_64.c
@@ -1378,7 +1377,6 @@ F: include/linux/usb/wusb*
 CFAG12864B LCD DRIVER
 P:	Miguel Ojeda Sandonis
 M:	miguel.ojeda.sandonis@gmail.com
-L:	linux-kernel@vger.kernel.org
 W:	http://miguelojeda.es/auxdisplay.htm
 W:	http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
 S:	Maintained
@@ -1388,7 +1386,6 @@ F: include/linux/cfag12864b.h
 CFAG12864BFB LCD FRAMEBUFFER DRIVER
 P:	Miguel Ojeda Sandonis
 M:	miguel.ojeda.sandonis@gmail.com
-L:	linux-kernel@vger.kernel.org
 W:	http://miguelojeda.es/auxdisplay.htm
 W:	http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
 S:	Maintained
@@ -1408,7 +1405,6 @@ X: net/wireless/wext*
 CHECKPATCH
 P:	Andy Whitcroft
 M:	apw@canonical.com
-L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	scripts/checkpatch.pl
 
@@ -1437,7 +1433,7 @@ F: drivers/usb/host/ohci-ep93xx.c
 CIRRUS LOGIC CS4270 SOUND DRIVER
 P:	Timur Tabi
 M:	timur@freescale.com
-L:	alsa-devel@alsa-project.org
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	sound/soc/codecs/cs4270*
 
@@ -1462,6 +1458,7 @@ P: Joe Eykholt
 M:	jeykholt@cisco.com
 L:	linux-scsi@vger.kernel.org
 S:	Supported
+F:	drivers/scsi/fnic/
 
 CODA FILE SYSTEM
 P:	Jan Harkes
@@ -1534,7 +1531,6 @@ F: drivers/usb/atm/cxacru.c
 CONFIGFS
 P:	Joel Becker
 M:	joel.becker@oracle.com
-L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	fs/configfs/
 F:	include/linux/configfs.h
@@ -1592,7 +1588,6 @@ F: arch/x86/kernel/msr.c
 CPUSETS
 P:	Paul Menage
 M:	menage@google.com
-L:	linux-kernel@vger.kernel.org
 W:	http://www.bullopensource.org/cpuset/
 W:	http://oss.sgi.com/projects/cpusets/
 S:	Supported
@@ -1799,7 +1794,6 @@ DEVICE NUMBER REGISTRY
 P:	Torben Mathiasen
 M:	device@lanana.org
 W:	http://lanana.org/docs/device-list/index.html
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 
 DEVICE-MAPPER (LVM)
@@ -1825,7 +1819,6 @@ F: drivers/char/digi*
 DIRECTORY NOTIFICATION (DNOTIFY)
 P:	Eric Paris
 M:	eparis@parisplace.org
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/filesystems/dnotify.txt
 F:	fs/notify/dnotify/
@@ -1842,7 +1835,6 @@ S: Maintained
 DISKQUOTA
 P:	Jan Kara
 M:	jack@suse.cz
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/filesystems/quota.txt
 F:	fs/quota/
@@ -1864,7 +1856,6 @@ P: Maciej Sosnowski
 M:	maciej.sosnowski@intel.com
 P:	Dan Williams
 M:	dan.j.williams@intel.com
-L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/dma/
 F:	include/linux/dma*
@@ -1916,7 +1907,6 @@ F: drivers/scsi/dpt/
 DRIVER CORE, KOBJECTS, AND SYSFS
 P:	Greg Kroah-Hartman
 M:	gregkh@suse.de
-L:	linux-kernel@vger.kernel.org
 T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
 S:	Supported
 F:	Documentation/kobject.txt
@@ -1982,8 +1972,8 @@ F: net/bridge/netfilter/ebt*.c
 ECRYPT FILE SYSTEM
 P:	Tyler Hicks
 M:	tyhicks@linux.vnet.ibm.com
-M:	Dustin Kirkland
-P:	kirkland@canonical.com
+P:	Dustin Kirkland
+M:	kirkland@canonical.com
 L:	ecryptfs-devel@lists.launchpad.net
 W:	https://launchpad.net/ecryptfs
 S:	Supported
@@ -2263,7 +2253,6 @@ F: drivers/firewire/
 F:	include/linux/firewire*.h
 
 FIRMWARE LOADER (request_firmware)
-L:	linux-kernel@vger.kernel.org
 S:	Orphan
 F:	Documentation/firmware_class/
 F:	drivers/base/firmware*.c
@@ -2300,7 +2289,6 @@ M: leoli@freescale.com
 P:	Zhang Wei
 M:	zw@zh-kernel.org
 L:	linuxppc-dev@ozlabs.org
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	drivers/dma/fsldma.*
 
@@ -2366,7 +2354,7 @@ F: drivers/serial/ucc_uart.c
 FREESCALE SOC SOUND DRIVERS
 P:	Timur Tabi
 M:	timur@freescale.com
-L:	alsa-devel@alsa-project.org
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:	linuxppc-dev@ozlabs.org
 S:	Supported
 F:	sound/soc/fsl/fsl*
@@ -2500,7 +2488,6 @@ F: drivers/hwmon/hdaps.c
 
 HYPERVISOR VIRTUAL CONSOLE DRIVER
 L:	linuxppc-dev@ozlabs.org
-L:	linux-kernel@vger.kernel.org
 S:	Odd Fixes
 F:	drivers/char/hvc_*
 
@@ -2567,7 +2554,6 @@ F: sound/parisc/harmony.*
 HAYES ESP SERIAL DRIVER
 P:	Andrew J. Robinson
 M:	arobinso@nyx.net
-L:	linux-kernel@vger.kernel.org
 W:	http://www.nyx.net/~arobinso
 S:	Maintained
 F:	Documentation/serial/hayes-esp.txt
@@ -2593,7 +2579,6 @@ F: include/linux/cciss_ioctl.h
 HFS FILESYSTEM
 P:	Roman Zippel
 M:	zippel@linux-m68k.org
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/filesystems/hfs.txt
 F:	fs/hfs/
@@ -2633,7 +2618,6 @@ F: include/linux/hid*
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 P:	Thomas Gleixner
 M:	tglx@linutronix.de
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/timers/
 F:	kernel/hrtimer.c
@@ -2772,7 +2756,6 @@ F: drivers/i2c/busses/i2c-tiny-usb.c
 i386 BOOT CODE
 P:	H. Peter Anvin
 M:	hpa@zytor.com
-L:	Linux-Kernel@vger.kernel.org
 S:	Maintained
 F:	arch/x86/boot/
 
@@ -2902,7 +2885,6 @@ P: Robert Love
 M:	rlove@rlove.org
 P:	Eric Paris
 M:	eparis@parisplace.org
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/filesystems/inotify.txt
 F:	fs/notify/inotify/
@@ -2950,7 +2932,6 @@ F: arch/x86/kernel/microcode_intel.c
 INTEL I/OAT DMA DRIVER
 P:	Maciej Sosnowski
 M:	maciej.sosnowski@intel.com
-L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/dma/ioat*
 
@@ -2966,7 +2947,6 @@ F: include/linux/intel-iommu.h
 INTEL IOP-ADMA DMA DRIVER
 P:	Dan Williams
 M:	dan.j.williams@intel.com
-L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/dma/iop-adma.c
 
@@ -3279,7 +3259,6 @@ M: vgoyal@redhat.com
 P:	Haren Myneni
 M:	hbabu@us.ibm.com
 L:	kexec@lists.infradead.org
-L:	linux-kernel@vger.kernel.org
 W:	http://lse.sourceforge.net/kdump/
 S:	Maintained
 F:	Documentation/kdump/
@@ -3389,7 +3368,6 @@ KEXEC
 P:	Eric Biederman
 M:	ebiederm@xmission.com
 W:	http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
-L:	linux-kernel@vger.kernel.org
 L:	kexec@lists.infradead.org
 S:	Maintained
 F:	include/linux/kexec.h
@@ -3427,7 +3405,6 @@ F: mm/kmemleak-test.c
 KMEMTRACE
 P:	Eduard - Gabriel Munteanu
 M:	eduard.munteanu@linux360.ro
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/trace/kmemtrace.txt
 F:	include/trace/kmemtrace.h
@@ -3442,7 +3419,6 @@ P: David S. Miller
3442M: davem@davemloft.net 3419M: davem@davemloft.net
3443P: Masami Hiramatsu 3420P: Masami Hiramatsu
3444M: mhiramat@redhat.com 3421M: mhiramat@redhat.com
3445L: linux-kernel@vger.kernel.org
3446S: Maintained 3422S: Maintained
3447F: Documentation/kprobes.txt 3423F: Documentation/kprobes.txt
3448F: include/linux/kprobes.h 3424F: include/linux/kprobes.h
@@ -3451,7 +3427,6 @@ F: kernel/kprobes.c
3451KS0108 LCD CONTROLLER DRIVER 3427KS0108 LCD CONTROLLER DRIVER
3452P: Miguel Ojeda Sandonis 3428P: Miguel Ojeda Sandonis
3453M: miguel.ojeda.sandonis@gmail.com 3429M: miguel.ojeda.sandonis@gmail.com
3454L: linux-kernel@vger.kernel.org
3455W: http://miguelojeda.es/auxdisplay.htm 3430W: http://miguelojeda.es/auxdisplay.htm
3456W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm 3431W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
3457S: Maintained 3432S: Maintained
@@ -3615,7 +3590,6 @@ P: Peter Zijlstra
3615M: peterz@infradead.org 3590M: peterz@infradead.org
3616P: Ingo Molnar 3591P: Ingo Molnar
3617M: mingo@redhat.com 3592M: mingo@redhat.com
3618L: linux-kernel@vger.kernel.org
3619T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git 3593T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
3620S: Maintained 3594S: Maintained
3621F: Documentation/lockdep*.txt 3595F: Documentation/lockdep*.txt
@@ -3667,7 +3641,6 @@ L: linux-m32r-ja@ml.linux-m32r.org (in Japanese)
3667W: http://www.linux-m32r.org/ 3641W: http://www.linux-m32r.org/
3668S: Maintained 3642S: Maintained
3669F: arch/m32r/ 3643F: arch/m32r/
3670F: include/asm-m32r/
3671 3644
3672M68K ARCHITECTURE 3645M68K ARCHITECTURE
3673P: Geert Uytterhoeven 3646P: Geert Uytterhoeven
@@ -3751,7 +3724,6 @@ F: include/linux/mv643xx.h
3751MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER 3724MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
3752P: Nicolas Pitre 3725P: Nicolas Pitre
3753M: nico@cam.org 3726M: nico@cam.org
3754L: linux-kernel@vger.kernel.org
3755S: Maintained 3727S: Maintained
3756 3728
3757MARVELL YUKON / SYSKONNECT DRIVER 3729MARVELL YUKON / SYSKONNECT DRIVER
@@ -3805,7 +3777,6 @@ F: drivers/scsi/megaraid/
3805 3777
3806MEMORY MANAGEMENT 3778MEMORY MANAGEMENT
3807L: linux-mm@kvack.org 3779L: linux-mm@kvack.org
3808L: linux-kernel@vger.kernel.org
3809W: http://www.linux-mm.org 3780W: http://www.linux-mm.org
3810S: Maintained 3781S: Maintained
3811F: include/linux/mm.h 3782F: include/linux/mm.h
@@ -3819,7 +3790,6 @@ M: xemul@openvz.org
3819P: KAMEZAWA Hiroyuki 3790P: KAMEZAWA Hiroyuki
3820M: kamezawa.hiroyu@jp.fujitsu.com 3791M: kamezawa.hiroyu@jp.fujitsu.com
3821L: linux-mm@kvack.org 3792L: linux-mm@kvack.org
3822L: linux-kernel@vger.kernel.org
3823S: Maintained 3793S: Maintained
3824F: mm/memcontrol.c 3794F: mm/memcontrol.c
3825 3795
@@ -3862,7 +3832,6 @@ F: arch/mips/
3862MISCELLANEOUS MCA-SUPPORT 3832MISCELLANEOUS MCA-SUPPORT
3863P: James Bottomley 3833P: James Bottomley
3864M: James.Bottomley@HansenPartnership.com 3834M: James.Bottomley@HansenPartnership.com
3865L: linux-kernel@vger.kernel.org
3866S: Maintained 3835S: Maintained
3867F: Documentation/ia64/mca.txt 3836F: Documentation/ia64/mca.txt
3868F: Documentation/mca.txt 3837F: Documentation/mca.txt
@@ -3872,7 +3841,6 @@ F: include/linux/mca*
3872MODULE SUPPORT 3841MODULE SUPPORT
3873P: Rusty Russell 3842P: Rusty Russell
3874M: rusty@rustcorp.com.au 3843M: rusty@rustcorp.com.au
3875L: linux-kernel@vger.kernel.org
3876S: Maintained 3844S: Maintained
3877F: include/linux/module.h 3845F: include/linux/module.h
3878F: kernel/module.c 3846F: kernel/module.c
@@ -3896,7 +3864,6 @@ F: drivers/mmc/host/imxmmc.*
3896MOUSE AND MISC DEVICES [GENERAL] 3864MOUSE AND MISC DEVICES [GENERAL]
3897P: Alessandro Rubini 3865P: Alessandro Rubini
3898M: rubini@ipvvis.unipv.it 3866M: rubini@ipvvis.unipv.it
3899L: linux-kernel@vger.kernel.org
3900S: Maintained 3867S: Maintained
3901F: drivers/input/mouse/ 3868F: drivers/input/mouse/
3902F: include/linux/gpio_mouse.h 3869F: include/linux/gpio_mouse.h
@@ -3904,7 +3871,6 @@ F: include/linux/gpio_mouse.h
3904MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD 3871MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
3905P: Jiri Slaby 3872P: Jiri Slaby
3906M: jirislaby@gmail.com 3873M: jirislaby@gmail.com
3907L: linux-kernel@vger.kernel.org
3908S: Maintained 3874S: Maintained
3909F: Documentation/serial/moxa-smartio 3875F: Documentation/serial/moxa-smartio
3910F: drivers/char/mxser.* 3876F: drivers/char/mxser.*
@@ -3920,7 +3886,6 @@ F: drivers/platform/x86/msi-laptop.c
3920MULTIFUNCTION DEVICES (MFD) 3886MULTIFUNCTION DEVICES (MFD)
3921P: Samuel Ortiz 3887P: Samuel Ortiz
3922M: sameo@linux.intel.com 3888M: sameo@linux.intel.com
3923L: linux-kernel@vger.kernel.org
3924T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/mfd-2.6.git 3889T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/mfd-2.6.git
3925S: Supported 3890S: Supported
3926F: drivers/mfd/ 3891F: drivers/mfd/
@@ -3928,7 +3893,6 @@ F: drivers/mfd/
3928MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM 3893MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
3929P: Pierre Ossman 3894P: Pierre Ossman
3930M: pierre@ossman.eu 3895M: pierre@ossman.eu
3931L: linux-kernel@vger.kernel.org
3932S: Maintained 3896S: Maintained
3933F: drivers/mmc/ 3897F: drivers/mmc/
3934F: include/linux/mmc/ 3898F: include/linux/mmc/
@@ -3936,7 +3900,6 @@ F: include/linux/mmc/
3936MULTIMEDIA CARD (MMC) ETC. OVER SPI 3900MULTIMEDIA CARD (MMC) ETC. OVER SPI
3937P: David Brownell 3901P: David Brownell
3938M: dbrownell@users.sourceforge.net 3902M: dbrownell@users.sourceforge.net
3939L: linux-kernel@vger.kernel.org
3940S: Odd Fixes 3903S: Odd Fixes
3941F: drivers/mmc/host/mmc_spi.c 3904F: drivers/mmc/host/mmc_spi.c
3942F: include/linux/spi/mmc_spi.h 3905F: include/linux/spi/mmc_spi.h
@@ -3951,7 +3914,6 @@ F: sound/oss/msnd*
3951MULTITECH MULTIPORT CARD (ISICOM) 3914MULTITECH MULTIPORT CARD (ISICOM)
3952P: Jiri Slaby 3915P: Jiri Slaby
3953M: jirislaby@gmail.com 3916M: jirislaby@gmail.com
3954L: linux-kernel@vger.kernel.org
3955S: Maintained 3917S: Maintained
3956F: drivers/char/isicom.c 3918F: drivers/char/isicom.c
3957F: include/linux/isicom.h 3919F: include/linux/isicom.h
@@ -4195,7 +4157,6 @@ NTFS FILESYSTEM
4195P: Anton Altaparmakov 4157P: Anton Altaparmakov
4196M: aia21@cantab.net 4158M: aia21@cantab.net
4197L: linux-ntfs-dev@lists.sourceforge.net 4159L: linux-ntfs-dev@lists.sourceforge.net
4198L: linux-kernel@vger.kernel.org
4199W: http://www.linux-ntfs.org/ 4160W: http://www.linux-ntfs.org/
4200T: git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git 4161T: git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
4201S: Maintained 4162S: Maintained
@@ -4429,7 +4390,6 @@ M: akataria@vmware.com
4429P: Rusty Russell 4390P: Rusty Russell
4430M: rusty@rustcorp.com.au 4391M: rusty@rustcorp.com.au
4431L: virtualization@lists.osdl.org 4392L: virtualization@lists.osdl.org
4432L: linux-kernel@vger.kernel.org
4433S: Supported 4393S: Supported
4434F: Documentation/ia64/paravirt_ops.txt 4394F: Documentation/ia64/paravirt_ops.txt
4435F: arch/*/kernel/paravirt* 4395F: arch/*/kernel/paravirt*
@@ -4480,7 +4440,6 @@ F: include/linux/leds-pca9532.h
4480PCI ERROR RECOVERY 4440PCI ERROR RECOVERY
4481P: Linas Vepstas 4441P: Linas Vepstas
4482M: linas@austin.ibm.com 4442M: linas@austin.ibm.com
4483L: linux-kernel@vger.kernel.org
4484L: linux-pci@vger.kernel.org 4443L: linux-pci@vger.kernel.org
4485S: Supported 4444S: Supported
4486F: Documentation/PCI/pci-error-recovery.txt 4445F: Documentation/PCI/pci-error-recovery.txt
@@ -4489,7 +4448,6 @@ F: Documentation/powerpc/eeh-pci-error-recovery.txt
4489PCI SUBSYSTEM 4448PCI SUBSYSTEM
4490P: Jesse Barnes 4449P: Jesse Barnes
4491M: jbarnes@virtuousgeek.org 4450M: jbarnes@virtuousgeek.org
4492L: linux-kernel@vger.kernel.org
4493L: linux-pci@vger.kernel.org 4451L: linux-pci@vger.kernel.org
4494T: git git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git 4452T: git git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git
4495S: Supported 4453S: Supported
@@ -4524,7 +4482,6 @@ F: drivers/net/pcnet32.c
4524PER-TASK DELAY ACCOUNTING 4482PER-TASK DELAY ACCOUNTING
4525P: Balbir Singh 4483P: Balbir Singh
4526M: balbir@linux.vnet.ibm.com 4484M: balbir@linux.vnet.ibm.com
4527L: linux-kernel@vger.kernel.org
4528S: Maintained 4485S: Maintained
4529F: include/linux/delayacct.h 4486F: include/linux/delayacct.h
4530F: kernel/delayacct.c 4487F: kernel/delayacct.c
@@ -4556,7 +4513,6 @@ F: drivers/mtd/devices/phram.c
4556PKTCDVD DRIVER 4513PKTCDVD DRIVER
4557P: Peter Osterlund 4514P: Peter Osterlund
4558M: petero2@telia.com 4515M: petero2@telia.com
4559L: linux-kernel@vger.kernel.org
4560S: Maintained 4516S: Maintained
4561F: drivers/block/pktcdvd.c 4517F: drivers/block/pktcdvd.c
4562F: include/linux/pktcdvd.h 4518F: include/linux/pktcdvd.h
@@ -4564,7 +4520,6 @@ F: include/linux/pktcdvd.h
4564POSIX CLOCKS and TIMERS 4520POSIX CLOCKS and TIMERS
4565P: Thomas Gleixner 4521P: Thomas Gleixner
4566M: tglx@linutronix.de 4522M: tglx@linutronix.de
4567L: linux-kernel@vger.kernel.org
4568S: Supported 4523S: Supported
4569F: fs/timerfd.c 4524F: fs/timerfd.c
4570F: include/linux/timer* 4525F: include/linux/timer*
@@ -4575,7 +4530,6 @@ P: Anton Vorontsov
4575M: cbou@mail.ru 4530M: cbou@mail.ru
4576P: David Woodhouse 4531P: David Woodhouse
4577M: dwmw2@infradead.org 4532M: dwmw2@infradead.org
4578L: linux-kernel@vger.kernel.org
4579T: git git://git.infradead.org/battery-2.6.git 4533T: git git://git.infradead.org/battery-2.6.git
4580S: Maintained 4534S: Maintained
4581F: include/linux/power_supply.h 4535F: include/linux/power_supply.h
@@ -4627,7 +4581,6 @@ F: include/linux/if_pppol2tp.h
4627PREEMPTIBLE KERNEL 4581PREEMPTIBLE KERNEL
4628P: Robert Love 4582P: Robert Love
4629M: rml@tech9.net 4583M: rml@tech9.net
4630L: linux-kernel@vger.kernel.org
4631L: kpreempt-tech@lists.sourceforge.net 4584L: kpreempt-tech@lists.sourceforge.net
4632W: ftp://ftp.kernel.org/pub/linux/kernel/people/rml/preempt-kernel 4585W: ftp://ftp.kernel.org/pub/linux/kernel/people/rml/preempt-kernel
4633S: Supported 4586S: Supported
@@ -4690,7 +4643,6 @@ P: Roland McGrath
4690M: roland@redhat.com 4643M: roland@redhat.com
4691P: Oleg Nesterov 4644P: Oleg Nesterov
4692M: oleg@redhat.com 4645M: oleg@redhat.com
4693L: linux-kernel@vger.kernel.org
4694S: Maintained 4646S: Maintained
4695F: include/asm-generic/syscall.h 4647F: include/asm-generic/syscall.h
4696F: include/linux/ptrace.h 4648F: include/linux/ptrace.h
@@ -4776,7 +4728,6 @@ F: drivers/net/qlge/
4776QNX4 FILESYSTEM 4728QNX4 FILESYSTEM
4777P: Anders Larsen 4729P: Anders Larsen
4778M: al@alarsen.net 4730M: al@alarsen.net
4779L: linux-kernel@vger.kernel.org
4780W: http://www.alarsen.net/linux/qnx4fs/ 4731W: http://www.alarsen.net/linux/qnx4fs/
4781S: Maintained 4732S: Maintained
4782F: fs/qnx4/ 4733F: fs/qnx4/
@@ -4823,7 +4774,6 @@ F: drivers/char/random.c
4823RAPIDIO SUBSYSTEM 4774RAPIDIO SUBSYSTEM
4824P: Matt Porter 4775P: Matt Porter
4825M: mporter@kernel.crashing.org 4776M: mporter@kernel.crashing.org
4826L: linux-kernel@vger.kernel.org
4827S: Maintained 4777S: Maintained
4828F: drivers/rapidio/ 4778F: drivers/rapidio/
4829 4779
@@ -4837,7 +4787,8 @@ F: drivers/net/wireless/ray*
4837RCUTORTURE MODULE 4787RCUTORTURE MODULE
4838P: Josh Triplett 4788P: Josh Triplett
4839M: josh@freedesktop.org 4789M: josh@freedesktop.org
4840L: linux-kernel@vger.kernel.org 4790P: Paul E. McKenney
4791M: paulmck@linux.vnet.ibm.com
4841S: Maintained 4792S: Maintained
4842F: Documentation/RCU/torture.txt 4793F: Documentation/RCU/torture.txt
4843F: kernel/rcutorture.c 4794F: kernel/rcutorture.c
@@ -4845,7 +4796,6 @@ F: kernel/rcutorture.c
4845RDC R-321X SoC 4796RDC R-321X SoC
4846P: Florian Fainelli 4797P: Florian Fainelli
4847M: florian@openwrt.org 4798M: florian@openwrt.org
4848L: linux-kernel@vger.kernel.org
4849S: Maintained 4799S: Maintained
4850 4800
4851RDC R6040 FAST ETHERNET DRIVER 4801RDC R6040 FAST ETHERNET DRIVER
@@ -4865,8 +4815,9 @@ F: net/rds/
4865READ-COPY UPDATE (RCU) 4815READ-COPY UPDATE (RCU)
4866P: Dipankar Sarma 4816P: Dipankar Sarma
4867M: dipankar@in.ibm.com 4817M: dipankar@in.ibm.com
4818P: Paul E. McKenney
4819M: paulmck@linux.vnet.ibm.com
4868W: http://www.rdrop.com/users/paulmck/rclock/ 4820W: http://www.rdrop.com/users/paulmck/rclock/
4869L: linux-kernel@vger.kernel.org
4870S: Supported 4821S: Supported
4871F: Documentation/RCU/rcu.txt 4822F: Documentation/RCU/rcu.txt
4872F: Documentation/RCU/rcuref.txt 4823F: Documentation/RCU/rcuref.txt
@@ -4877,7 +4828,6 @@ F: kernel/rcupdate.c
4877REAL TIME CLOCK DRIVER 4828REAL TIME CLOCK DRIVER
4878P: Paul Gortmaker 4829P: Paul Gortmaker
4879M: p_gortmaker@yahoo.com 4830M: p_gortmaker@yahoo.com
4880L: linux-kernel@vger.kernel.org
4881S: Maintained 4831S: Maintained
4882F: Documentation/rtc.txt 4832F: Documentation/rtc.txt
4883F: drivers/rtc/ 4833F: drivers/rtc/
@@ -5015,7 +4965,6 @@ S3C24XX SD/MMC Driver
5015P: Ben Dooks 4965P: Ben Dooks
5016M: ben-linux@fluff.org 4966M: ben-linux@fluff.org
5017L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) 4967L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
5018L: linux-kernel@vger.kernel.org
5019S: Supported 4968S: Supported
5020F: drivers/mmc/host/s3cmci.* 4969F: drivers/mmc/host/s3cmci.*
5021 4970
@@ -5041,7 +4990,6 @@ P: Ingo Molnar
5041M: mingo@elte.hu 4990M: mingo@elte.hu
5042P: Peter Zijlstra 4991P: Peter Zijlstra
5043M: peterz@infradead.org 4992M: peterz@infradead.org
5044L: linux-kernel@vger.kernel.org
5045S: Maintained 4993S: Maintained
5046F: kernel/sched* 4994F: kernel/sched*
5047F: include/linux/sched.h 4995F: include/linux/sched.h
@@ -5143,7 +5091,6 @@ F: drivers/mmc/host/sdhci.*
5143SECURITY SUBSYSTEM 5091SECURITY SUBSYSTEM
5144P: James Morris 5092P: James Morris
5145M: jmorris@namei.org 5093M: jmorris@namei.org
5146L: linux-kernel@vger.kernel.org
5147L: linux-security-module@vger.kernel.org (suggested Cc:) 5094L: linux-security-module@vger.kernel.org (suggested Cc:)
5148T: git git://www.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git 5095T: git git://www.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
5149W: http://security.wiki.kernel.org/ 5096W: http://security.wiki.kernel.org/
@@ -5162,7 +5109,6 @@ P: James Morris
5162M: jmorris@namei.org 5109M: jmorris@namei.org
5163P: Eric Paris 5110P: Eric Paris
5164M: eparis@parisplace.org 5111M: eparis@parisplace.org
5165L: linux-kernel@vger.kernel.org (kernel issues)
5166L: selinux@tycho.nsa.gov (subscribers-only, general discussion) 5112L: selinux@tycho.nsa.gov (subscribers-only, general discussion)
5167W: http://selinuxproject.org 5113W: http://selinuxproject.org
5168T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git 5114T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
@@ -5425,7 +5371,6 @@ F: include/linux/sony-laptop.h
5425SONY MEMORYSTICK CARD SUPPORT 5371SONY MEMORYSTICK CARD SUPPORT
5426P: Alex Dubov 5372P: Alex Dubov
5427M: oakad@yahoo.com 5373M: oakad@yahoo.com
5428L: linux-kernel@vger.kernel.org
5429W: http://tifmxx.berlios.de/ 5374W: http://tifmxx.berlios.de/
5430S: Maintained 5375S: Maintained
5431F: drivers/memstick/host/tifm_ms.c 5376F: drivers/memstick/host/tifm_ms.c
@@ -5435,7 +5380,7 @@ P: Jaroslav Kysela
5435M: perex@perex.cz 5380M: perex@perex.cz
5436P: Takashi Iwai 5381P: Takashi Iwai
5437M: tiwai@suse.de 5382M: tiwai@suse.de
5438L: alsa-devel@alsa-project.org (subscribers-only) 5383L: alsa-devel@alsa-project.org (moderated for non-subscribers)
5439W: http://www.alsa-project.org/ 5384W: http://www.alsa-project.org/
5440T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git 5385T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git
5441T: git git://git.alsa-project.org/alsa-kernel.git 5386T: git git://git.alsa-project.org/alsa-kernel.git
@@ -5450,7 +5395,7 @@ M: lrg@slimlogic.co.uk
5450P: Mark Brown 5395P: Mark Brown
5451M: broonie@opensource.wolfsonmicro.com 5396M: broonie@opensource.wolfsonmicro.com
5452T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git 5397T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git
5453L: alsa-devel@alsa-project.org (subscribers-only) 5398L: alsa-devel@alsa-project.org (moderated for non-subscribers)
5454W: http://alsa-project.org/main/index.php/ASoC 5399W: http://alsa-project.org/main/index.php/ASoC
5455S: Supported 5400S: Supported
5456F: sound/soc/ 5401F: sound/soc/
@@ -5468,7 +5413,6 @@ F: arch/sparc/
5468SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER 5413SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
5469P: Roger Wolff 5414P: Roger Wolff
5470M: R.E.Wolff@BitWizard.nl 5415M: R.E.Wolff@BitWizard.nl
5471L: linux-kernel@vger.kernel.org
5472S: Supported 5416S: Supported
5473F: Documentation/serial/specialix.txt 5417F: Documentation/serial/specialix.txt
5474F: drivers/char/specialix* 5418F: drivers/char/specialix*
@@ -5514,7 +5458,6 @@ F: fs/squashfs/
5514SRM (Alpha) environment access 5458SRM (Alpha) environment access
5515P: Jan-Benedict Glaw 5459P: Jan-Benedict Glaw
5516M: jbglaw@lug-owl.de 5460M: jbglaw@lug-owl.de
5517L: linux-kernel@vger.kernel.org
5518S: Maintained 5461S: Maintained
5519F: arch/alpha/kernel/srm_env.c 5462F: arch/alpha/kernel/srm_env.c
5520 5463
@@ -5529,7 +5472,6 @@ S: Maintained
5529STAGING SUBSYSTEM 5472STAGING SUBSYSTEM
5530P: Greg Kroah-Hartman 5473P: Greg Kroah-Hartman
5531M: gregkh@suse.de 5474M: gregkh@suse.de
5532L: linux-kernel@vger.kernel.org
5533T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ 5475T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
5534S: Maintained 5476S: Maintained
5535F: drivers/staging/ 5477F: drivers/staging/
@@ -5609,7 +5551,6 @@ F: include/linux/sysv_fs.h
5609TASKSTATS STATISTICS INTERFACE 5551TASKSTATS STATISTICS INTERFACE
5610P: Balbir Singh 5552P: Balbir Singh
5611M: balbir@linux.vnet.ibm.com 5553M: balbir@linux.vnet.ibm.com
5612L: linux-kernel@vger.kernel.org
5613S: Maintained 5554S: Maintained
5614F: Documentation/accounting/taskstats* 5555F: Documentation/accounting/taskstats*
5615F: include/linux/taskstats* 5556F: include/linux/taskstats*
@@ -5702,7 +5643,6 @@ P: Kentaro Takeda
5702M: takedakn@nttdata.co.jp 5643M: takedakn@nttdata.co.jp
5703P: Tetsuo Handa 5644P: Tetsuo Handa
5704M: penguin-kernel@I-love.SAKURA.ne.jp 5645M: penguin-kernel@I-love.SAKURA.ne.jp
5705L: linux-kernel@vger.kernel.org (kernel issues)
5706L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for developers and users in English) 5646L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for developers and users in English)
5707L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese) 5647L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
5708L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese) 5648L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
@@ -5754,14 +5694,17 @@ F: drivers/char/tpm/
5754TRIVIAL PATCHES 5694TRIVIAL PATCHES
5755P: Jiri Kosina 5695P: Jiri Kosina
5756M: trivial@kernel.org 5696M: trivial@kernel.org
5757L: linux-kernel@vger.kernel.org
5758T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git 5697T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
5759S: Maintained 5698S: Maintained
5699F: drivers/char/tty_*
5700F: drivers/serial/serial_core.c
5701F: include/linux/serial_core.h
5702F: include/linux/serial.h
5703F: include/linux/tty.h
5760 5704
5761TTY LAYER 5705TTY LAYER
5762P: Alan Cox 5706P: Alan Cox
5763M: alan@lxorguk.ukuu.org.uk 5707M: alan@lxorguk.ukuu.org.uk
5764L: linux-kernel@vger.kernel.org
5765S: Maintained 5708S: Maintained
5766T: stgit http://zeniv.linux.org.uk/~alan/ttydev/ 5709T: stgit http://zeniv.linux.org.uk/~alan/ttydev/
5767 5710
@@ -5834,7 +5777,6 @@ F: fs/udf/
5834UFS FILESYSTEM 5777UFS FILESYSTEM
5835P: Evgeniy Dushistov 5778P: Evgeniy Dushistov
5836M: dushistov@mail.ru 5779M: dushistov@mail.ru
5837L: linux-kernel@vger.kernel.org
5838S: Maintained 5780S: Maintained
5839F: Documentation/filesystems/ufs.txt 5781F: Documentation/filesystems/ufs.txt
5840F: fs/ufs/ 5782F: fs/ufs/
@@ -5851,7 +5793,6 @@ F: include/linux/uwb/
5851UNIFORM CDROM DRIVER 5793UNIFORM CDROM DRIVER
5852P: Jens Axboe 5794P: Jens Axboe
5853M: axboe@kernel.dk 5795M: axboe@kernel.dk
5854L: linux-kernel@vger.kernel.org
5855W: http://www.kernel.dk 5796W: http://www.kernel.dk
5856S: Maintained 5797S: Maintained
5857F: Documentation/cdrom/ 5798F: Documentation/cdrom/
@@ -5880,7 +5821,6 @@ F: drivers/usb/class/cdc-acm.*
5880USB BLOCK DRIVER (UB ub) 5821USB BLOCK DRIVER (UB ub)
5881P: Pete Zaitcev 5822P: Pete Zaitcev
5882M: zaitcev@redhat.com 5823M: zaitcev@redhat.com
5883L: linux-kernel@vger.kernel.org
5884L: linux-usb@vger.kernel.org 5824L: linux-usb@vger.kernel.org
5885S: Supported 5825S: Supported
5886F: drivers/block/ub.c 5826F: drivers/block/ub.c
@@ -6226,7 +6166,6 @@ P: Hans J. Koch
6226M: hjk@linutronix.de 6166M: hjk@linutronix.de
6227P: Greg Kroah-Hartman 6167P: Greg Kroah-Hartman
6228M: gregkh@suse.de 6168M: gregkh@suse.de
6229L: linux-kernel@vger.kernel.org
6230S: Maintained 6169S: Maintained
6231F: Documentation/DocBook/uio-howto.tmpl 6170F: Documentation/DocBook/uio-howto.tmpl
6232F: drivers/uio/ 6171F: drivers/uio/
@@ -6252,7 +6191,6 @@ F: drivers/video/uvesafb.*
6252VFAT/FAT/MSDOS FILESYSTEM 6191VFAT/FAT/MSDOS FILESYSTEM
6253P: OGAWA Hirofumi 6192P: OGAWA Hirofumi
6254M: hirofumi@mail.parknet.co.jp 6193M: hirofumi@mail.parknet.co.jp
6255L: linux-kernel@vger.kernel.org
6256S: Maintained 6194S: Maintained
6257F: Documentation/filesystems/vfat.txt 6195F: Documentation/filesystems/vfat.txt
6258F: fs/fat/ 6196F: fs/fat/
@@ -6296,6 +6234,14 @@ F: drivers/net/macvlan.c
6296F: include/linux/if_*vlan.h 6234F: include/linux/if_*vlan.h
6297F: net/8021q/ 6235F: net/8021q/
6298 6236
6237VLYNQ BUS
6238P: Florian Fainelli
6239M: florian@openwrt.org
6240L: openwrt-devel@lists.openwrt.org
6241S: Maintained
6242F: drivers/vlynq/vlynq.c
6243F: include/linux/vlynq.h
6244
6299VOLTAGE AND CURRENT REGULATOR FRAMEWORK 6245VOLTAGE AND CURRENT REGULATOR FRAMEWORK
6300P: Liam Girdwood 6246P: Liam Girdwood
6301M: lrg@slimlogic.co.uk 6247M: lrg@slimlogic.co.uk
@@ -6349,7 +6295,6 @@ F: drivers/hwmon/w83793.c
6349W83L51xD SD/MMC CARD INTERFACE DRIVER 6295W83L51xD SD/MMC CARD INTERFACE DRIVER
6350P: Pierre Ossman 6296P: Pierre Ossman
6351M: pierre@ossman.eu 6297M: pierre@ossman.eu
6352L: linux-kernel@vger.kernel.org
6353S: Maintained 6298S: Maintained
6354F: drivers/mmc/host/wbsd.* 6299F: drivers/mmc/host/wbsd.*
6355 6300
@@ -6436,7 +6381,6 @@ M: mingo@redhat.com
6436P: H. Peter Anvin 6381P: H. Peter Anvin
6437M: hpa@zytor.com 6382M: hpa@zytor.com
6438M: x86@kernel.org 6383M: x86@kernel.org
6439L: linux-kernel@vger.kernel.org
6440T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git 6384T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
6441S: Maintained 6385S: Maintained
6442F: Documentation/x86/ 6386F: Documentation/x86/
@@ -6472,7 +6416,6 @@ XILINX SYSTEMACE DRIVER
6472P: Grant Likely 6416P: Grant Likely
6473M: grant.likely@secretlab.ca 6417M: grant.likely@secretlab.ca
6474W: http://www.secretlab.ca/ 6418W: http://www.secretlab.ca/
6475L: linux-kernel@vger.kernel.org
6476S: Maintained 6419S: Maintained
6477F: drivers/block/xsysace.c 6420F: drivers/block/xsysace.c
6478 6421
@@ -6537,5 +6480,9 @@ F: drivers/serial/zs.*
6537 6480
6538THE REST 6481THE REST
6539P: Linus Torvalds 6482P: Linus Torvalds
6483M: torvalds@linux-foundation.org
6484L: linux-kernel@vger.kernel.org
6540T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git 6485T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
6541S: Buried alive in reporters 6486S: Buried alive in reporters
6487F: *
6488F: */
diff --git a/arch/alpha/include/asm/8253pit.h b/arch/alpha/include/asm/8253pit.h
index fef5c1450e47..a71c9c1455a7 100644
--- a/arch/alpha/include/asm/8253pit.h
+++ b/arch/alpha/include/asm/8253pit.h
@@ -1,10 +1,3 @@
 /*
  * 8253/8254 Programmable Interval Timer
  */
-
-#ifndef _8253PIT_H
-#define _8253PIT_H
-
-#define PIT_TICK_RATE 1193180UL
-
-#endif
diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h
index 3e6735a34c57..a8d4ec8ea4b6 100644
--- a/arch/alpha/include/asm/kmap_types.h
+++ b/arch/alpha/include/asm/kmap_types.h
@@ -3,30 +3,12 @@
 
 /* Dummy header just to define km_type. */
 
-
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)	KM_BOUNCE_READ,
-D(1)	KM_SKB_SUNRPC_DATA,
-D(2)	KM_SKB_DATA_SOFTIRQ,
-D(3)	KM_USER0,
-D(4)	KM_USER1,
-D(5)	KM_BIO_SRC_IRQ,
-D(6)	KM_BIO_DST_IRQ,
-D(7)	KM_PTE0,
-D(8)	KM_PTE1,
-D(9)	KM_IRQ0,
-D(10)	KM_IRQ1,
-D(11)	KM_SOFTIRQ0,
-D(12)	KM_SOFTIRQ1,
-D(13)	KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif
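The kmap_types.h conversions in this series (alpha here; blackfin, cris, h8300, ia64 and m32r below) all reduce to the same pattern: the arch keeps at most a __WITH_KM_FENCE switch and pulls the actual slot list from asm-generic. A minimal sketch of the generic header this relies on; the slot list shown is the common core shared by the arch copies being deleted, and the real include/asm-generic/kmap_types.h may carry additional arch-specific slots (KM_SYNC_ICACHE and friends), so treat the exact contents as an assumption:

	/* sketch of include/asm-generic/kmap_types.h, as assumed here */
	#ifdef __WITH_KM_FENCE
	# define D(n) __KM_FENCE_##n ,	/* debug builds interleave guard slots */
	#else
	# define D(n)
	#endif

	enum km_type {
	D(0)	KM_BOUNCE_READ,
	D(1)	KM_SKB_SUNRPC_DATA,
	D(2)	KM_SKB_DATA_SOFTIRQ,
	D(3)	KM_USER0,
	D(4)	KM_USER1,
	D(5)	KM_BIO_SRC_IRQ,
	D(6)	KM_BIO_DST_IRQ,
	D(7)	KM_PTE0,
	D(8)	KM_PTE1,
	D(9)	KM_IRQ0,
	D(10)	KM_IRQ1,
	D(11)	KM_SOFTIRQ0,
	D(12)	KM_SOFTIRQ1,
	D(13)	KM_TYPE_NR	/* the real header may insert extra slots before this */
	};

	#undef D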
diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c
index c2938e574a40..19b86328ffd7 100644
--- a/arch/alpha/kernel/init_task.c
+++ b/arch/alpha/kernel/init_task.c
@@ -10,10 +10,7 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
 struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_mm);
 EXPORT_SYMBOL(init_task);
 
 union thread_union init_thread_union
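Context for the init_task.c hunks (repeated below for arm, avr32, blackfin, cris, frv, h8300, ia64 and m32r): each arch used to define and export its own init_mm, and those per-arch definitions are dropped in favor of a single shared one. A sketch of that shared definition, assuming it lands in mm/init-mm.c with the conventional initializers:

	/* sketch of the shared definition, assumed to live in mm/init-mm.c */
	#include <linux/mm_types.h>
	#include <linux/rbtree.h>
	#include <linux/rwsem.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/cpumask.h>

	#include <asm/atomic.h>
	#include <asm/pgtable.h>

	struct mm_struct init_mm = {
		.mm_rb		= RB_ROOT,
		.pgd		= swapper_pg_dir,
		.mm_users	= ATOMIC_INIT(2),
		.mm_count	= ATOMIC_INIT(1),
		.mmap_sem	= __RWSEM_INITIALIZER(init_mm.mmap_sem),
		.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
		.mmlist		= LIST_HEAD_INIT(init_mm.mmlist),
		.cpu_vm_mask	= CPU_MASK_ALL,
	};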
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 67c19f8a9944..38c805dfc544 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -227,7 +227,7 @@ struct irqaction timer_irqaction = {
 	.name		= "timer",
 };
 
-static struct hw_interrupt_type rtc_irq_type = {
+static struct irq_chip rtc_irq_type = {
 	.typename	= "RTC",
 	.startup	= rtc_startup,
 	.shutdown	= rtc_enable_disable,
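The hw_interrupt_type renames that make up most of the remaining hunks are mechanical: hw_interrupt_type is, as assumed here, a compatibility #define for struct irq_chip in <linux/irq.h>, so switching the declarations changes spelling, not behavior. A standalone toy model (struct layout abbreviated, callback names hypothetical):

	/* toy model: the old name is only an alias for the new type */
	struct irq_chip {
		const char *typename;
		unsigned int (*startup)(unsigned int irq);
		void (*shutdown)(unsigned int irq);
	};

	/* compatibility alias, assumed to match the one in <linux/irq.h> */
	#define hw_interrupt_type irq_chip

	static unsigned int rtc_startup(unsigned int irq) { return 0; }
	static void rtc_enable_disable(unsigned int irq) { }

	/* both declarations name the same type, so the rename is safe */
	static struct hw_interrupt_type old_style =
		{ "RTC", rtc_startup, rtc_enable_disable };
	static struct irq_chip new_style =
		{ "RTC", rtc_startup, rtc_enable_disable };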
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 9405bee9894e..50bfec9b588f 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -83,7 +83,7 @@ i8259a_end_irq(unsigned int irq)
 	i8259a_enable_irq(irq);
 }
 
-struct hw_interrupt_type i8259a_irq_type = {
+struct irq_chip i8259a_irq_type = {
 	.typename	= "XT-PIC",
 	.startup	= i8259a_startup_irq,
 	.shutdown	= i8259a_disable_irq,
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index cc9a8a7aa279..b63ccd7386f1 100644
--- a/arch/alpha/kernel/irq_impl.h
+++ b/arch/alpha/kernel/irq_impl.h
@@ -36,7 +36,7 @@ extern void i8259a_disable_irq(unsigned int);
 extern void i8259a_mask_and_ack_irq(unsigned int);
 extern unsigned int i8259a_startup_irq(unsigned int);
 extern void i8259a_end_irq(unsigned int);
-extern struct hw_interrupt_type i8259a_irq_type;
+extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
 extern void handle_irq(int irq);
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index d53edbccbfe5..69199a76ec4a 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -70,7 +70,7 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 	*(vulp)PYXIS_INT_MASK;
 }
 
-static struct hw_interrupt_type pyxis_irq_type = {
+static struct irq_chip pyxis_irq_type = {
 	.typename	= "PYXIS",
 	.startup	= pyxis_startup_irq,
 	.shutdown	= pyxis_disable_irq,
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index a03fbca4940e..85229369a1f8 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -48,7 +48,7 @@ srm_end_irq(unsigned int irq)
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness. */
-static struct hw_interrupt_type srm_irq_type = {
+static struct irq_chip srm_irq_type = {
 	.typename	= "SRM",
 	.startup	= srm_startup_irq,
 	.shutdown	= srm_disable_irq,
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 80df86cd746b..d2634e4476b4 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -252,9 +252,9 @@ reserve_std_resources(void)
 }
 
 #define PFN_MAX PFN_DOWN(0x80000000)
-#define for_each_mem_cluster(memdesc, cluster, i) \
-	for ((cluster) = (memdesc)->cluster, (i) = 0; \
-	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)
+#define for_each_mem_cluster(memdesc, _cluster, i) \
+	for ((_cluster) = (memdesc)->cluster, (i) = 0; \
+	     (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
 
 static unsigned long __init
 get_mem_size_limit(char *s)
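The for_each_mem_cluster change above (and its twin in arch/alpha/mm/numa.c below) is a macro-hygiene fix: with the parameter spelled cluster, the token after -> in (memdesc)->cluster was also substituted, so the macro only expanded correctly for callers whose iterator variable happened to be named cluster. Renaming the parameter to _cluster leaves the member reference alone. A standalone illustration (all names hypothetical):

	#include <stdio.h>

	struct cluster { int pages; };
	struct memdesc {
		struct cluster *cluster;	/* member shares its name with iterators */
		int numclusters;
	};

	/* broken form: with a parameter named "cluster",
	 * for_each_mem_cluster(md, cl, i) would expand the member access
	 * to (md)->cl, which does not exist; "_cluster" avoids the capture */
	#define for_each_mem_cluster_fixed(memdesc, _cluster, i) \
		for ((_cluster) = (memdesc)->cluster, (i) = 0; \
		     (i) < (memdesc)->numclusters; (i)++, (_cluster)++)

	int main(void)
	{
		struct cluster c[2] = { { 8 }, { 16 } };
		struct memdesc md = { c, 2 };
		struct cluster *cl;	/* any iterator name now works */
		int i;

		for_each_mem_cluster_fixed(&md, cl, i)
			printf("cluster %d: %d pages\n", i, cl->pages);
		return 0;
	}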
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index e53a1e1c2f21..382035ef7394 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -89,7 +89,7 @@ alcor_end_irq(unsigned int irq)
 	alcor_enable_irq(irq);
 }
 
-static struct hw_interrupt_type alcor_irq_type = {
+static struct irq_chip alcor_irq_type = {
 	.typename	= "ALCOR",
 	.startup	= alcor_startup_irq,
 	.shutdown	= alcor_disable_irq,
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index ace475c124f6..ed349436732b 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -71,7 +71,7 @@ cabriolet_end_irq(unsigned int irq)
 	cabriolet_enable_irq(irq);
 }
 
-static struct hw_interrupt_type cabriolet_irq_type = {
+static struct irq_chip cabriolet_irq_type = {
 	.typename	= "CABRIOLET",
 	.startup	= cabriolet_startup_irq,
 	.shutdown	= cabriolet_disable_irq,
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 5bd5259324b7..46e70ece5176 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -198,7 +198,7 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 	return 0;
 }
 
-static struct hw_interrupt_type dp264_irq_type = {
+static struct irq_chip dp264_irq_type = {
 	.typename	= "DP264",
 	.startup	= dp264_startup_irq,
 	.shutdown	= dp264_disable_irq,
@@ -209,7 +209,7 @@ static struct hw_interrupt_type dp264_irq_type = {
 	.set_affinity	= dp264_set_affinity,
 };
 
-static struct hw_interrupt_type clipper_irq_type = {
+static struct irq_chip clipper_irq_type = {
 	.typename	= "CLIPPER",
 	.startup	= clipper_startup_irq,
 	.shutdown	= clipper_disable_irq,
@@ -298,7 +298,7 @@ clipper_srm_device_interrupt(unsigned long vector)
 }
 
 static void __init
-init_tsunami_irqs(struct hw_interrupt_type * ops, int imin, int imax)
+init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index 9c5a306dc0ee..660c23ef661f 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -69,7 +69,7 @@ eb64p_end_irq(unsigned int irq)
 	eb64p_enable_irq(irq);
 }
 
-static struct hw_interrupt_type eb64p_irq_type = {
+static struct irq_chip eb64p_irq_type = {
 	.typename	= "EB64P",
 	.startup	= eb64p_startup_irq,
 	.shutdown	= eb64p_disable_irq,
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index baf60f36cbd7..b99ea488d844 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -80,7 +80,7 @@ eiger_end_irq(unsigned int irq)
 	eiger_enable_irq(irq);
 }
 
-static struct hw_interrupt_type eiger_irq_type = {
+static struct irq_chip eiger_irq_type = {
 	.typename	= "EIGER",
 	.startup	= eiger_startup_irq,
 	.shutdown	= eiger_disable_irq,
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 2b5caf3d9b15..ef0b83a070ac 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -118,7 +118,7 @@ jensen_local_end(unsigned int irq)
 	i8259a_end_irq(1);
 }
 
-static struct hw_interrupt_type jensen_local_irq_type = {
+static struct irq_chip jensen_local_irq_type = {
 	.typename	= "LOCAL",
 	.startup	= jensen_local_startup,
 	.shutdown	= jensen_local_shutdown,
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index c5a1a2438c67..bbfc4f20ca72 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -169,7 +169,7 @@ marvel_irq_noop_return(unsigned int irq)
 	return 0;
 }
 
-static struct hw_interrupt_type marvel_legacy_irq_type = {
+static struct irq_chip marvel_legacy_irq_type = {
 	.typename	= "LEGACY",
 	.startup	= marvel_irq_noop_return,
 	.shutdown	= marvel_irq_noop,
@@ -179,7 +179,7 @@ static struct hw_interrupt_type marvel_legacy_irq_type = {
 	.end		= marvel_irq_noop,
 };
 
-static struct hw_interrupt_type io7_lsi_irq_type = {
+static struct irq_chip io7_lsi_irq_type = {
 	.typename	= "LSI",
 	.startup	= io7_startup_irq,
 	.shutdown	= io7_disable_irq,
@@ -189,7 +189,7 @@ static struct hw_interrupt_type io7_lsi_irq_type = {
 	.end		= io7_end_irq,
 };
 
-static struct hw_interrupt_type io7_msi_irq_type = {
+static struct irq_chip io7_msi_irq_type = {
 	.typename	= "MSI",
 	.startup	= io7_startup_irq,
 	.shutdown	= io7_disable_irq,
@@ -273,8 +273,8 @@ init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
 
 static void __init
 init_io7_irqs(struct io7 *io7,
-	      struct hw_interrupt_type *lsi_ops,
-	      struct hw_interrupt_type *msi_ops)
+	      struct irq_chip *lsi_ops,
+	      struct irq_chip *msi_ops)
 {
 	long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
 	long i;
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 8d3e9429c5ee..4e366641a08e 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -68,7 +68,7 @@ mikasa_end_irq(unsigned int irq)
 	mikasa_enable_irq(irq);
 }
 
-static struct hw_interrupt_type mikasa_irq_type = {
+static struct irq_chip mikasa_irq_type = {
 	.typename	= "MIKASA",
 	.startup	= mikasa_startup_irq,
 	.shutdown	= mikasa_disable_irq,
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 538876b62449..35753a173bac 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -73,7 +73,7 @@ noritake_end_irq(unsigned int irq)
 	noritake_enable_irq(irq);
 }
 
-static struct hw_interrupt_type noritake_irq_type = {
+static struct irq_chip noritake_irq_type = {
 	.typename	= "NORITAKE",
 	.startup	= noritake_startup_irq,
 	.shutdown	= noritake_disable_irq,
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 672cb2df53df..f3aec7e085c8 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -135,7 +135,7 @@ rawhide_end_irq(unsigned int irq)
 	rawhide_enable_irq(irq);
 }
 
-static struct hw_interrupt_type rawhide_irq_type = {
+static struct irq_chip rawhide_irq_type = {
 	.typename	= "RAWHIDE",
 	.startup	= rawhide_startup_irq,
 	.shutdown	= rawhide_disable_irq,
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index f15a329b6011..d9f9cfeb9931 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
+#include <linux/timex.h>
 #include <linux/init.h>
 
 #include <asm/ptrace.h>
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index ce1faa6f1df1..fc9246373452 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -72,7 +72,7 @@ rx164_end_irq(unsigned int irq)
 	rx164_enable_irq(irq);
 }
 
-static struct hw_interrupt_type rx164_irq_type = {
+static struct irq_chip rx164_irq_type = {
 	.typename	= "RX164",
 	.startup	= rx164_startup_irq,
 	.shutdown	= rx164_disable_irq,
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 9e263256a42d..426eb6906d01 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -501,7 +501,7 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 	spin_unlock(&sable_lynx_irq_lock);
 }
 
-static struct hw_interrupt_type sable_lynx_irq_type = {
+static struct irq_chip sable_lynx_irq_type = {
 	.typename	= "SABLE/LYNX",
 	.startup	= sable_lynx_startup_irq,
 	.shutdown	= sable_lynx_disable_irq,
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 9bd9a31450c6..830318c21661 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -74,7 +74,7 @@ takara_end_irq(unsigned int irq)
 	takara_enable_irq(irq);
 }
 
-static struct hw_interrupt_type takara_irq_type = {
+static struct irq_chip takara_irq_type = {
 	.typename	= "TAKARA",
 	.startup	= takara_startup_irq,
 	.shutdown	= takara_disable_irq,
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 8dd239ebdb9e..88978fc60f83 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -185,7 +185,7 @@ titan_srm_device_interrupt(unsigned long vector)
 
 
 static void __init
-init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
+init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
@@ -194,7 +194,7 @@ init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
 	}
 }
 
-static struct hw_interrupt_type titan_irq_type = {
+static struct irq_chip titan_irq_type = {
 	.typename	= "TITAN",
 	.startup	= titan_startup_irq,
 	.shutdown	= titan_disable_irq,
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 42c3eede4d09..e91b4c3838a8 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -157,7 +157,7 @@ wildfire_end_irq(unsigned int irq)
 	wildfire_enable_irq(irq);
 }
 
-static struct hw_interrupt_type wildfire_irq_type = {
+static struct irq_chip wildfire_irq_type = {
 	.typename	= "WILDFIRE",
 	.startup	= wildfire_startup_irq,
 	.shutdown	= wildfire_disable_irq,
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index a13de49d1265..0eab55749423 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -28,9 +28,9 @@ EXPORT_SYMBOL(node_data);
 #define DBGDCONT(args...)
 #endif
 
-#define for_each_mem_cluster(memdesc, cluster, i) \
-	for ((cluster) = (memdesc)->cluster, (i) = 0; \
-	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)
+#define for_each_mem_cluster(memdesc, _cluster, i) \
+	for ((_cluster) = (memdesc)->cluster, (i) = 0; \
+	     (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
 
 static void __init show_mem_layout(void)
 {
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
index e859af349467..3f470866bb89 100644
--- a/arch/arm/kernel/init_task.c
+++ b/arch/arm/kernel/init_task.c
@@ -14,10 +14,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
diff --git a/arch/avr32/kernel/init_task.c b/arch/avr32/kernel/init_task.c
index 993d56ee3cf3..57ec9f2dcd95 100644
--- a/arch/avr32/kernel/init_task.c
+++ b/arch/avr32/kernel/init_task.c
@@ -15,10 +15,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure. Must be aligned on an 8192-byte boundary.
  */
diff --git a/arch/blackfin/include/asm/kmap_types.h b/arch/blackfin/include/asm/kmap_types.h
index e215f7104974..0a88622339ee 100644
--- a/arch/blackfin/include/asm/kmap_types.h
+++ b/arch/blackfin/include/asm/kmap_types.h
@@ -1,21 +1,6 @@
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-enum km_type {
-	KM_BOUNCE_READ,
-	KM_SKB_SUNRPC_DATA,
-	KM_SKB_DATA_SOFTIRQ,
-	KM_USER0,
-	KM_USER1,
-	KM_BIO_SRC_IRQ,
-	KM_BIO_DST_IRQ,
-	KM_PTE0,
-	KM_PTE1,
-	KM_IRQ0,
-	KM_IRQ1,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-	KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c
index 2c228c020978..c26c34de9f3c 100644
--- a/arch/blackfin/kernel/init_task.c
+++ b/arch/blackfin/kernel/init_task.c
@@ -35,10 +35,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
diff --git a/arch/cris/include/asm/kmap_types.h b/arch/cris/include/asm/kmap_types.h
index 492988cb9077..d2d643c4ea59 100644
--- a/arch/cris/include/asm/kmap_types.h
+++ b/arch/cris/include/asm/kmap_types.h
@@ -5,21 +5,6 @@
  * is actually used on cris.
  */
 
-enum km_type {
-	KM_BOUNCE_READ,
-	KM_SKB_SUNRPC_DATA,
-	KM_SKB_DATA_SOFTIRQ,
-	KM_USER0,
-	KM_USER1,
-	KM_BIO_SRC_IRQ,
-	KM_BIO_DST_IRQ,
-	KM_PTE0,
-	KM_PTE1,
-	KM_IRQ0,
-	KM_IRQ1,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-	KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 4df0b320d524..51dcd04d2777 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -38,10 +38,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
diff --git a/arch/frv/kernel/init_task.c b/arch/frv/kernel/init_task.c
index 29429a8b7f6a..1d3df1d9495c 100644
--- a/arch/frv/kernel/init_task.c
+++ b/arch/frv/kernel/init_task.c
@@ -12,10 +12,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
diff --git a/arch/h8300/include/asm/kmap_types.h b/arch/h8300/include/asm/kmap_types.h
index 1ec8a3427120..be12a7160116 100644
--- a/arch/h8300/include/asm/kmap_types.h
+++ b/arch/h8300/include/asm/kmap_types.h
@@ -1,21 +1,6 @@
 #ifndef _ASM_H8300_KMAP_TYPES_H
 #define _ASM_H8300_KMAP_TYPES_H
 
-enum km_type {
-	KM_BOUNCE_READ,
-	KM_SKB_SUNRPC_DATA,
-	KM_SKB_DATA_SOFTIRQ,
-	KM_USER0,
-	KM_USER1,
-	KM_BIO_SRC_IRQ,
-	KM_BIO_DST_IRQ,
-	KM_PTE0,
-	KM_PTE1,
-	KM_IRQ0,
-	KM_IRQ1,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-	KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif
diff --git a/arch/h8300/kernel/init_task.c b/arch/h8300/kernel/init_task.c
index cb5dc552da97..089c65ed6eb3 100644
--- a/arch/h8300/kernel/init_task.c
+++ b/arch/h8300/kernel/init_task.c
@@ -14,10 +14,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 56ceb68eb99d..fe63b2dc9d07 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1131,7 +1131,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
 #ifdef CONFIG_NUMA
 	{
 		struct page *page;
-		page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
+		page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
 					numa_node_id() : ioc->node, flags,
 					get_order(size));
 
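The alloc_pages_node to alloc_pages_exact_node conversions in the ia64 hunks (here and in uncached.c and sn/pci/pci_dma.c below) express that the caller already knows the node id is valid. As assumed here, alloc_pages_node quietly substitutes the local node when nid is negative, while the exact variant skips that fixup and instead asserts validity in debug builds, roughly:

	/* sketch of the two variants, assuming the 2.6.31-era definitions */
	static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						    unsigned int order)
	{
		if (nid < 0)		/* -1 means "whatever node I'm on" */
			nid = numa_node_id();
		return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
	}

	static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
							  unsigned int order)
	{
		VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);	/* caller vouches for nid */
		return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
	}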
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
index 5d1658aa2b3b..05d5f9996105 100644
--- a/arch/ia64/include/asm/kmap_types.h
+++ b/arch/ia64/include/asm/kmap_types.h
@@ -1,30 +1,12 @@
 #ifndef _ASM_IA64_KMAP_TYPES_H
 #define _ASM_IA64_KMAP_TYPES_H
 
-
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)	KM_BOUNCE_READ,
-D(1)	KM_SKB_SUNRPC_DATA,
-D(2)	KM_SKB_DATA_SOFTIRQ,
-D(3)	KM_USER0,
-D(4)	KM_USER1,
-D(5)	KM_BIO_SRC_IRQ,
-D(6)	KM_BIO_DST_IRQ,
-D(7)	KM_PTE0,
-D(8)	KM_PTE1,
-D(9)	KM_IRQ0,
-D(10)	KM_IRQ1,
-D(11)	KM_SOFTIRQ0,
-D(12)	KM_SOFTIRQ1,
-D(13)	KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index 5b0e830c6f33..c475fc281be7 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -19,10 +19,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8f33a8840422..5b17bd402275 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1829,8 +1829,7 @@ ia64_mca_cpu_init(void *cpu_data)
 		data = mca_bootmem();
 		first_time = 0;
 	} else
-		data = page_address(alloc_pages_node(numa_node_id(),
-					GFP_KERNEL, get_order(sz)));
+		data = __get_free_pages(GFP_KERNEL, get_order(sz));
 	if (!data)
 		panic("Could not allocate MCA memory for cpu %d\n",
 			cpu);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 8a06dc480594..bdc176cb5e85 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5595,7 +5595,7 @@ pfm_interrupt_handler(int irq, void *arg)
 		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
 	}
 
-	put_cpu_no_resched();
+	put_cpu();
 	return IRQ_HANDLED;
 }
 
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 8eff8c1d40a6..6ba72ab42fcc 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
-	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+	page = alloc_pages_exact_node(nid,
+				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
 		mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d876423e4e75..98b684928e12 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -90,7 +90,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 	 */
 	node = pcibus_to_node(pdev->bus);
 	if (likely(node >=0)) {
-		struct page *p = alloc_pages_node(node, flags, get_order(size));
+		struct page *p = alloc_pages_exact_node(node,
+						flags, get_order(size));
 
 		if (likely(p))
 			cpuaddr = page_address(p);
diff --git a/arch/m32r/include/asm/kmap_types.h b/arch/m32r/include/asm/kmap_types.h
index fa94dc6410ea..4cdb5e3a06bf 100644
--- a/arch/m32r/include/asm/kmap_types.h
+++ b/arch/m32r/include/asm/kmap_types.h
@@ -2,28 +2,11 @@
 #define __M32R_KMAP_TYPES_H
 
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)	KM_BOUNCE_READ,
-D(1)	KM_SKB_SUNRPC_DATA,
-D(2)	KM_SKB_DATA_SOFTIRQ,
-D(3)	KM_USER0,
-D(4)	KM_USER1,
-D(5)	KM_BIO_SRC_IRQ,
-D(6)	KM_BIO_DST_IRQ,
-D(7)	KM_PTE0,
-D(8)	KM_PTE1,
-D(9)	KM_IRQ0,
-D(10)	KM_IRQ1,
-D(11)	KM_SOFTIRQ0,
-D(12)	KM_SOFTIRQ1,
-D(13)	KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif /* __M32R_KMAP_TYPES_H */
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index 016885c6f260..fce57e5d3f91 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -13,10 +13,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 7daf897292cf..b7a78ad429b7 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -154,9 +154,9 @@ unsigned long __init zone_sizes_init(void)
 	 *  Use all area of internal RAM.
 	 *  see __alloc_pages()
 	 */
-	NODE_DATA(1)->node_zones->pages_min = 0;
-	NODE_DATA(1)->node_zones->pages_low = 0;
-	NODE_DATA(1)->node_zones->pages_high = 0;
+	NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
+	NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
+	NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;
 
 	return holes;
 }
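The discontig.c hunk above reflects a mm-wide change in this series: the separate zone fields pages_min, pages_low and pages_high were folded into one array indexed by an enum, so call sites now select the watermark by name. A sketch of the assumed replacement layout in include/linux/mmzone.h:

	/* sketch of the replacement layout, as assumed here */
	enum zone_watermarks {
		WMARK_MIN,
		WMARK_LOW,
		WMARK_HIGH,
		NR_WMARK
	};

	struct zone {
		/* ... other fields unchanged ... */
		unsigned long watermark[NR_WMARK];	/* was pages_min/low/high */
	};

	#define min_wmark_pages(z)	((z)->watermark[WMARK_MIN])
	#define low_wmark_pages(z)	((z)->watermark[WMARK_LOW])
	#define high_wmark_pages(z)	((z)->watermark[WMARK_HIGH])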
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 98138b4e9220..922fdfdadeaa 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -63,7 +63,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type m32104ut_irq_type =
+static struct irq_chip m32104ut_irq_type =
 {
 	.typename = "M32104UT-IRQ",
 	.startup = startup_m32104ut_irq,
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 77b0ae9379e9..9c1bc7487c1e 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -69,7 +69,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)
69 outl(M32R_ICUCR_ILEVEL7, port); 69 outl(M32R_ICUCR_ILEVEL7, port);
70} 70}
71 71
72static struct hw_interrupt_type m32700ut_irq_type = 72static struct irq_chip m32700ut_irq_type =
73{ 73{
74 .typename = "M32700UT-IRQ", 74 .typename = "M32700UT-IRQ",
75 .startup = startup_m32700ut_irq, 75 .startup = startup_m32700ut_irq,
@@ -146,7 +146,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
146 outw(PLD_ICUCR_ILEVEL7, port); 146 outw(PLD_ICUCR_ILEVEL7, port);
147} 147}
148 148
149static struct hw_interrupt_type m32700ut_pld_irq_type = 149static struct irq_chip m32700ut_pld_irq_type =
150{ 150{
151 .typename = "M32700UT-PLD-IRQ", 151 .typename = "M32700UT-PLD-IRQ",
152 .startup = startup_m32700ut_pld_irq, 152 .startup = startup_m32700ut_pld_irq,
@@ -215,7 +215,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
215 outw(PLD_ICUCR_ILEVEL7, port); 215 outw(PLD_ICUCR_ILEVEL7, port);
216} 216}
217 217
218static struct hw_interrupt_type m32700ut_lanpld_irq_type = 218static struct irq_chip m32700ut_lanpld_irq_type =
219{ 219{
220 .typename = "M32700UT-PLD-LAN-IRQ", 220 .typename = "M32700UT-PLD-LAN-IRQ",
221 .startup = startup_m32700ut_lanpld_irq, 221 .startup = startup_m32700ut_lanpld_irq,
@@ -284,7 +284,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
284 outw(PLD_ICUCR_ILEVEL7, port); 284 outw(PLD_ICUCR_ILEVEL7, port);
285} 285}
286 286
287static struct hw_interrupt_type m32700ut_lcdpld_irq_type = 287static struct irq_chip m32700ut_lcdpld_irq_type =
288{ 288{
289 .typename = "M32700UT-PLD-LCD-IRQ", 289 .typename = "M32700UT-PLD-LCD-IRQ",
290 .startup = startup_m32700ut_lcdpld_irq, 290 .startup = startup_m32700ut_lcdpld_irq,
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index 3ec087ff2214..fb4b17799b66 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)
63 outl(M32R_ICUCR_ILEVEL7, port); 63 outl(M32R_ICUCR_ILEVEL7, port);
64} 64}
65 65
66static struct hw_interrupt_type mappi_irq_type = 66static struct irq_chip mappi_irq_type =
67{ 67{
68 .typename = "MAPPI-IRQ", 68 .typename = "MAPPI-IRQ",
69 .startup = startup_mappi_irq, 69 .startup = startup_mappi_irq,
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index d87969c6356e..6a65eda0a056 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -70,7 +70,7 @@ static void shutdown_mappi2_irq(unsigned int irq)
70 outl(M32R_ICUCR_ILEVEL7, port); 70 outl(M32R_ICUCR_ILEVEL7, port);
71} 71}
72 72
73static struct hw_interrupt_type mappi2_irq_type = 73static struct irq_chip mappi2_irq_type =
74{ 74{
75 .typename = "MAPPI2-IRQ", 75 .typename = "MAPPI2-IRQ",
76 .startup = startup_mappi2_irq, 76 .startup = startup_mappi2_irq,
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 785b4bd6d9fd..9c337aeac94b 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -70,7 +70,7 @@ static void shutdown_mappi3_irq(unsigned int irq)
70 outl(M32R_ICUCR_ILEVEL7, port); 70 outl(M32R_ICUCR_ILEVEL7, port);
71} 71}
72 72
73static struct hw_interrupt_type mappi3_irq_type = 73static struct irq_chip mappi3_irq_type =
74{ 74{
75 .typename = "MAPPI3-IRQ", 75 .typename = "MAPPI3-IRQ",
76 .startup = startup_mappi3_irq, 76 .startup = startup_mappi3_irq,
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index 6faa5db68e95..ed865741c38d 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -61,7 +61,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)
61 outl(M32R_ICUCR_ILEVEL7, port); 61 outl(M32R_ICUCR_ILEVEL7, port);
62} 62}
63 63
64static struct hw_interrupt_type oaks32r_irq_type = 64static struct irq_chip oaks32r_irq_type =
65{ 65{
66 .typename = "OAKS32R-IRQ", 66 .typename = "OAKS32R-IRQ",
67 .startup = startup_oaks32r_irq, 67 .startup = startup_oaks32r_irq,
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index fab13fd85422..80d680657019 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -70,7 +70,7 @@ static void shutdown_opsput_irq(unsigned int irq)
70 outl(M32R_ICUCR_ILEVEL7, port); 70 outl(M32R_ICUCR_ILEVEL7, port);
71} 71}
72 72
73static struct hw_interrupt_type opsput_irq_type = 73static struct irq_chip opsput_irq_type =
74{ 74{
75 .typename = "OPSPUT-IRQ", 75 .typename = "OPSPUT-IRQ",
76 .startup = startup_opsput_irq, 76 .startup = startup_opsput_irq,
@@ -147,7 +147,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)
147 outw(PLD_ICUCR_ILEVEL7, port); 147 outw(PLD_ICUCR_ILEVEL7, port);
148} 148}
149 149
150static struct hw_interrupt_type opsput_pld_irq_type = 150static struct irq_chip opsput_pld_irq_type =
151{ 151{
152 .typename = "OPSPUT-PLD-IRQ", 152 .typename = "OPSPUT-PLD-IRQ",
153 .startup = startup_opsput_pld_irq, 153 .startup = startup_opsput_pld_irq,
@@ -216,7 +216,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)
216 outw(PLD_ICUCR_ILEVEL7, port); 216 outw(PLD_ICUCR_ILEVEL7, port);
217} 217}
218 218
219static struct hw_interrupt_type opsput_lanpld_irq_type = 219static struct irq_chip opsput_lanpld_irq_type =
220{ 220{
221 .typename = "OPSPUT-PLD-LAN-IRQ", 221 .typename = "OPSPUT-PLD-LAN-IRQ",
222 .startup = startup_opsput_lanpld_irq, 222 .startup = startup_opsput_lanpld_irq,
@@ -285,7 +285,7 @@ static void shutdown_opsput_lcdpld_irq(unsigned int irq)
285 outw(PLD_ICUCR_ILEVEL7, port); 285 outw(PLD_ICUCR_ILEVEL7, port);
286} 286}
287 287
288static struct hw_interrupt_type opsput_lcdpld_irq_type = 288static struct irq_chip opsput_lcdpld_irq_type =
289{ 289{
290 "OPSPUT-PLD-LCD-IRQ", 290 "OPSPUT-PLD-LCD-IRQ",
291 startup_opsput_lcdpld_irq, 291 startup_opsput_lcdpld_irq,
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 89588d649eb7..757302660af8 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -61,7 +61,7 @@ static void shutdown_mappi_irq(unsigned int irq)
61 outl(M32R_ICUCR_ILEVEL7, port); 61 outl(M32R_ICUCR_ILEVEL7, port);
62} 62}
63 63
64static struct hw_interrupt_type mappi_irq_type = 64static struct irq_chip mappi_irq_type =
65{ 65{
66 .typename = "M32700-IRQ", 66 .typename = "M32700-IRQ",
67 .startup = startup_mappi_irq, 67 .startup = startup_mappi_irq,
@@ -134,7 +134,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
134 outw(PLD_ICUCR_ILEVEL7, port); 134 outw(PLD_ICUCR_ILEVEL7, port);
135} 135}
136 136
137static struct hw_interrupt_type m32700ut_pld_irq_type = 137static struct irq_chip m32700ut_pld_irq_type =
138{ 138{
139 .typename = "USRV-PLD-IRQ", 139 .typename = "USRV-PLD-IRQ",
140 .startup = startup_m32700ut_pld_irq, 140 .startup = startup_m32700ut_pld_irq,
diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h
index c843c63d3801..3413cc1390ec 100644
--- a/arch/m68k/include/asm/kmap_types.h
+++ b/arch/m68k/include/asm/kmap_types.h
@@ -1,21 +1,6 @@
1#ifndef __ASM_M68K_KMAP_TYPES_H 1#ifndef __ASM_M68K_KMAP_TYPES_H
2#define __ASM_M68K_KMAP_TYPES_H 2#define __ASM_M68K_KMAP_TYPES_H
3 3
4enum km_type { 4#include <asm-generic/kmap_types.h>
5 KM_BOUNCE_READ,
6 KM_SKB_SUNRPC_DATA,
7 KM_SKB_DATA_SOFTIRQ,
8 KM_USER0,
9 KM_USER1,
10 KM_BIO_SRC_IRQ,
11 KM_BIO_DST_IRQ,
12 KM_PTE0,
13 KM_PTE1,
14 KM_IRQ0,
15 KM_IRQ1,
16 KM_SOFTIRQ0,
17 KM_SOFTIRQ1,
18 KM_TYPE_NR
19};
20 5
21#endif /* __ASM_M68K_KMAP_TYPES_H */ 6#endif /* __ASM_M68K_KMAP_TYPES_H */
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index ec37fb56c127..72bad65dba3a 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -42,10 +42,6 @@
42 */ 42 */
43static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 43static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
44static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 44static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
45struct mm_struct init_mm = INIT_MM(init_mm);
46
47EXPORT_SYMBOL(init_mm);
48
49union thread_union init_thread_union 45union thread_union init_thread_union
50__attribute__((section(".data.init_task"), aligned(THREAD_SIZE))) 46__attribute__((section(".data.init_task"), aligned(THREAD_SIZE)))
51 = { INIT_THREAD_INFO(init_task) }; 47 = { INIT_THREAD_INFO(init_task) };
diff --git a/arch/m68knommu/kernel/init_task.c b/arch/m68knommu/kernel/init_task.c
index fe282de1d596..45e97a207fed 100644
--- a/arch/m68knommu/kernel/init_task.c
+++ b/arch/m68knommu/kernel/init_task.c
@@ -14,10 +14,6 @@
14 14
15static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 15static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
16static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 16static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17struct mm_struct init_mm = INIT_MM(init_mm);
18
19EXPORT_SYMBOL(init_mm);
20
21/* 17/*
22 * Initial task structure. 18 * Initial task structure.
23 * 19 *
diff --git a/arch/microblaze/include/asm/kmap_types.h b/arch/microblaze/include/asm/kmap_types.h
index 4d7e222f5dd7..25975252d83d 100644
--- a/arch/microblaze/include/asm/kmap_types.h
+++ b/arch/microblaze/include/asm/kmap_types.h
@@ -1,29 +1,6 @@
1/*
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_KMAP_TYPES_H 1#ifndef _ASM_MICROBLAZE_KMAP_TYPES_H
10#define _ASM_MICROBLAZE_KMAP_TYPES_H 2#define _ASM_MICROBLAZE_KMAP_TYPES_H
11 3
12enum km_type { 4#include <asm-generic/kmap_types.h>
13 KM_BOUNCE_READ,
14 KM_SKB_SUNRPC_DATA,
15 KM_SKB_DATA_SOFTIRQ,
16 KM_USER0,
17 KM_USER1,
18 KM_BIO_SRC_IRQ,
19 KM_BIO_DST_IRQ,
20 KM_PTE0,
21 KM_PTE1,
22 KM_IRQ0,
23 KM_IRQ1,
24 KM_SOFTIRQ0,
25 KM_SOFTIRQ1,
26 KM_TYPE_NR,
27};
28 5
29#endif /* _ASM_MICROBLAZE_KMAP_TYPES_H */ 6#endif /* _ASM_MICROBLAZE_KMAP_TYPES_H */
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h
index 5dabc870b322..032ca73f181b 100644
--- a/arch/mips/include/asm/i8253.h
+++ b/arch/mips/include/asm/i8253.h
@@ -12,8 +12,6 @@
12#define PIT_CH0 0x40 12#define PIT_CH0 0x40
13#define PIT_CH2 0x42 13#define PIT_CH2 0x42
14 14
15#define PIT_TICK_RATE 1193182UL
16
17extern spinlock_t i8253_lock; 15extern spinlock_t i8253_lock;
18 16
19extern void setup_pit_timer(void); 17extern void setup_pit_timer(void);
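
PIT_TICK_RATE was defined identically (1193182 Hz, the 8253/8254 input clock) by several architectures; this series moves it to <linux/timex.h>, which is why the mips and powerpc copies are deleted here and below, and why <linux/timex.h> includes are added further down to arch/x86/kernel/i8253.c, arch/x86/kernel/tsc.c and drivers/clocksource/acpi_pm.c. The consolidated definition amounts to (sketch):

	/* <linux/timex.h>: the i8253/i8254 PIT ticks at this frequency
	 * on every platform that has one */
	#define PIT_TICK_RATE 1193182ul
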
diff --git a/arch/mips/include/asm/kmap_types.h b/arch/mips/include/asm/kmap_types.h
index 806aae3c5338..58e91ed0388f 100644
--- a/arch/mips/include/asm/kmap_types.h
+++ b/arch/mips/include/asm/kmap_types.h
@@ -1,30 +1,12 @@
1#ifndef _ASM_KMAP_TYPES_H 1#ifndef _ASM_KMAP_TYPES_H
2#define _ASM_KMAP_TYPES_H 2#define _ASM_KMAP_TYPES_H
3 3
4
5#ifdef CONFIG_DEBUG_HIGHMEM 4#ifdef CONFIG_DEBUG_HIGHMEM
6# define D(n) __KM_FENCE_##n , 5#define __WITH_KM_FENCE
7#else
8# define D(n)
9#endif 6#endif
10 7
11enum km_type { 8#include <asm-generic/kmap_types.h>
12D(0) KM_BOUNCE_READ,
13D(1) KM_SKB_SUNRPC_DATA,
14D(2) KM_SKB_DATA_SOFTIRQ,
15D(3) KM_USER0,
16D(4) KM_USER1,
17D(5) KM_BIO_SRC_IRQ,
18D(6) KM_BIO_DST_IRQ,
19D(7) KM_PTE0,
20D(8) KM_PTE1,
21D(9) KM_IRQ0,
22D(10) KM_IRQ1,
23D(11) KM_SOFTIRQ0,
24D(12) KM_SOFTIRQ1,
25D(13) KM_TYPE_NR
26};
27 9
28#undef D 10#undef __WITH_KM_FENCE
29 11
30#endif 12#endif
diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c
index 149cd914526e..5b457a40c784 100644
--- a/arch/mips/kernel/init_task.c
+++ b/arch/mips/kernel/init_task.c
@@ -11,10 +11,6 @@
11 11
12static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 12static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
13static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 13static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
14struct mm_struct init_mm = INIT_MM(init_mm);
15
16EXPORT_SYMBOL(init_mm);
17
18/* 14/*
19 * Initial thread structure. 15 * Initial thread structure.
20 * 16 *
diff --git a/arch/mn10300/include/asm/kmap_types.h b/arch/mn10300/include/asm/kmap_types.h
index 3398f9f35603..76d093b58d4f 100644
--- a/arch/mn10300/include/asm/kmap_types.h
+++ b/arch/mn10300/include/asm/kmap_types.h
@@ -1,31 +1,6 @@
1/* MN10300 kmap_atomic() slot IDs
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#ifndef _ASM_KMAP_TYPES_H 1#ifndef _ASM_KMAP_TYPES_H
12#define _ASM_KMAP_TYPES_H 2#define _ASM_KMAP_TYPES_H
13 3
14enum km_type { 4#include <asm-generic/kmap_types.h>
15 KM_BOUNCE_READ,
16 KM_SKB_SUNRPC_DATA,
17 KM_SKB_DATA_SOFTIRQ,
18 KM_USER0,
19 KM_USER1,
20 KM_BIO_SRC_IRQ,
21 KM_BIO_DST_IRQ,
22 KM_PTE0,
23 KM_PTE1,
24 KM_IRQ0,
25 KM_IRQ1,
26 KM_SOFTIRQ0,
27 KM_SOFTIRQ1,
28 KM_TYPE_NR
29};
30 5
31#endif /* _ASM_KMAP_TYPES_H */ 6#endif /* _ASM_KMAP_TYPES_H */
diff --git a/arch/mn10300/kernel/init_task.c b/arch/mn10300/kernel/init_task.c
index 5ac3566f8c98..80d423b80af3 100644
--- a/arch/mn10300/kernel/init_task.c
+++ b/arch/mn10300/kernel/init_task.c
@@ -20,9 +20,6 @@
20 20
21static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 21static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
22static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 22static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
23struct mm_struct init_mm = INIT_MM(init_mm);
24EXPORT_SYMBOL(init_mm);
25
26/* 23/*
27 * Initial thread structure. 24 * Initial thread structure.
28 * 25 *
diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h
index 806aae3c5338..58e91ed0388f 100644
--- a/arch/parisc/include/asm/kmap_types.h
+++ b/arch/parisc/include/asm/kmap_types.h
@@ -1,30 +1,12 @@
1#ifndef _ASM_KMAP_TYPES_H 1#ifndef _ASM_KMAP_TYPES_H
2#define _ASM_KMAP_TYPES_H 2#define _ASM_KMAP_TYPES_H
3 3
4
5#ifdef CONFIG_DEBUG_HIGHMEM 4#ifdef CONFIG_DEBUG_HIGHMEM
6# define D(n) __KM_FENCE_##n , 5#define __WITH_KM_FENCE
7#else
8# define D(n)
9#endif 6#endif
10 7
11enum km_type { 8#include <asm-generic/kmap_types.h>
12D(0) KM_BOUNCE_READ,
13D(1) KM_SKB_SUNRPC_DATA,
14D(2) KM_SKB_DATA_SOFTIRQ,
15D(3) KM_USER0,
16D(4) KM_USER1,
17D(5) KM_BIO_SRC_IRQ,
18D(6) KM_BIO_DST_IRQ,
19D(7) KM_PTE0,
20D(8) KM_PTE1,
21D(9) KM_IRQ0,
22D(10) KM_IRQ1,
23D(11) KM_SOFTIRQ0,
24D(12) KM_SOFTIRQ1,
25D(13) KM_TYPE_NR
26};
27 9
28#undef D 10#undef __WITH_KM_FENCE
29 11
30#endif 12#endif
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index 1e25a45d64c1..82974b20fc10 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -36,10 +36,6 @@
36 36
37static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 37static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
38static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 38static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
39struct mm_struct init_mm = INIT_MM(init_mm);
40
41EXPORT_SYMBOL(init_mm);
42
43/* 39/*
44 * Initial task structure. 40 * Initial task structure.
45 * 41 *
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h
index b70d6e53b303..a71c9c1455a7 100644
--- a/arch/powerpc/include/asm/8253pit.h
+++ b/arch/powerpc/include/asm/8253pit.h
@@ -1,10 +1,3 @@
1#ifndef _ASM_POWERPC_8253PIT_H
2#define _ASM_POWERPC_8253PIT_H
3
4/* 1/*
5 * 8253/8254 Programmable Interval Timer 2 * 8253/8254 Programmable Interval Timer
6 */ 3 */
7
8#define PIT_TICK_RATE 1193182UL
9
10#endif /* _ASM_POWERPC_8253PIT_H */
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index 688b329800bd..ffc4253fef55 100644
--- a/arch/powerpc/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
@@ -9,10 +9,6 @@
9 9
10static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 10static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
11static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 11static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
12struct mm_struct init_mm = INIT_MM(init_mm);
13
14EXPORT_SYMBOL(init_mm);
15
16/* 12/*
17 * Initial thread structure. 13 * Initial thread structure.
18 * 14 *
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2f0e64b53642..ef6f64950e9b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -44,10 +44,7 @@
44#include <asm/sections.h> 44#include <asm/sections.h>
45#include <asm/machdep.h> 45#include <asm/machdep.h>
46 46
47#ifdef CONFIG_LOGO_LINUX_CLUT224
48#include <linux/linux_logo.h> 47#include <linux/linux_logo.h>
49extern const struct linux_logo logo_linux_clut224;
50#endif
51 48
52/* 49/*
53 * Properties whose value is longer than this get excluded from our 50 * Properties whose value is longer than this get excluded from our
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 296b5268754e..5e0a191764fc 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -122,8 +122,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
122 122
123 area->nid = nid; 123 area->nid = nid;
124 area->order = order; 124 area->order = order;
125 area->pages = alloc_pages_node(area->nid, GFP_KERNEL | GFP_THISNODE, 125 area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
126 area->order); 126 area->order);
127 127
128 if (!area->pages) { 128 if (!area->pages) {
129 printk(KERN_WARNING "%s: no page on node %d\n", 129 printk(KERN_WARNING "%s: no page on node %d\n",
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 9abd210d87c1..8547e86bfb42 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -752,17 +752,8 @@ static int __init init_spu_base(void)
752 goto out_unregister_sysdev_class; 752 goto out_unregister_sysdev_class;
753 } 753 }
754 754
755 if (ret > 0) { 755 if (ret > 0)
756 /*
757 * We cannot put the forward declaration in
758 * <linux/linux_logo.h> because of conflicting session type
759 * conflicts for const and __initdata with different compiler
760 * versions
761 */
762 extern const struct linux_logo logo_spe_clut224;
763
764 fb_append_extra_logo(&logo_spe_clut224, ret); 756 fb_append_extra_logo(&logo_spe_clut224, ret);
765 }
766 757
767 mutex_lock(&spu_full_list_mutex); 758 mutex_lock(&spu_full_list_mutex);
768 xmon_register_spus(&spu_full_list); 759 xmon_register_spus(&spu_full_list);
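
The function-local extern and its apologetic comment can go because the declaration of logo_spe_clut224 moves into <linux/linux_logo.h>, which prom_init.c above now also includes unconditionally. What the header is assumed to provide after this series:

	extern const struct linux_logo logo_spe_clut224;
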
diff --git a/arch/s390/include/asm/kmap_types.h b/arch/s390/include/asm/kmap_types.h
index fd1574648223..94ec3ee07983 100644
--- a/arch/s390/include/asm/kmap_types.h
+++ b/arch/s390/include/asm/kmap_types.h
@@ -2,22 +2,7 @@
2#ifndef _ASM_KMAP_TYPES_H 2#ifndef _ASM_KMAP_TYPES_H
3#define _ASM_KMAP_TYPES_H 3#define _ASM_KMAP_TYPES_H
4 4
5enum km_type { 5#include <asm-generic/kmap_types.h>
6 KM_BOUNCE_READ,
7 KM_SKB_SUNRPC_DATA,
8 KM_SKB_DATA_SOFTIRQ,
9 KM_USER0,
10 KM_USER1,
11 KM_BIO_SRC_IRQ,
12 KM_BIO_DST_IRQ,
13 KM_PTE0,
14 KM_PTE1,
15 KM_IRQ0,
16 KM_IRQ1,
17 KM_SOFTIRQ0,
18 KM_SOFTIRQ1,
19 KM_TYPE_NR
20};
21 6
22#endif 7#endif
23#endif /* __KERNEL__ */ 8#endif /* __KERNEL__ */
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
index 7db95c0b8693..fe787f9e5f3f 100644
--- a/arch/s390/kernel/init_task.c
+++ b/arch/s390/kernel/init_task.c
@@ -18,10 +18,6 @@
18 18
19static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 19static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
20static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 20static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
21struct mm_struct init_mm = INIT_MM(init_mm);
22
23EXPORT_SYMBOL(init_mm);
24
25/* 21/*
26 * Initial thread structure. 22 * Initial thread structure.
27 * 23 *
diff --git a/arch/sh/include/asm/kmap_types.h b/arch/sh/include/asm/kmap_types.h
index 84d565c696be..5962b08b6dd8 100644
--- a/arch/sh/include/asm/kmap_types.h
+++ b/arch/sh/include/asm/kmap_types.h
@@ -3,30 +3,12 @@
3 3
4/* Dummy header just to define km_type. */ 4/* Dummy header just to define km_type. */
5 5
6
7#ifdef CONFIG_DEBUG_HIGHMEM 6#ifdef CONFIG_DEBUG_HIGHMEM
8# define D(n) __KM_FENCE_##n , 7#define __WITH_KM_FENCE
9#else
10# define D(n)
11#endif 8#endif
12 9
13enum km_type { 10#include <asm-generic/kmap_types.h>
14D(0) KM_BOUNCE_READ,
15D(1) KM_SKB_SUNRPC_DATA,
16D(2) KM_SKB_DATA_SOFTIRQ,
17D(3) KM_USER0,
18D(4) KM_USER1,
19D(5) KM_BIO_SRC_IRQ,
20D(6) KM_BIO_DST_IRQ,
21D(7) KM_PTE0,
22D(8) KM_PTE1,
23D(9) KM_IRQ0,
24D(10) KM_IRQ1,
25D(11) KM_SOFTIRQ0,
26D(12) KM_SOFTIRQ1,
27D(13) KM_TYPE_NR
28};
29 11
30#undef D 12#undef __WITH_KM_FENCE
31 13
32#endif 14#endif
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
index 80c35ff71d56..1719957c0a69 100644
--- a/arch/sh/kernel/init_task.c
+++ b/arch/sh/kernel/init_task.c
@@ -10,9 +10,6 @@
10static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 10static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
11static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 11static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
12struct pt_regs fake_swapper_regs; 12struct pt_regs fake_swapper_regs;
13struct mm_struct init_mm = INIT_MM(init_mm);
14EXPORT_SYMBOL(init_mm);
15
16/* 13/*
17 * Initial thread structure. 14 * Initial thread structure.
18 * 15 *
diff --git a/arch/sparc/include/asm/kmap_types.h b/arch/sparc/include/asm/kmap_types.h
index 602f5e034f7a..aad21745fbb9 100644
--- a/arch/sparc/include/asm/kmap_types.h
+++ b/arch/sparc/include/asm/kmap_types.h
@@ -5,21 +5,6 @@
5 * is actually used on sparc. -DaveM 5 * is actually used on sparc. -DaveM
6 */ 6 */
7 7
8enum km_type { 8#include <asm-generic/kmap_types.h>
9 KM_BOUNCE_READ,
10 KM_SKB_SUNRPC_DATA,
11 KM_SKB_DATA_SOFTIRQ,
12 KM_USER0,
13 KM_USER1,
14 KM_BIO_SRC_IRQ,
15 KM_BIO_DST_IRQ,
16 KM_PTE0,
17 KM_PTE1,
18 KM_IRQ0,
19 KM_IRQ1,
20 KM_SOFTIRQ0,
21 KM_SOFTIRQ1,
22 KM_TYPE_NR
23};
24 9
25#endif 10#endif
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index f28cb8278e98..28125c5b3d3c 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -10,10 +10,7 @@
10 10
11static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 11static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
12static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 12static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
13struct mm_struct init_mm = INIT_MM(init_mm);
14struct task_struct init_task = INIT_TASK(init_task); 13struct task_struct init_task = INIT_TASK(init_task);
15
16EXPORT_SYMBOL(init_mm);
17EXPORT_SYMBOL(init_task); 14EXPORT_SYMBOL(init_task);
18 15
19/* .text section in head.S is aligned at 8k boundary and this gets linked 16/* .text section in head.S is aligned at 8k boundary and this gets linked
diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h
index 37dd097c16c0..b3906f860a87 100644
--- a/arch/um/include/shared/init.h
+++ b/arch/um/include/shared/init.h
@@ -27,7 +27,7 @@
27 * sign followed by value, e.g.: 27 * sign followed by value, e.g.:
28 * 28 *
29 * static int init_variable __initdata = 0; 29 * static int init_variable __initdata = 0;
30 * static char linux_logo[] __initdata = { 0x32, 0x36, ... }; 30 * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
31 * 31 *
32 * Don't forget to initialize data not at file scope, i.e. within a function, 32 * Don't forget to initialize data not at file scope, i.e. within a function,
33 * as gcc otherwise puts the data into the bss section and not into the init 33 * as gcc otherwise puts the data into the bss section and not into the init
diff --git a/arch/um/include/shared/net_user.h b/arch/um/include/shared/net_user.h
index 63bee158cd8e..3dabbe128e40 100644
--- a/arch/um/include/shared/net_user.h
+++ b/arch/um/include/shared/net_user.h
@@ -8,7 +8,7 @@
8 8
9#define ETH_ADDR_LEN (6) 9#define ETH_ADDR_LEN (6)
10#define ETH_HEADER_ETHERTAP (16) 10#define ETH_HEADER_ETHERTAP (16)
11#define ETH_HEADER_OTHER (14) 11#define ETH_HEADER_OTHER (26) /* 14 for ethernet + VLAN + MPLS for crazy people */
12#define ETH_MAX_PACKET (1500) 12#define ETH_MAX_PACKET (1500)
13 13
14#define UML_NET_VERSION (4) 14#define UML_NET_VERSION (4)
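
The raised bound leaves room for tagged and labeled frames, not just plain Ethernet. One plausible breakdown of the new value (the comment in the header is terse, so the exact split is an assumption):

	#define ETH_HLEN_BASIC	14	/* dst MAC + src MAC + ethertype */
	#define VLAN_HLEN	 4	/* one 802.1Q tag */
	#define MPLS_ROOM	 8	/* two 4-byte MPLS label stack entries */
	/* 14 + 4 + 8 = 26 == ETH_HEADER_OTHER */
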
diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c
index 806d381947bf..b25121b537d8 100644
--- a/arch/um/kernel/init_task.c
+++ b/arch/um/kernel/init_task.c
@@ -10,11 +10,8 @@
10#include "linux/mqueue.h" 10#include "linux/mqueue.h"
11#include "asm/uaccess.h" 11#include "asm/uaccess.h"
12 12
13struct mm_struct init_mm = INIT_MM(init_mm);
14static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 13static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
15static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 14static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
16EXPORT_SYMBOL(init_mm);
17
18/* 15/*
19 * Initial task structure. 16 * Initial task structure.
20 * 17 *
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 336b61569072..454cdb43e351 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -358,7 +358,7 @@ EXPORT_SYMBOL(um_request_irq);
358EXPORT_SYMBOL(reactivate_fd); 358EXPORT_SYMBOL(reactivate_fd);
359 359
360/* 360/*
361 * hw_interrupt_type must define (startup || enable) && 361 * irq_chip must define (startup || enable) &&
362 * (shutdown || disable) && end 362 * (shutdown || disable) && end
363 */ 363 */
364static void dummy(unsigned int irq) 364static void dummy(unsigned int irq)
@@ -366,7 +366,7 @@ static void dummy(unsigned int irq)
366} 366}
367 367
368/* This is used for everything else than the timer. */ 368/* This is used for everything else than the timer. */
369static struct hw_interrupt_type normal_irq_type = { 369static struct irq_chip normal_irq_type = {
370 .typename = "SIGIO", 370 .typename = "SIGIO",
371 .release = free_irq_by_irq_and_dev, 371 .release = free_irq_by_irq_and_dev,
372 .disable = dummy, 372 .disable = dummy,
@@ -375,7 +375,7 @@ static struct hw_interrupt_type normal_irq_type = {
375 .end = dummy 375 .end = dummy
376}; 376};
377 377
378static struct hw_interrupt_type SIGVTALRM_irq_type = { 378static struct irq_chip SIGVTALRM_irq_type = {
379 .typename = "SIGVTALRM", 379 .typename = "SIGVTALRM",
380 .release = free_irq_by_irq_and_dev, 380 .release = free_irq_by_irq_and_dev,
381 .shutdown = dummy, /* never called */ 381 .shutdown = dummy, /* never called */
diff --git a/arch/um/sys-i386/stub.S b/arch/um/sys-i386/stub.S
index c41b04bf5fa0..54a36ec20cb7 100644
--- a/arch/um/sys-i386/stub.S
+++ b/arch/um/sys-i386/stub.S
@@ -1,7 +1,7 @@
1#include "as-layout.h" 1#include "as-layout.h"
2 2
3 .globl syscall_stub 3 .globl syscall_stub
4.section .__syscall_stub, "x" 4.section .__syscall_stub, "ax"
5 5
6 .globl batch_syscall_stub 6 .globl batch_syscall_stub
7batch_syscall_stub: 7batch_syscall_stub:
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
index 6e8a9195e952..04b9e87c8dad 100644
--- a/arch/um/sys-x86_64/asm/elf.h
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -66,28 +66,28 @@ typedef struct user_i387_struct elf_fpregset_t;
66 PT_REGS_R15(regs) = 0; \ 66 PT_REGS_R15(regs) = 0; \
67} while (0) 67} while (0)
68 68
69#define ELF_CORE_COPY_REGS(pr_reg, regs) \ 69#define ELF_CORE_COPY_REGS(pr_reg, _regs) \
70 (pr_reg)[0] = (regs)->regs.gp[0]; \ 70 (pr_reg)[0] = (_regs)->regs.gp[0]; \
71 (pr_reg)[1] = (regs)->regs.gp[1]; \ 71 (pr_reg)[1] = (_regs)->regs.gp[1]; \
72 (pr_reg)[2] = (regs)->regs.gp[2]; \ 72 (pr_reg)[2] = (_regs)->regs.gp[2]; \
73 (pr_reg)[3] = (regs)->regs.gp[3]; \ 73 (pr_reg)[3] = (_regs)->regs.gp[3]; \
74 (pr_reg)[4] = (regs)->regs.gp[4]; \ 74 (pr_reg)[4] = (_regs)->regs.gp[4]; \
75 (pr_reg)[5] = (regs)->regs.gp[5]; \ 75 (pr_reg)[5] = (_regs)->regs.gp[5]; \
76 (pr_reg)[6] = (regs)->regs.gp[6]; \ 76 (pr_reg)[6] = (_regs)->regs.gp[6]; \
77 (pr_reg)[7] = (regs)->regs.gp[7]; \ 77 (pr_reg)[7] = (_regs)->regs.gp[7]; \
78 (pr_reg)[8] = (regs)->regs.gp[8]; \ 78 (pr_reg)[8] = (_regs)->regs.gp[8]; \
79 (pr_reg)[9] = (regs)->regs.gp[9]; \ 79 (pr_reg)[9] = (_regs)->regs.gp[9]; \
80 (pr_reg)[10] = (regs)->regs.gp[10]; \ 80 (pr_reg)[10] = (_regs)->regs.gp[10]; \
81 (pr_reg)[11] = (regs)->regs.gp[11]; \ 81 (pr_reg)[11] = (_regs)->regs.gp[11]; \
82 (pr_reg)[12] = (regs)->regs.gp[12]; \ 82 (pr_reg)[12] = (_regs)->regs.gp[12]; \
83 (pr_reg)[13] = (regs)->regs.gp[13]; \ 83 (pr_reg)[13] = (_regs)->regs.gp[13]; \
84 (pr_reg)[14] = (regs)->regs.gp[14]; \ 84 (pr_reg)[14] = (_regs)->regs.gp[14]; \
85 (pr_reg)[15] = (regs)->regs.gp[15]; \ 85 (pr_reg)[15] = (_regs)->regs.gp[15]; \
86 (pr_reg)[16] = (regs)->regs.gp[16]; \ 86 (pr_reg)[16] = (_regs)->regs.gp[16]; \
87 (pr_reg)[17] = (regs)->regs.gp[17]; \ 87 (pr_reg)[17] = (_regs)->regs.gp[17]; \
88 (pr_reg)[18] = (regs)->regs.gp[18]; \ 88 (pr_reg)[18] = (_regs)->regs.gp[18]; \
89 (pr_reg)[19] = (regs)->regs.gp[19]; \ 89 (pr_reg)[19] = (_regs)->regs.gp[19]; \
90 (pr_reg)[20] = (regs)->regs.gp[20]; \ 90 (pr_reg)[20] = (_regs)->regs.gp[20]; \
91 (pr_reg)[21] = current->thread.arch.fs; \ 91 (pr_reg)[21] = current->thread.arch.fs; \
92 (pr_reg)[22] = 0; \ 92 (pr_reg)[22] = 0; \
93 (pr_reg)[23] = 0; \ 93 (pr_reg)[23] = 0; \
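
The parameter rename matters because the macro body also uses regs as a field name: a macro parameter is substituted at every occurrence of its token, so a parameter literally called regs rewrites the field access too. A minimal illustration (macro names invented for the example):

	#define COPY0_OLD(pr_reg, regs)   ((pr_reg)[0] = (regs)->regs.gp[0])
	#define COPY0_NEW(pr_reg, _regs)  ((pr_reg)[0] = (_regs)->regs.gp[0])

	/* COPY0_OLD(pr, task->regs) expands to
	 *	(pr)[0] = (task->regs)->task->regs.gp[0];	// broken
	 * COPY0_NEW(pr, task->regs) expands to
	 *	(pr)[0] = (task->regs)->regs.gp[0];		// intended */
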
diff --git a/arch/um/sys-x86_64/stub.S b/arch/um/sys-x86_64/stub.S
index 6d9edf9fabce..20e4a96a6dcb 100644
--- a/arch/um/sys-x86_64/stub.S
+++ b/arch/um/sys-x86_64/stub.S
@@ -1,7 +1,7 @@
1#include "as-layout.h" 1#include "as-layout.h"
2 2
3 .globl syscall_stub 3 .globl syscall_stub
4.section .__syscall_stub, "x" 4.section .__syscall_stub, "ax"
5syscall_stub: 5syscall_stub:
6 syscall 6 syscall
7 /* We don't have 64-bit constants, so this constructs the address 7 /* We don't have 64-bit constants, so this constructs the address
diff --git a/arch/x86/include/asm/kmap_types.h b/arch/x86/include/asm/kmap_types.h
index 5759c165a5cf..9e00a731a7fb 100644
--- a/arch/x86/include/asm/kmap_types.h
+++ b/arch/x86/include/asm/kmap_types.h
@@ -2,28 +2,11 @@
2#define _ASM_X86_KMAP_TYPES_H 2#define _ASM_X86_KMAP_TYPES_H
3 3
4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM) 4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
5# define D(n) __KM_FENCE_##n , 5#define __WITH_KM_FENCE
6#else
7# define D(n)
8#endif 6#endif
9 7
10enum km_type { 8#include <asm-generic/kmap_types.h>
11D(0) KM_BOUNCE_READ,
12D(1) KM_SKB_SUNRPC_DATA,
13D(2) KM_SKB_DATA_SOFTIRQ,
14D(3) KM_USER0,
15D(4) KM_USER1,
16D(5) KM_BIO_SRC_IRQ,
17D(6) KM_BIO_DST_IRQ,
18D(7) KM_PTE0,
19D(8) KM_PTE1,
20D(9) KM_IRQ0,
21D(10) KM_IRQ1,
22D(11) KM_SOFTIRQ0,
23D(12) KM_SOFTIRQ1,
24D(13) KM_TYPE_NR
25};
26 9
27#undef D 10#undef __WITH_KM_FENCE
28 11
29#endif /* _ASM_X86_KMAP_TYPES_H */ 12#endif /* _ASM_X86_KMAP_TYPES_H */
diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
index b5c9d45c981f..1375cfc93960 100644
--- a/arch/x86/include/asm/timex.h
+++ b/arch/x86/include/asm/timex.h
@@ -4,9 +4,7 @@
4#include <asm/processor.h> 4#include <asm/processor.h>
5#include <asm/tsc.h> 5#include <asm/tsc.h>
6 6
7/* The PIT ticks at this frequency (in HZ): */ 7/* Assume we use the PIT time source for the clock tick */
8#define PIT_TICK_RATE 1193182
9
10#define CLOCK_TICK_RATE PIT_TICK_RATE 8#define CLOCK_TICK_RATE PIT_TICK_RATE
11 9
12#define ARCH_HAS_READ_CURRENT_TIMER 10#define ARCH_HAS_READ_CURRENT_TIMER
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3ffdcfa9abdf..9fa33886c0d7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -487,7 +487,6 @@ out:
487static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) 487static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
488{ 488{
489 char *v = c->x86_vendor_id; 489 char *v = c->x86_vendor_id;
490 static int printed;
491 int i; 490 int i;
492 491
493 for (i = 0; i < X86_VENDOR_NUM; i++) { 492 for (i = 0; i < X86_VENDOR_NUM; i++) {
@@ -504,13 +503,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
504 } 503 }
505 } 504 }
506 505
507 if (!printed) { 506 printk_once(KERN_ERR
508 printed++; 507 "CPU: vendor_id '%s' unknown, using generic init.\n" \
509 printk(KERN_ERR 508 "CPU: Your system may be unstable.\n", v);
510 "CPU: vendor_id '%s' unknown, using generic init.\n", v);
511
512 printk(KERN_ERR "CPU: Your system may be unstable.\n");
513 }
514 509
515 c->x86_vendor = X86_VENDOR_UNKNOWN; 510 c->x86_vendor = X86_VENDOR_UNKNOWN;
516 this_cpu = &default_cpu; 511 this_cpu = &default_cpu;
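
The open-coded static flag is replaced by printk_once(), which packages the same idiom; the two messages are also folded into one call so they cannot be split by interleaved console output. The helper amounts to (sketch of the kernel.h definition of this era):

	#define printk_once(x...) ({			\
		static int __print_once;		\
							\
		if (!__print_once) {			\
			__print_once = 1;		\
			printk(x);			\
		}					\
	})
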
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index c2e0bb0890d4..5cf36c053ac4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -7,6 +7,7 @@
7#include <linux/spinlock.h> 7#include <linux/spinlock.h>
8#include <linux/jiffies.h> 8#include <linux/jiffies.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/timex.h>
10#include <linux/delay.h> 11#include <linux/delay.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/io.h> 13#include <linux/io.h>
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index df3bf269beab..270ff83efc11 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -12,7 +12,6 @@
12 12
13static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 13static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
14static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 14static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15struct mm_struct init_mm = INIT_MM(init_mm);
16 15
17/* 16/*
18 * Initial thread structure. 17 * Initial thread structure.
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3e1c057e98fe..ae3180c506a6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -9,6 +9,7 @@
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/clocksource.h> 10#include <linux/clocksource.h>
11#include <linux/percpu.h> 11#include <linux/percpu.h>
12#include <linux/timex.h>
12 13
13#include <asm/hpet.h> 14#include <asm/hpet.h>
14#include <asm/timer.h> 15#include <asm/timer.h>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 32d6ae8fb60e..e770bf349ec4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1277,7 +1277,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
1277 struct page *pages; 1277 struct page *pages;
1278 struct vmcs *vmcs; 1278 struct vmcs *vmcs;
1279 1279
1280 pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); 1280 pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
1281 if (!pages) 1281 if (!pages)
1282 return NULL; 1282 return NULL;
1283 vmcs = page_address(pages); 1283 vmcs = page_address(pages);
diff --git a/arch/xtensa/include/asm/kmap_types.h b/arch/xtensa/include/asm/kmap_types.h
index 9e822d2e3bce..11c687e527f1 100644
--- a/arch/xtensa/include/asm/kmap_types.h
+++ b/arch/xtensa/include/asm/kmap_types.h
@@ -1,31 +1,6 @@
1/*
2 * include/asm-xtensa/kmap_types.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 */
10
11#ifndef _XTENSA_KMAP_TYPES_H 1#ifndef _XTENSA_KMAP_TYPES_H
12#define _XTENSA_KMAP_TYPES_H 2#define _XTENSA_KMAP_TYPES_H
13 3
14enum km_type { 4#include <asm-generic/kmap_types.h>
15 KM_BOUNCE_READ,
16 KM_SKB_SUNRPC_DATA,
17 KM_SKB_DATA_SOFTIRQ,
18 KM_USER0,
19 KM_USER1,
20 KM_BIO_SRC_IRQ,
21 KM_BIO_DST_IRQ,
22 KM_PTE0,
23 KM_PTE1,
24 KM_IRQ0,
25 KM_IRQ1,
26 KM_SOFTIRQ0,
27 KM_SOFTIRQ1,
28 KM_TYPE_NR
29};
30 5
31#endif /* _XTENSA_KMAP_TYPES_H */ 6#endif /* _XTENSA_KMAP_TYPES_H */
diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c
index e07f5c9fcd35..c4302f0e4ba0 100644
--- a/arch/xtensa/kernel/init_task.c
+++ b/arch/xtensa/kernel/init_task.c
@@ -23,10 +23,6 @@
23 23
24static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 24static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
25static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 25static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
26struct mm_struct init_mm = INIT_MM(init_mm);
27
28EXPORT_SYMBOL(init_mm);
29
30union thread_union init_thread_union 26union thread_union init_thread_union
31 __attribute__((__section__(".data.init_task"))) = 27 __attribute__((__section__(".data.init_task"))) =
32{ INIT_THREAD_INFO(init_task) }; 28{ INIT_THREAD_INFO(init_task) };
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 00cf9553f740..a442c8f29fc1 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -104,6 +104,8 @@ source "drivers/auxdisplay/Kconfig"
104 104
105source "drivers/uio/Kconfig" 105source "drivers/uio/Kconfig"
106 106
107source "drivers/vlynq/Kconfig"
108
107source "drivers/xen/Kconfig" 109source "drivers/xen/Kconfig"
108 110
109source "drivers/staging/Kconfig" 111source "drivers/staging/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 9e7d4e56c85b..00b44f4ccf03 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -105,6 +105,7 @@ obj-$(CONFIG_PPC_PS3) += ps3/
105obj-$(CONFIG_OF) += of/ 105obj-$(CONFIG_OF) += of/
106obj-$(CONFIG_SSB) += ssb/ 106obj-$(CONFIG_SSB) += ssb/
107obj-$(CONFIG_VIRTIO) += virtio/ 107obj-$(CONFIG_VIRTIO) += virtio/
108obj-$(CONFIG_VLYNQ) += vlynq/
108obj-$(CONFIG_STAGING) += staging/ 109obj-$(CONFIG_STAGING) += staging/
109obj-y += platform/ 110obj-y += platform/
110obj-y += ieee802154/ 111obj-y += ieee802154/
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 40b809742a1c..91d4087b4039 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -72,10 +72,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
72 "Node %d Inactive(anon): %8lu kB\n" 72 "Node %d Inactive(anon): %8lu kB\n"
73 "Node %d Active(file): %8lu kB\n" 73 "Node %d Active(file): %8lu kB\n"
74 "Node %d Inactive(file): %8lu kB\n" 74 "Node %d Inactive(file): %8lu kB\n"
75#ifdef CONFIG_UNEVICTABLE_LRU
76 "Node %d Unevictable: %8lu kB\n" 75 "Node %d Unevictable: %8lu kB\n"
77 "Node %d Mlocked: %8lu kB\n" 76 "Node %d Mlocked: %8lu kB\n"
78#endif
79#ifdef CONFIG_HIGHMEM 77#ifdef CONFIG_HIGHMEM
80 "Node %d HighTotal: %8lu kB\n" 78 "Node %d HighTotal: %8lu kB\n"
81 "Node %d HighFree: %8lu kB\n" 79 "Node %d HighFree: %8lu kB\n"
@@ -105,10 +103,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
105 nid, K(node_page_state(nid, NR_INACTIVE_ANON)), 103 nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
106 nid, K(node_page_state(nid, NR_ACTIVE_FILE)), 104 nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
107 nid, K(node_page_state(nid, NR_INACTIVE_FILE)), 105 nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
108#ifdef CONFIG_UNEVICTABLE_LRU
109 nid, K(node_page_state(nid, NR_UNEVICTABLE)), 106 nid, K(node_page_state(nid, NR_UNEVICTABLE)),
110 nid, K(node_page_state(nid, NR_MLOCK)), 107 nid, K(node_page_state(nid, NR_MLOCK)),
111#endif
112#ifdef CONFIG_HIGHMEM 108#ifdef CONFIG_HIGHMEM
113 nid, K(i.totalhigh), 109 nid, K(i.totalhigh),
114 nid, K(i.freehigh), 110 nid, K(i.freehigh),
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index c796a86ab7f3..d9113b4c76e3 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -171,8 +171,9 @@ int do_poke_blanked_console;
171int console_blanked; 171int console_blanked;
172 172
173static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */ 173static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
174static int blankinterval = 10*60*HZ;
175static int vesa_off_interval; 174static int vesa_off_interval;
175static int blankinterval = 10*60;
176core_param(consoleblank, blankinterval, int, 0444);
176 177
177static DECLARE_WORK(console_work, console_callback); 178static DECLARE_WORK(console_work, console_callback);
178 179
@@ -1485,7 +1486,7 @@ static void setterm_command(struct vc_data *vc)
1485 update_attr(vc); 1486 update_attr(vc);
1486 break; 1487 break;
1487 case 9: /* set blanking interval */ 1488 case 9: /* set blanking interval */
1488 blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60 * HZ; 1489 blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60;
1489 poke_blanked_console(); 1490 poke_blanked_console();
1490 break; 1491 break;
1491 case 10: /* set bell frequency in Hz */ 1492 case 10: /* set bell frequency in Hz */
@@ -2871,7 +2872,7 @@ static int __init con_init(void)
2871 2872
2872 if (blankinterval) { 2873 if (blankinterval) {
2873 blank_state = blank_normal_wait; 2874 blank_state = blank_normal_wait;
2874 mod_timer(&console_timer, jiffies + blankinterval); 2875 mod_timer(&console_timer, jiffies + (blankinterval * HZ));
2875 } 2876 }
2876 2877
2877 for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) { 2878 for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
@@ -3677,7 +3678,7 @@ void do_unblank_screen(int leaving_gfx)
3677 return; /* but leave console_blanked != 0 */ 3678 return; /* but leave console_blanked != 0 */
3678 3679
3679 if (blankinterval) { 3680 if (blankinterval) {
3680 mod_timer(&console_timer, jiffies + blankinterval); 3681 mod_timer(&console_timer, jiffies + (blankinterval * HZ));
3681 blank_state = blank_normal_wait; 3682 blank_state = blank_normal_wait;
3682 } 3683 }
3683 3684
@@ -3711,7 +3712,7 @@ void unblank_screen(void)
3711static void blank_screen_t(unsigned long dummy) 3712static void blank_screen_t(unsigned long dummy)
3712{ 3713{
3713 if (unlikely(!keventd_up())) { 3714 if (unlikely(!keventd_up())) {
3714 mod_timer(&console_timer, jiffies + blankinterval); 3715 mod_timer(&console_timer, jiffies + (blankinterval * HZ));
3715 return; 3716 return;
3716 } 3717 }
3717 blank_timer_expired = 1; 3718 blank_timer_expired = 1;
@@ -3741,7 +3742,7 @@ void poke_blanked_console(void)
3741 if (console_blanked) 3742 if (console_blanked)
3742 unblank_screen(); 3743 unblank_screen();
3743 else if (blankinterval) { 3744 else if (blankinterval) {
3744 mod_timer(&console_timer, jiffies + blankinterval); 3745 mod_timer(&console_timer, jiffies + (blankinterval * HZ));
3745 blank_state = blank_normal_wait; 3746 blank_state = blank_normal_wait;
3746 } 3747 }
3747} 3748}
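
blankinterval is now kept in seconds rather than jiffies and converted with * HZ only when the timer is armed, which makes the stored value HZ-independent and lets core_param() expose it as a boot parameter. In effect (illustrative values; the consoleblank=0 behaviour follows from the existing if (blankinterval) checks):

	static int blankinterval = 10*60;	/* seconds, not jiffies */
	core_param(consoleblank, blankinterval, int, 0444);

	/* arming the timer converts explicitly: */
	mod_timer(&console_timer, jiffies + blankinterval * HZ);

	/* so booting with consoleblank=300 blanks after five minutes,
	 * and consoleblank=0 disables blanking */
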
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 40bd8c61c7d7..72a633a6ec98 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/acpi_pmtmr.h> 19#include <linux/acpi_pmtmr.h>
20#include <linux/clocksource.h> 20#include <linux/clocksource.h>
21#include <linux/timex.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
diff --git a/drivers/eisa/eisa.ids b/drivers/eisa/eisa.ids
index ed69837d8b74..6cbb7a514436 100644
--- a/drivers/eisa/eisa.ids
+++ b/drivers/eisa/eisa.ids
@@ -1140,6 +1140,11 @@ NON0301 "c't Universale Graphic Adapter"
1140NON0401 "c't Universal Ethernet Adapter" 1140NON0401 "c't Universal Ethernet Adapter"
1141NON0501 "c't Universal 16-Bit Sound Adapter" 1141NON0501 "c't Universal 16-Bit Sound Adapter"
1142NON0601 "c't Universal 8-Bit Adapter" 1142NON0601 "c't Universal 8-Bit Adapter"
1143NPI0120 "Network Peripherals NP-EISA-1 FDDI Interface"
1144NPI0221 "Network Peripherals NP-EISA-2 FDDI Interface"
1145NPI0223 "Network Peripherals NP-EISA-2E Enhanced FDDI Interface"
1146NPI0301 "Network Peripherals NP-EISA-3 FDDI Interface"
1147NPI0303 "Network Peripherals NP-EISA-3E Enhanced FDDI Interface"
1143NSS0011 "Newport Systems Solutions WNIC Adapter" 1148NSS0011 "Newport Systems Solutions WNIC Adapter"
1144NVL0701 "Novell NE3200 Bus Master Ethernet" 1149NVL0701 "Novell NE3200 Bus Master Ethernet"
1145NVL0702 "Novell NE3200T Bus Master Ethernet" 1150NVL0702 "Novell NE3200T Bus Master Ethernet"
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 05aa2d406ac6..d5ea8a68d338 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -31,8 +31,12 @@
31 * information is necessary as for the resource tree. 31 * information is necessary as for the resource tree.
32 */ 32 */
33struct firmware_map_entry { 33struct firmware_map_entry {
34 resource_size_t start; /* start of the memory range */ 34 /*
35 resource_size_t end; /* end of the memory range (incl.) */ 35 * start and end must be u64 rather than resource_size_t, because e820
36 * resources can lie at addresses above 4G.
37 */
38 u64 start; /* start of the memory range */
39 u64 end; /* end of the memory range (incl.) */
36 const char *type; /* type of the memory range */ 40 const char *type; /* type of the memory range */
37 struct list_head list; /* entry for the linked list */ 41 struct list_head list; /* entry for the linked list */
38 struct kobject kobj; /* kobject for each entry */ 42 struct kobject kobj; /* kobject for each entry */
@@ -101,7 +105,7 @@ static LIST_HEAD(map_entries);
101 * Common implementation of firmware_map_add() and firmware_map_add_early() 105 * Common implementation of firmware_map_add() and firmware_map_add_early()
102 * which expects a pre-allocated struct firmware_map_entry. 106 * which expects a pre-allocated struct firmware_map_entry.
103 **/ 107 **/
104static int firmware_map_add_entry(resource_size_t start, resource_size_t end, 108static int firmware_map_add_entry(u64 start, u64 end,
105 const char *type, 109 const char *type,
106 struct firmware_map_entry *entry) 110 struct firmware_map_entry *entry)
107{ 111{
@@ -132,8 +136,7 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
132 * 136 *
133 * Returns 0 on success, or -ENOMEM if no memory could be allocated. 137 * Returns 0 on success, or -ENOMEM if no memory could be allocated.
134 **/ 138 **/
135int firmware_map_add(resource_size_t start, resource_size_t end, 139int firmware_map_add(u64 start, u64 end, const char *type)
136 const char *type)
137{ 140{
138 struct firmware_map_entry *entry; 141 struct firmware_map_entry *entry;
139 142
@@ -157,8 +160,7 @@ int firmware_map_add(resource_size_t start, resource_size_t end,
157 * 160 *
158 * Returns 0 on success, or -ENOMEM if no memory could be allocated. 161 * Returns 0 on success, or -ENOMEM if no memory could be allocated.
159 **/ 162 **/
160int __init firmware_map_add_early(resource_size_t start, resource_size_t end, 163int __init firmware_map_add_early(u64 start, u64 end, const char *type)
161 const char *type)
162{ 164{
163 struct firmware_map_entry *entry; 165 struct firmware_map_entry *entry;
164 166
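
resource_size_t tracks phys_addr_t, which is only 32 bits wide on 32-bit configurations without CONFIG_PHYS_ADDR_T_64BIT, so an e820 range above 4G would be silently truncated on its way into the firmware map. A minimal, standalone illustration of the failure mode (assuming a 32-bit resource_size_t):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t resource_size_32_t;	/* what resource_size_t can resolve to */

	int main(void)
	{
		uint64_t e820_start = 0x100000000ULL;	/* first byte above 4 GiB */
		resource_size_32_t narrowed = (resource_size_32_t)e820_start;

		/* prints "4294967296 -> 0": the range vanishes */
		printf("%llu -> %u\n", (unsigned long long)e820_start, narrowed);
		return 0;
	}
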
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 0ecf6b76a401..8e28e5993df5 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -504,6 +504,14 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
504 info->fbops = &intelfb_ops; 504 info->fbops = &intelfb_ops;
505 505
506 info->fix.line_length = fb->pitch; 506 info->fix.line_length = fb->pitch;
507
508 /* setup aperture base/size for vesafb takeover */
509 info->aperture_base = dev->mode_config.fb_base;
510 if (IS_I9XX(dev))
511 info->aperture_size = pci_resource_len(dev->pdev, 2);
512 else
513 info->aperture_size = pci_resource_len(dev->pdev, 0);
514
507 info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; 515 info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
508 info->fix.smem_len = size; 516 info->fix.smem_len = size;
509 517
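
Publishing aperture_base/aperture_size is what lets the fbdev core's new firmware-handoff logic (the "add support for handoff" patch in this series) detect that intelfb's aperture covers the one a generic firmware driver such as vesafb registered, and unregister the latter. The overlap test presumably reduces to a range check along these lines (helper name invented for the sketch):

	static bool fb_aperture_covered(struct fb_info *fw, struct fb_info *hw)
	{
		/* is the firmware fb's aperture inside the native driver's? */
		return fw->aperture_base >= hw->aperture_base &&
		       fw->aperture_base + fw->aperture_size <=
		       hw->aperture_base + hw->aperture_size;
	}
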
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f8090e137fef..2d5016691d40 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -950,6 +950,7 @@ config SENSORS_HDAPS
950config SENSORS_LIS3LV02D 950config SENSORS_LIS3LV02D
951 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer" 951 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
952 depends on ACPI && INPUT 952 depends on ACPI && INPUT
953 select INPUT_POLLDEV
953 select NEW_LEDS 954 select NEW_LEDS
954 select LEDS_CLASS 955 select LEDS_CLASS
955 default n 956 default n
@@ -977,6 +978,7 @@ config SENSORS_LIS3LV02D
977config SENSORS_LIS3_SPI 978config SENSORS_LIS3_SPI
978 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)" 979 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
979 depends on !ACPI && SPI_MASTER && INPUT 980 depends on !ACPI && SPI_MASTER && INPUT
981 select INPUT_POLLDEV
980 default n 982 default n
981 help 983 help
982 This driver provides support for the LIS3LV02Dx accelerometer connected 984 This driver provides support for the LIS3LV02Dx accelerometer connected
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index abca7e9f953b..6679854c85b0 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -27,9 +27,6 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/input.h>
31#include <linux/kthread.h>
32#include <linux/semaphore.h>
33#include <linux/delay.h> 30#include <linux/delay.h>
34#include <linux/wait.h> 31#include <linux/wait.h>
35#include <linux/poll.h> 32#include <linux/poll.h>
@@ -161,6 +158,7 @@ static struct axis_conversion lis3lv02d_axis_normal = {1, 2, 3};
161static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3}; 158static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
162static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3}; 159static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
163static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3}; 160static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
161static struct axis_conversion lis3lv02d_axis_xy_swap = {2, 1, 3};
164static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3}; 162static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
165static struct axis_conversion lis3lv02d_axis_xy_rotated_left_usd = {-2, 1, -3}; 163static struct axis_conversion lis3lv02d_axis_xy_rotated_left_usd = {-2, 1, -3};
166static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3}; 164static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
@@ -194,13 +192,16 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
194 AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted), 192 AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
195 AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted), 193 AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
196 AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted), 194 AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
195 AXIS_DMI_MATCH("NC2710", "HP Compaq 2710", xy_swap),
197 AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted), 196 AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
198 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left), 197 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
199 AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted), 198 AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
200 AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd), 199 AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
201 AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd), 200 AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd),
202 AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right), 201 AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
203 AXIS_DMI_MATCH("NC671xx", "HP Compaq 671", xy_swap_yz_inverted), 202 AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
203 AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
204 AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
204 /* Intel-based HP Pavilion dv5 */ 205 /* Intel-based HP Pavilion dv5 */
205 AXIS_DMI_MATCH2("HPDV5_I", 206 AXIS_DMI_MATCH2("HPDV5_I",
206 PRODUCT_NAME, "HP Pavilion dv5", 207 PRODUCT_NAME, "HP Pavilion dv5",
@@ -216,7 +217,6 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
216 { NULL, } 217 { NULL, }
217/* Laptop models without axis info (yet): 218/* Laptop models without axis info (yet):
218 * "NC6910" "HP Compaq 6910" 219 * "NC6910" "HP Compaq 6910"
219 * HP Compaq 8710x Notebook PC / Mobile Workstation
220 * "NC2400" "HP Compaq nc2400" 220 * "NC2400" "HP Compaq nc2400"
221 * "NX74x0" "HP Compaq nx74" 221 * "NX74x0" "HP Compaq nx74"
222 * "NX6325" "HP Compaq nx6325" 222 * "NX6325" "HP Compaq nx6325"
@@ -324,7 +324,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
324 flush_work(&hpled_led.work); 324 flush_work(&hpled_led.work);
325 led_classdev_unregister(&hpled_led.led_classdev); 325 led_classdev_unregister(&hpled_led.led_classdev);
326 326
327 return lis3lv02d_remove_fs(); 327 return lis3lv02d_remove_fs(&lis3_dev);
328} 328}
329 329
330 330
@@ -338,13 +338,7 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
338 338
339static int lis3lv02d_resume(struct acpi_device *device) 339static int lis3lv02d_resume(struct acpi_device *device)
340{ 340{
341 /* put back the device in the right state (ACPI might turn it on) */ 341 lis3lv02d_poweron(&lis3_dev);
342 mutex_lock(&lis3_dev.lock);
343 if (lis3_dev.usage > 0)
344 lis3lv02d_poweron(&lis3_dev);
345 else
346 lis3lv02d_poweroff(&lis3_dev);
347 mutex_unlock(&lis3_dev.lock);
348 return 0; 342 return 0;
349} 343}
350#else 344#else
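
The lis3lv02d core below drops its hand-rolled kthread poller (and with it the usage counting that suspend/resume had to respect) in favour of the input-polldev helper, which is why both Kconfig entries above grow a select INPUT_POLLDEV. A sketch of the registration pattern the driver moves to, assuming the 2.6.x <linux/input-polldev.h> interface:

	#include <linux/input-polldev.h>

	static struct input_polled_dev *pidev;

	pidev = input_allocate_polled_device();
	pidev->poll = lis3lv02d_joystick_poll;	/* runs every poll_interval ms */
	pidev->poll_interval = MDPS_POLL_INTERVAL;
	/* configure pidev->input (name, ABS_X/Y/Z ranges) as for a plain input_dev */
	input_register_polled_device(pidev);

The helper only polls while the device is held open, so the explicit increase/decrease_use bookkeeping removed here becomes unnecessary.
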
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 778eb7795983..271338bdb6be 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -27,9 +27,7 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/input.h> 30#include <linux/input-polldev.h>
31#include <linux/kthread.h>
32#include <linux/semaphore.h>
33#include <linux/delay.h> 31#include <linux/delay.h>
34#include <linux/wait.h> 32#include <linux/wait.h>
35#include <linux/poll.h> 33#include <linux/poll.h>
@@ -105,56 +103,39 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
105{ 103{
106 int position[3]; 104 int position[3];
107 105
108 position[0] = lis3_dev.read_data(lis3, OUTX); 106 position[0] = lis3->read_data(lis3, OUTX);
109 position[1] = lis3_dev.read_data(lis3, OUTY); 107 position[1] = lis3->read_data(lis3, OUTY);
110 position[2] = lis3_dev.read_data(lis3, OUTZ); 108 position[2] = lis3->read_data(lis3, OUTZ);
111 109
112 *x = lis3lv02d_get_axis(lis3_dev.ac.x, position); 110 *x = lis3lv02d_get_axis(lis3->ac.x, position);
113 *y = lis3lv02d_get_axis(lis3_dev.ac.y, position); 111 *y = lis3lv02d_get_axis(lis3->ac.y, position);
114 *z = lis3lv02d_get_axis(lis3_dev.ac.z, position); 112 *z = lis3lv02d_get_axis(lis3->ac.z, position);
115} 113}
116 114
117void lis3lv02d_poweroff(struct lis3lv02d *lis3) 115void lis3lv02d_poweroff(struct lis3lv02d *lis3)
118{ 116{
119 lis3_dev.is_on = 0; 117 /* disable X,Y,Z axis and power down */
118 lis3->write(lis3, CTRL_REG1, 0x00);
120} 119}
121EXPORT_SYMBOL_GPL(lis3lv02d_poweroff); 120EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
122 121
123void lis3lv02d_poweron(struct lis3lv02d *lis3) 122void lis3lv02d_poweron(struct lis3lv02d *lis3)
124{ 123{
125 lis3_dev.is_on = 1; 124 u8 reg;
126 lis3_dev.init(lis3);
127}
128EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
129 125
130/* 126 lis3->init(lis3);
131 * To be called before starting to use the device. It makes sure that the
132 * device will always be on until a call to lis3lv02d_decrease_use(). Not to be
133 * used from interrupt context.
134 */
135static void lis3lv02d_increase_use(struct lis3lv02d *dev)
136{
137 mutex_lock(&dev->lock);
138 dev->usage++;
139 if (dev->usage == 1) {
140 if (!dev->is_on)
141 lis3lv02d_poweron(dev);
142 }
143 mutex_unlock(&dev->lock);
144}
145 127
146/* 128 /*
147 * To be called whenever a usage of the device is stopped. 129 * Common configuration
148 * It will make sure to turn off the device when there is not usage. 130 * BDU: LSB and MSB values are not updated until both have been read.
149 */ 131 * So the value read will always be correct.
150static void lis3lv02d_decrease_use(struct lis3lv02d *dev) 132 */
151{ 133 lis3->read(lis3, CTRL_REG2, &reg);
152 mutex_lock(&dev->lock); 134 reg |= CTRL2_BDU;
153 dev->usage--; 135 lis3->write(lis3, CTRL_REG2, reg);
154 if (dev->usage == 0)
155 lis3lv02d_poweroff(dev);
156 mutex_unlock(&dev->lock);
157} 136}
137EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
138
158 139
159static irqreturn_t lis302dl_interrupt(int irq, void *dummy) 140static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
160{ 141{
@@ -198,15 +179,12 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
198 printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq); 179 printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
199 return -EBUSY; 180 return -EBUSY;
200 } 181 }
201 lis3lv02d_increase_use(&lis3_dev);
202 printk("lis3: registered interrupt %d\n", lis3_dev.irq);
203 return 0; 182 return 0;
204} 183}
205 184
206static int lis3lv02d_misc_release(struct inode *inode, struct file *file) 185static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
207{ 186{
208 fasync_helper(-1, file, 0, &lis3_dev.async_queue); 187 fasync_helper(-1, file, 0, &lis3_dev.async_queue);
209 lis3lv02d_decrease_use(&lis3_dev);
210 free_irq(lis3_dev.irq, &lis3_dev); 188 free_irq(lis3_dev.irq, &lis3_dev);
211 clear_bit(0, &lis3_dev.misc_opened); /* release the device */ 189 clear_bit(0, &lis3_dev.misc_opened); /* release the device */
212 return 0; 190 return 0;
@@ -290,46 +268,16 @@ static struct miscdevice lis3lv02d_misc_device = {
290 .fops = &lis3lv02d_misc_fops, 268 .fops = &lis3lv02d_misc_fops,
291}; 269};
292 270
293/** 271static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
294 * lis3lv02d_joystick_kthread - Kthread polling function
295 * @data: unused - here to conform to threadfn prototype
296 */
297static int lis3lv02d_joystick_kthread(void *data)
298{ 272{
299 int x, y, z; 273 int x, y, z;
300 274
301 while (!kthread_should_stop()) { 275 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
302 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); 276 input_report_abs(pidev->input, ABS_X, x - lis3_dev.xcalib);
303 input_report_abs(lis3_dev.idev, ABS_X, x - lis3_dev.xcalib); 277 input_report_abs(pidev->input, ABS_Y, y - lis3_dev.ycalib);
304 input_report_abs(lis3_dev.idev, ABS_Y, y - lis3_dev.ycalib); 278 input_report_abs(pidev->input, ABS_Z, z - lis3_dev.zcalib);
305 input_report_abs(lis3_dev.idev, ABS_Z, z - lis3_dev.zcalib);
306
307 input_sync(lis3_dev.idev);
308
309 try_to_freeze();
310 msleep_interruptible(MDPS_POLL_INTERVAL);
311 }
312
313 return 0;
314}
315
316static int lis3lv02d_joystick_open(struct input_dev *input)
317{
318 lis3lv02d_increase_use(&lis3_dev);
319 lis3_dev.kthread = kthread_run(lis3lv02d_joystick_kthread, NULL, "klis3lv02d");
320 if (IS_ERR(lis3_dev.kthread)) {
321 lis3lv02d_decrease_use(&lis3_dev);
322 return PTR_ERR(lis3_dev.kthread);
323 }
324
325 return 0;
326} 279}
327 280
328static void lis3lv02d_joystick_close(struct input_dev *input)
329{
330 kthread_stop(lis3_dev.kthread);
331 lis3lv02d_decrease_use(&lis3_dev);
332}
333 281
334static inline void lis3lv02d_calibrate_joystick(void) 282static inline void lis3lv02d_calibrate_joystick(void)
335{ 283{
@@ -339,33 +287,36 @@ static inline void lis3lv02d_calibrate_joystick(void)
339 287
340int lis3lv02d_joystick_enable(void) 288int lis3lv02d_joystick_enable(void)
341{ 289{
290 struct input_dev *input_dev;
342 int err; 291 int err;
343 292
344 if (lis3_dev.idev) 293 if (lis3_dev.idev)
345 return -EINVAL; 294 return -EINVAL;
346 295
347 lis3_dev.idev = input_allocate_device(); 296 lis3_dev.idev = input_allocate_polled_device();
348 if (!lis3_dev.idev) 297 if (!lis3_dev.idev)
349 return -ENOMEM; 298 return -ENOMEM;
350 299
300 lis3_dev.idev->poll = lis3lv02d_joystick_poll;
301 lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
302 input_dev = lis3_dev.idev->input;
303
351 lis3lv02d_calibrate_joystick(); 304 lis3lv02d_calibrate_joystick();
352 305
353 lis3_dev.idev->name = "ST LIS3LV02DL Accelerometer"; 306 input_dev->name = "ST LIS3LV02DL Accelerometer";
354 lis3_dev.idev->phys = DRIVER_NAME "/input0"; 307 input_dev->phys = DRIVER_NAME "/input0";
355 lis3_dev.idev->id.bustype = BUS_HOST; 308 input_dev->id.bustype = BUS_HOST;
356 lis3_dev.idev->id.vendor = 0; 309 input_dev->id.vendor = 0;
357 lis3_dev.idev->dev.parent = &lis3_dev.pdev->dev; 310 input_dev->dev.parent = &lis3_dev.pdev->dev;
358 lis3_dev.idev->open = lis3lv02d_joystick_open;
359 lis3_dev.idev->close = lis3lv02d_joystick_close;
360 311
361 set_bit(EV_ABS, lis3_dev.idev->evbit); 312 set_bit(EV_ABS, input_dev->evbit);
362 input_set_abs_params(lis3_dev.idev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 313 input_set_abs_params(input_dev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
363 input_set_abs_params(lis3_dev.idev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 314 input_set_abs_params(input_dev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
364 input_set_abs_params(lis3_dev.idev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 315 input_set_abs_params(input_dev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
365 316
366 err = input_register_device(lis3_dev.idev); 317 err = input_register_polled_device(lis3_dev.idev);
367 if (err) { 318 if (err) {
368 input_free_device(lis3_dev.idev); 319 input_free_polled_device(lis3_dev.idev);
369 lis3_dev.idev = NULL; 320 lis3_dev.idev = NULL;
370 } 321 }
371 322
@@ -378,8 +329,9 @@ void lis3lv02d_joystick_disable(void)
378 if (!lis3_dev.idev) 329 if (!lis3_dev.idev)
379 return; 330 return;
380 331
381 misc_deregister(&lis3lv02d_misc_device); 332 if (lis3_dev.irq)
382 input_unregister_device(lis3_dev.idev); 333 misc_deregister(&lis3lv02d_misc_device);
334 input_unregister_polled_device(lis3_dev.idev);
383 lis3_dev.idev = NULL; 335 lis3_dev.idev = NULL;
384} 336}
385EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable); 337EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
@@ -390,9 +342,7 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
390{ 342{
391 int x, y, z; 343 int x, y, z;
392 344
393 lis3lv02d_increase_use(&lis3_dev);
394 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); 345 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
395 lis3lv02d_decrease_use(&lis3_dev);
396 return sprintf(buf, "(%d,%d,%d)\n", x, y, z); 346 return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
397} 347}
398 348
@@ -406,9 +356,7 @@ static ssize_t lis3lv02d_calibrate_store(struct device *dev,
406 struct device_attribute *attr, 356 struct device_attribute *attr,
407 const char *buf, size_t count) 357 const char *buf, size_t count)
408{ 358{
409 lis3lv02d_increase_use(&lis3_dev);
410 lis3lv02d_calibrate_joystick(); 359 lis3lv02d_calibrate_joystick();
411 lis3lv02d_decrease_use(&lis3_dev);
412 return count; 360 return count;
413} 361}
414 362
@@ -420,9 +368,7 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
420 u8 ctrl; 368 u8 ctrl;
421 int val; 369 int val;
422 370
423 lis3lv02d_increase_use(&lis3_dev);
424 lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl); 371 lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
425 lis3lv02d_decrease_use(&lis3_dev);
426 val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4; 372 val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
427 return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]); 373 return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
428} 374}
@@ -446,17 +392,17 @@ static struct attribute_group lis3lv02d_attribute_group = {
446 392
447static int lis3lv02d_add_fs(struct lis3lv02d *lis3) 393static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
448{ 394{
449 lis3_dev.pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); 395 lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
450 if (IS_ERR(lis3_dev.pdev)) 396 if (IS_ERR(lis3->pdev))
451 return PTR_ERR(lis3_dev.pdev); 397 return PTR_ERR(lis3->pdev);
452 398
453 return sysfs_create_group(&lis3_dev.pdev->dev.kobj, &lis3lv02d_attribute_group); 399 return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
454} 400}
455 401
456int lis3lv02d_remove_fs(void) 402int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
457{ 403{
458 sysfs_remove_group(&lis3_dev.pdev->dev.kobj, &lis3lv02d_attribute_group); 404 sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
459 platform_device_unregister(lis3_dev.pdev); 405 platform_device_unregister(lis3->pdev);
460 return 0; 406 return 0;
461} 407}
462EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs); 408EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
@@ -482,18 +428,35 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
482 break; 428 break;
483 default: 429 default:
484 printk(KERN_ERR DRIVER_NAME 430 printk(KERN_ERR DRIVER_NAME
485 ": unknown sensor type 0x%X\n", lis3_dev.whoami); 431 ": unknown sensor type 0x%X\n", dev->whoami);
486 return -EINVAL; 432 return -EINVAL;
487 } 433 }
488 434
489 mutex_init(&dev->lock);
490 lis3lv02d_add_fs(dev); 435 lis3lv02d_add_fs(dev);
491 lis3lv02d_increase_use(dev); 436 lis3lv02d_poweron(dev);
492 437
493 if (lis3lv02d_joystick_enable()) 438 if (lis3lv02d_joystick_enable())
494 printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n"); 439 printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n");
495 440
496 printk("lis3_init_device: irq %d\n", dev->irq); 441 /* passing in platform specific data is purely optional and only
442 * used by the SPI transport layer at the moment */
443 if (dev->pdata) {
444 struct lis3lv02d_platform_data *p = dev->pdata;
445
446 if (p->click_flags && (dev->whoami == LIS_SINGLE_ID)) {
447 dev->write(dev, CLICK_CFG, p->click_flags);
448 dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
449 dev->write(dev, CLICK_LATENCY, p->click_latency);
450 dev->write(dev, CLICK_WINDOW, p->click_window);
451 dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
452 dev->write(dev, CLICK_THSY_X,
453 (p->click_thresh_x & 0xf) |
454 (p->click_thresh_y << 4));
455 }
456
457 if (p->irq_cfg)
458 dev->write(dev, CTRL_REG3, p->irq_cfg);
459 }
497 460
498 /* bail if we did not get an IRQ from the bus layer */ 461 /* bail if we did not get an IRQ from the bus layer */
499 if (!dev->irq) { 462 if (!dev->irq) {
@@ -502,11 +465,9 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
502 goto out; 465 goto out;
503 } 466 }
504 467
505 printk("lis3: registering device\n");
506 if (misc_register(&lis3lv02d_misc_device)) 468 if (misc_register(&lis3lv02d_misc_device))
507 printk(KERN_ERR DRIVER_NAME ": misc_register failed\n"); 469 printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
508out: 470out:
509 lis3lv02d_decrease_use(dev);
510 return 0; 471 return 0;
511} 472}
512EXPORT_SYMBOL_GPL(lis3lv02d_init_device); 473EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
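The conversion above trades the driver's private kthread, mutex and usage counter for the input-polldev helper, which runs the poll callback off a shared workqueue every poll_interval milliseconds, and only while the input node is held open. A minimal sketch of that pattern, assuming the 2.6.30-era API and purely illustrative names:

#include <linux/input-polldev.h>

static void example_poll(struct input_polled_dev *pidev)
{
	/* read the hardware and report through the embedded input device */
	input_report_abs(pidev->input, ABS_X, 0 /* sampled value */);
	input_sync(pidev->input);
}

static int example_register(void)
{
	struct input_polled_dev *pidev;
	int err;

	pidev = input_allocate_polled_device();
	if (!pidev)
		return -ENOMEM;

	pidev->poll = example_poll;
	pidev->poll_interval = 50;	/* ms; the core handles all timing */
	pidev->input->name = "example polled accelerometer";
	pidev->input->id.bustype = BUS_HOST;
	set_bit(EV_ABS, pidev->input->evbit);
	input_set_abs_params(pidev->input, ABS_X, -512, 512, 3, 3);

	err = input_register_polled_device(pidev);
	if (err)
		input_free_polled_device(pidev);
	return err;
}

Because the core starts and stops polling around open/close of the input node, the open/close handlers and usage counting deleted above become unnecessary.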
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index 745ec96806d4..e320e2f511f1 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -18,6 +18,8 @@
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21#include <linux/platform_device.h>
22#include <linux/input-polldev.h>
21 23
22/* 24/*
23 * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to 25 * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to
@@ -27,12 +29,14 @@
27 * They can also be connected via I²C. 29 * They can also be connected via I²C.
28 */ 30 */
29 31
32#include <linux/lis3lv02d.h>
33
30/* 2-byte registers */ 34/* 2-byte registers */
31#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */ 35#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */
32/* 1-byte registers */ 36/* 1-byte registers */
33#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */ 37#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */
34 38
35enum lis3lv02d_reg { 39enum lis3_reg {
36 WHO_AM_I = 0x0F, 40 WHO_AM_I = 0x0F,
37 OFFSET_X = 0x16, 41 OFFSET_X = 0x16,
38 OFFSET_Y = 0x17, 42 OFFSET_Y = 0x17,
@@ -60,6 +64,19 @@ enum lis3lv02d_reg {
60 FF_WU_THS_L = 0x34, 64 FF_WU_THS_L = 0x34,
61 FF_WU_THS_H = 0x35, 65 FF_WU_THS_H = 0x35,
62 FF_WU_DURATION = 0x36, 66 FF_WU_DURATION = 0x36,
67};
68
69enum lis302d_reg {
70 CLICK_CFG = 0x38,
71 CLICK_SRC = 0x39,
72 CLICK_THSY_X = 0x3B,
73 CLICK_THSZ = 0x3C,
74 CLICK_TIMELIMIT = 0x3D,
75 CLICK_LATENCY = 0x3E,
76 CLICK_WINDOW = 0x3F,
77};
78
79enum lis3lv02d_reg {
63 DD_CFG = 0x38, 80 DD_CFG = 0x38,
64 DD_SRC = 0x39, 81 DD_SRC = 0x39,
65 DD_ACK = 0x3A, 82 DD_ACK = 0x3A,
@@ -169,22 +186,20 @@ struct lis3lv02d {
169 s16 (*read_data) (struct lis3lv02d *lis3, int reg); 186 s16 (*read_data) (struct lis3lv02d *lis3, int reg);
170 int mdps_max_val; 187 int mdps_max_val;
171 188
172 struct input_dev *idev; /* input device */ 189 struct input_polled_dev *idev; /* input device */
173 struct task_struct *kthread; /* kthread for input */
174 struct mutex lock;
175 struct platform_device *pdev; /* platform device */ 190 struct platform_device *pdev; /* platform device */
176 atomic_t count; /* interrupt count after last read */ 191 atomic_t count; /* interrupt count after last read */
177 int xcalib; /* calibrated null value for x */ 192 int xcalib; /* calibrated null value for x */
178 int ycalib; /* calibrated null value for y */ 193 int ycalib; /* calibrated null value for y */
179 int zcalib; /* calibrated null value for z */ 194 int zcalib; /* calibrated null value for z */
180 unsigned char is_on; /* whether the device is on or off */
181 unsigned char usage; /* usage counter */
182 struct axis_conversion ac; /* hw -> logical axis */ 195 struct axis_conversion ac; /* hw -> logical axis */
183 196
184 u32 irq; /* IRQ number */ 197 u32 irq; /* IRQ number */
185 struct fasync_struct *async_queue; /* queue for the misc device */ 198 struct fasync_struct *async_queue; /* queue for the misc device */
186 wait_queue_head_t misc_wait; /* Wait queue for the misc device */ 199 wait_queue_head_t misc_wait; /* Wait queue for the misc device */
187 unsigned long misc_opened; /* bit0: whether the device is open */ 200 unsigned long misc_opened; /* bit0: whether the device is open */
201
202 struct lis3lv02d_platform_data *pdata; /* for passing board config */
188}; 203};
189 204
190int lis3lv02d_init_device(struct lis3lv02d *lis3); 205int lis3lv02d_init_device(struct lis3lv02d *lis3);
@@ -192,6 +207,6 @@ int lis3lv02d_joystick_enable(void);
192void lis3lv02d_joystick_disable(void); 207void lis3lv02d_joystick_disable(void);
193void lis3lv02d_poweroff(struct lis3lv02d *lis3); 208void lis3lv02d_poweroff(struct lis3lv02d *lis3);
194void lis3lv02d_poweron(struct lis3lv02d *lis3); 209void lis3lv02d_poweron(struct lis3lv02d *lis3);
195int lis3lv02d_remove_fs(void); 210int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
196 211
197extern struct lis3lv02d lis3_dev; 212extern struct lis3lv02d lis3_dev;
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 07ae74b0e191..3827ff04485f 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -72,6 +72,7 @@ static int __devinit lis302dl_spi_probe(struct spi_device *spi)
72 lis3_dev.write = lis3_spi_write; 72 lis3_dev.write = lis3_spi_write;
73 lis3_dev.irq = spi->irq; 73 lis3_dev.irq = spi->irq;
74 lis3_dev.ac = lis3lv02d_axis_normal; 74 lis3_dev.ac = lis3lv02d_axis_normal;
75 lis3_dev.pdata = spi->dev.platform_data;
75 spi_set_drvdata(spi, &lis3_dev); 76 spi_set_drvdata(spi, &lis3_dev);
76 77
77 ret = lis3lv02d_init_device(&lis3_dev); 78 ret = lis3lv02d_init_device(&lis3_dev);
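The new ->pdata member above is filled from spi->dev.platform_data, so board code can describe the click and interrupt wiring. A hypothetical board-file fragment (field names are taken from the hunks above; the modalias string and register values are assumptions for illustration):

#include <linux/spi/spi.h>
#include <linux/lis3lv02d.h>

static struct lis3lv02d_platform_data example_lis3_pdata = {
	.click_flags      = 0x01,	/* e.g. single click on X */
	.click_time_limit = 9,
	.click_thresh_x   = 8,
	.click_thresh_y   = 8,
	.irq_cfg          = 0x01,	/* assumed: route click events to INT1 */
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias      = "lis3lv02d_spi",	/* assumed driver name */
		.platform_data = &example_lis3_pdata,
		.max_speed_hz  = 1000000,
		.bus_num       = 0,
		.chip_select   = 0,
	},
};

The board init code would then call spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info)) before the SPI master probes.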
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 356b3a25efa2..1c0b529c06aa 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -35,7 +35,7 @@
35#include <linux/input.h> 35#include <linux/input.h>
36#include <linux/gameport.h> 36#include <linux/gameport.h>
37#include <linux/jiffies.h> 37#include <linux/jiffies.h>
38#include <asm/timex.h> 38#include <linux/timex.h>
39 39
40#define DRIVER_DESC "Analog joystick and gamepad driver" 40#define DRIVER_DESC "Analog joystick and gamepad driver"
41 41
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index d6a30cee7bc7..6d67af5387ad 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/input.h> 18#include <linux/input.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/timex.h>
20#include <asm/io.h> 21#include <asm/io.h>
21 22
22MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 23MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 6109fb5f34e2..0c29a019bc89 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/pagemap.h>
20#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
21#include <media/videobuf-dma-contig.h> 22#include <media/videobuf-dma-contig.h>
22 23
@@ -25,6 +26,7 @@ struct videobuf_dma_contig_memory {
25 void *vaddr; 26 void *vaddr;
26 dma_addr_t dma_handle; 27 dma_addr_t dma_handle;
27 unsigned long size; 28 unsigned long size;
29 int is_userptr;
28}; 30};
29 31
30#define MAGIC_DC_MEM 0x0733ac61 32#define MAGIC_DC_MEM 0x0733ac61
@@ -108,6 +110,82 @@ static struct vm_operations_struct videobuf_vm_ops = {
108 .close = videobuf_vm_close, 110 .close = videobuf_vm_close,
109}; 111};
110 112
113/**
114 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
115 * @mem: per-buffer private videobuf-dma-contig data
116 *
117 * This function resets the user space pointer
118 */
119static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
120{
121 mem->is_userptr = 0;
122 mem->dma_handle = 0;
123 mem->size = 0;
124}
125
126/**
127 * videobuf_dma_contig_user_get() - setup user space memory pointer
128 * @mem: per-buffer private videobuf-dma-contig data
129 * @vb: video buffer to map
130 *
131 * This function validates and sets up a pointer to user space memory.
132 * Only physically contiguous pfn-mapped memory is accepted.
133 *
134 * Returns 0 if successful.
135 */
136static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
137 struct videobuf_buffer *vb)
138{
139 struct mm_struct *mm = current->mm;
140 struct vm_area_struct *vma;
141 unsigned long prev_pfn, this_pfn;
142 unsigned long pages_done, user_address;
143 int ret;
144
145 mem->size = PAGE_ALIGN(vb->size);
146 mem->is_userptr = 0;
147 ret = -EINVAL;
148
149 down_read(&mm->mmap_sem);
150
151 vma = find_vma(mm, vb->baddr);
152 if (!vma)
153 goto out_up;
154
155 if ((vb->baddr + mem->size) > vma->vm_end)
156 goto out_up;
157
158 pages_done = 0;
159 prev_pfn = 0; /* kill warning */
160 user_address = vb->baddr;
161
162 while (pages_done < (mem->size >> PAGE_SHIFT)) {
163 ret = follow_pfn(vma, user_address, &this_pfn);
164 if (ret)
165 break;
166
167 if (pages_done == 0)
168 mem->dma_handle = this_pfn << PAGE_SHIFT;
169 else if (this_pfn != (prev_pfn + 1))
170 ret = -EFAULT;
171
172 if (ret)
173 break;
174
175 prev_pfn = this_pfn;
176 user_address += PAGE_SIZE;
177 pages_done++;
178 }
179
180 if (!ret)
181 mem->is_userptr = 1;
182
183 out_up:
184 up_read(&current->mm->mmap_sem);
185
186 return ret;
187}
188
111static void *__videobuf_alloc(size_t size) 189static void *__videobuf_alloc(size_t size)
112{ 190{
113 struct videobuf_dma_contig_memory *mem; 191 struct videobuf_dma_contig_memory *mem;
@@ -154,12 +232,11 @@ static int __videobuf_iolock(struct videobuf_queue *q,
154 case V4L2_MEMORY_USERPTR: 232 case V4L2_MEMORY_USERPTR:
155 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__); 233 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
156 234
157 /* The only USERPTR currently supported is the one needed for 235 /* handle pointer from user space */
158 read() method.
159 */
160 if (vb->baddr) 236 if (vb->baddr)
161 return -EINVAL; 237 return videobuf_dma_contig_user_get(mem, vb);
162 238
239 /* allocate memory for the read() method */
163 mem->size = PAGE_ALIGN(vb->size); 240 mem->size = PAGE_ALIGN(vb->size);
164 mem->vaddr = dma_alloc_coherent(q->dev, mem->size, 241 mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
165 &mem->dma_handle, GFP_KERNEL); 242 &mem->dma_handle, GFP_KERNEL);
@@ -400,7 +477,7 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
400 So, it should free memory only if the memory was allocated for 477 So, it should free memory only if the memory was allocated for
401 read() operation. 478 read() operation.
402 */ 479 */
403 if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr) 480 if (buf->memory != V4L2_MEMORY_USERPTR)
404 return; 481 return;
405 482
406 if (!mem) 483 if (!mem)
@@ -408,6 +485,13 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
408 485
409 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); 486 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
410 487
488 /* handle user space pointer case */
489 if (buf->baddr) {
490 videobuf_dma_contig_user_put(mem);
491 return;
492 }
493
494 /* read() method */
411 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle); 495 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
412 mem->vaddr = NULL; 496 mem->vaddr = NULL;
413} 497}
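For the application side of the new USERPTR path: the buffer is queued through the ordinary V4L2 ioctls, and videobuf_dma_contig_user_get() above walks the VMA with follow_pfn(), failing with -EFAULT unless every page is physically adjacent. A rough user-space sketch using only the standard V4L2 API (function name illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* queue a user-allocated, physically contiguous buffer for capture */
static int queue_userptr(int fd, void *buf, size_t len, unsigned int index)
{
	struct v4l2_buffer vb;

	memset(&vb, 0, sizeof(vb));
	vb.type      = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vb.memory    = V4L2_MEMORY_USERPTR;
	vb.index     = index;
	vb.m.userptr = (unsigned long)buf;
	vb.length    = len;

	return ioctl(fd, VIDIOC_QBUF, &vb);
}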
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index bbefe77c67a9..3ce2920e2bf3 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -302,7 +302,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
302 pnode = uv_node_to_pnode(nid); 302 pnode = uv_node_to_pnode(nid);
303 if (bid < 0 || gru_base[bid]) 303 if (bid < 0 || gru_base[bid])
304 continue; 304 continue;
305 page = alloc_pages_node(nid, GFP_KERNEL, order); 305 page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
306 if (!page) 306 if (!page)
307 goto fail; 307 goto fail;
308 gru_base[bid] = page_address(page); 308 gru_base[bid] = page_address(page);
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 9172fcdee4e2..c76677afda1b 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -232,7 +232,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
232 mq->mmr_blade = uv_cpu_to_blade_id(cpu); 232 mq->mmr_blade = uv_cpu_to_blade_id(cpu);
233 233
234 nid = cpu_to_node(cpu); 234 nid = cpu_to_node(cpu);
235 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 235 page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
236 pg_order); 236 pg_order);
237 if (page == NULL) { 237 if (page == NULL) {
238 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 238 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
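Both conversions above hold a node id they have already validated. Roughly, the 2.6.31-era distinction: alloc_pages_node() quietly maps a negative nid to the current node, while alloc_pages_exact_node() expects a real node and skips that branch. A sketch of when each applies (illustrative helper):

#include <linux/gfp.h>

static struct page *example_alloc(int nid, unsigned int order)
{
	if (nid < 0)	/* "any node" request: take the generic path */
		return alloc_pages(GFP_KERNEL, order);

	/* nid is known valid here, so the nid < 0 fixup inside
	 * alloc_pages_node() is dead weight; use the exact variant */
	return alloc_pages_exact_node(nid, GFP_KERNEL, order);
}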
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 1703b20cad5d..6095f8daecd7 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -915,12 +915,9 @@ static int ds_ioctl(struct inode * inode, struct file * file,
915 err = -EPERM; 915 err = -EPERM;
916 goto free_out; 916 goto free_out;
917 } else { 917 } else {
918 static int printed = 0; 918 printk_once(KERN_WARNING
919 if (!printed) { 919 "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n");
920 printk(KERN_WARNING "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n"); 920 printk_once(KERN_WARNING "MTD handling any more.\n");
921 printk(KERN_WARNING "MTD handling any more.\n");
922 printed++;
923 }
924 } 921 }
925 err = -EINVAL; 922 err = -EINVAL;
926 goto free_out; 923 goto free_out;
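printk_once() folds the static-flag dance deleted above into a single macro. Its effect is roughly the following sketch (not the verbatim kernel.h definition):

#define example_printk_once(fmt...)		\
({						\
	static bool __printed;			\
						\
	if (!__printed) {			\
		__printed = true;		\
		printk(fmt);			\
	}					\
})

Each expansion gets its own static flag, so the message fires exactly once per call site rather than once globally.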
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index f4573a96af24..a32ccb44065e 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -711,12 +711,12 @@ static int of_mpc83xx_spi_get_chipselects(struct device *dev)
711 return 0; 711 return 0;
712 } 712 }
713 713
714 pinfo->gpios = kmalloc(ngpios * sizeof(pinfo->gpios), GFP_KERNEL); 714 pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
715 if (!pinfo->gpios) 715 if (!pinfo->gpios)
716 return -ENOMEM; 716 return -ENOMEM;
717 memset(pinfo->gpios, -1, ngpios * sizeof(pinfo->gpios)); 717 memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
718 718
719 pinfo->alow_flags = kzalloc(ngpios * sizeof(pinfo->alow_flags), 719 pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
720 GFP_KERNEL); 720 GFP_KERNEL);
721 if (!pinfo->alow_flags) { 721 if (!pinfo->alow_flags) {
722 ret = -ENOMEM; 722 ret = -ENOMEM;
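The bug fixed above is the classic sizeof(ptr) versus sizeof(*ptr) mix-up: sizeof(pinfo->gpios) is the size of the pointer itself (4 or 8 bytes), not of the element, so the table was sized and memset with the wrong length. The idiomatic form ties the size to whatever the pointer dereferences to (illustrative helper and element type):

#include <linux/slab.h>

static u16 *example_alloc_table(int n)
{
	u16 *table;

	/* sizeof(*table) == sizeof(u16) and stays correct even if the
	 * element type changes later; sizeof(table) would not */
	table = kmalloc(n * sizeof(*table), GFP_KERNEL);
	return table;
}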
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 2b5a691064b7..932ffdbf86d9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2104,6 +2104,7 @@ config FB_MB862XX_LIME
2104 bool "Lime GDC" 2104 bool "Lime GDC"
2105 depends on FB_MB862XX 2105 depends on FB_MB862XX
2106 depends on OF && !FB_MB862XX_PCI_GDC 2106 depends on OF && !FB_MB862XX_PCI_GDC
2107 depends on PPC
2107 select FB_FOREIGN_ENDIAN 2108 select FB_FOREIGN_ENDIAN
2108 select FB_LITTLE_ENDIAN 2109 select FB_LITTLE_ENDIAN
2109 ---help--- 2110 ---help---
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 6995fe1e86d4..0bcc59eb37fa 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -859,43 +859,6 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
859 return 0; 859 return 0;
860} 860}
861 861
862/*
863 * Note that we are entered with the kernel locked.
864 */
865static int
866acornfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
867{
868 unsigned long off, start;
869 u32 len;
870
871 off = vma->vm_pgoff << PAGE_SHIFT;
872
873 start = info->fix.smem_start;
874 len = PAGE_ALIGN(start & ~PAGE_MASK) + info->fix.smem_len;
875 start &= PAGE_MASK;
876 if ((vma->vm_end - vma->vm_start + off) > len)
877 return -EINVAL;
878 off += start;
879 vma->vm_pgoff = off >> PAGE_SHIFT;
880
881 /* This is an IO map - tell maydump to skip this VMA */
882 vma->vm_flags |= VM_IO;
883
884 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
885
886 /*
887 * Don't alter the page protection flags; we want to keep the area
888 * cached for better performance. This does mean that we may miss
889 * some updates to the screen occasionally, but process switches
890 * should cause the caches and buffers to be flushed often enough.
891 */
892 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
893 vma->vm_end - vma->vm_start,
894 vma->vm_page_prot))
895 return -EAGAIN;
896 return 0;
897}
898
899static struct fb_ops acornfb_ops = { 862static struct fb_ops acornfb_ops = {
900 .owner = THIS_MODULE, 863 .owner = THIS_MODULE,
901 .fb_check_var = acornfb_check_var, 864 .fb_check_var = acornfb_check_var,
@@ -905,7 +868,6 @@ static struct fb_ops acornfb_ops = {
905 .fb_fillrect = cfb_fillrect, 868 .fb_fillrect = cfb_fillrect,
906 .fb_copyarea = cfb_copyarea, 869 .fb_copyarea = cfb_copyarea,
907 .fb_imageblit = cfb_imageblit, 870 .fb_imageblit = cfb_imageblit,
908 .fb_mmap = acornfb_mmap,
909}; 871};
910 872
911/* 873/*
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 2fb63f6ea2f1..5afd64482f55 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -345,7 +345,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
345 dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel); 345 dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel);
346 dev_dbg(dev, " clk: %lu KHz\n", clk_value_khz); 346 dev_dbg(dev, " clk: %lu KHz\n", clk_value_khz);
347 347
348 if ((PICOS2KHZ(var->pixclock) * var->bits_per_pixel / 8) > clk_value_khz) { 348 if (PICOS2KHZ(var->pixclock) > clk_value_khz) {
349 dev_err(dev, "%lu KHz pixel clock is too fast\n", PICOS2KHZ(var->pixclock)); 349 dev_err(dev, "%lu KHz pixel clock is too fast\n", PICOS2KHZ(var->pixclock));
350 return -EINVAL; 350 return -EINVAL;
351 } 351 }
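For the atmel_lcdfb bound above: var->pixclock is a pixel period in picoseconds, so PICOS2KHZ(var->pixclock) = 10^9 / pixclock is the dot clock in kHz; a 25000 ps period, for example, is a 40000 kHz (40 MHz) clock. The old test additionally scaled by bytes per pixel and so rejected reachable modes. A sketch of the corrected comparison (illustrative helper; PICOS2KHZ comes from linux/fb.h):

#include <linux/errno.h>
#include <linux/fb.h>

/* the dot clock must not exceed the LCDC input clock, both in kHz */
static int example_check_pixclock(u32 pixclock_ps, unsigned long clk_khz)
{
	return (PICOS2KHZ(pixclock_ps) > clk_khz) ? -EINVAL : 0;
}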
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 97a1f095f327..515cf1978d19 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -213,7 +213,6 @@ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo)
213 PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb | 213 PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb |
214 PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb | 214 PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb |
215 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb | 215 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
216 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
217 PIXCLKS_CNTL__R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); 216 PIXCLKS_CNTL__R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
218 OUTPLL(pllPIXCLKS_CNTL, tmp); 217 OUTPLL(pllPIXCLKS_CNTL, tmp);
219 218
@@ -395,7 +394,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
395 PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb | 394 PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb |
396 PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb | 395 PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb |
397 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb | 396 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
398 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb); 397 PIXCLKS_CNTL__R300_P2G2CLK_DAC_ALWAYS_ONb);
399 OUTPLL(pllPIXCLKS_CNTL, tmp); 398 OUTPLL(pllPIXCLKS_CNTL, tmp);
400 399
401 tmp = INPLL(pllMCLK_MISC); 400 tmp = INPLL(pllMCLK_MISC);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 37e60b1d2ed9..e49ae5edcc00 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -323,7 +323,6 @@ static int bfin_bf54x_fb_release(struct fb_info *info, int user)
323 bfin_write_EPPI0_CONTROL(0); 323 bfin_write_EPPI0_CONTROL(0);
324 SSYNC(); 324 SSYNC();
325 disable_dma(CH_EPPI0); 325 disable_dma(CH_EPPI0);
326 memset(fbi->fb_buffer, 0, info->fix.smem_len);
327 } 326 }
328 327
329 spin_unlock(&fbi->lock); 328 spin_unlock(&fbi->lock);
@@ -530,7 +529,7 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
530 return IRQ_HANDLED; 529 return IRQ_HANDLED;
531} 530}
532 531
533static int __init bfin_bf54x_probe(struct platform_device *pdev) 532static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
534{ 533{
535 struct bfin_bf54xfb_info *info; 534 struct bfin_bf54xfb_info *info;
536 struct fb_info *fbinfo; 535 struct fb_info *fbinfo;
@@ -626,14 +625,12 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
626 goto out3; 625 goto out3;
627 } 626 }
628 627
629 memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
630
631 fbinfo->screen_base = (void *)info->fb_buffer; 628 fbinfo->screen_base = (void *)info->fb_buffer;
632 fbinfo->fix.smem_start = (int)info->fb_buffer; 629 fbinfo->fix.smem_start = (int)info->fb_buffer;
633 630
634 fbinfo->fbops = &bfin_bf54x_fb_ops; 631 fbinfo->fbops = &bfin_bf54x_fb_ops;
635 632
636 fbinfo->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL); 633 fbinfo->pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
637 if (!fbinfo->pseudo_palette) { 634 if (!fbinfo->pseudo_palette) {
638 printk(KERN_ERR DRIVER_NAME 635 printk(KERN_ERR DRIVER_NAME
639 "Fail to allocate pseudo_palette\n"); 636 "Fail to allocate pseudo_palette\n");
@@ -642,8 +639,6 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
642 goto out4; 639 goto out4;
643 } 640 }
644 641
645 memset(fbinfo->pseudo_palette, 0, sizeof(u32) * 16);
646
647 if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) 642 if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0)
648 < 0) { 643 < 0) {
649 printk(KERN_ERR DRIVER_NAME 644 printk(KERN_ERR DRIVER_NAME
@@ -712,7 +707,7 @@ out1:
712 return ret; 707 return ret;
713} 708}
714 709
715static int bfin_bf54x_remove(struct platform_device *pdev) 710static int __devexit bfin_bf54x_remove(struct platform_device *pdev)
716{ 711{
717 712
718 struct fb_info *fbinfo = platform_get_drvdata(pdev); 713 struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -781,7 +776,7 @@ static int bfin_bf54x_resume(struct platform_device *pdev)
781 776
782static struct platform_driver bfin_bf54x_driver = { 777static struct platform_driver bfin_bf54x_driver = {
783 .probe = bfin_bf54x_probe, 778 .probe = bfin_bf54x_probe,
784 .remove = bfin_bf54x_remove, 779 .remove = __devexit_p(bfin_bf54x_remove),
785 .suspend = bfin_bf54x_suspend, 780 .suspend = bfin_bf54x_suspend,
786 .resume = bfin_bf54x_resume, 781 .resume = bfin_bf54x_resume,
787 .driver = { 782 .driver = {
@@ -790,7 +785,7 @@ static struct platform_driver bfin_bf54x_driver = {
790 }, 785 },
791}; 786};
792 787
793static int __devinit bfin_bf54x_driver_init(void) 788static int __init bfin_bf54x_driver_init(void)
794{ 789{
795 return platform_driver_register(&bfin_bf54x_driver); 790 return platform_driver_register(&bfin_bf54x_driver);
796} 791}
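Two cleanups recur in this driver and its t350mcqb sibling below: kzalloc() absorbs each kmalloc()+memset() pair, and probe/remove pick up __devinit/__devexit so the code can be discarded when hotplug support is compiled out. The allocation idiom in isolation (illustrative helper):

#include <linux/slab.h>

static u32 *example_alloc_palette(void)
{
	/* 16 zeroed u32 entries or NULL; no separate memset() needed */
	return kzalloc(16 * sizeof(u32), GFP_KERNEL);
}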
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 90cfddabf1f7..5cc36cfbf07b 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -242,7 +242,6 @@ static int bfin_t350mcqb_fb_release(struct fb_info *info, int user)
242 SSYNC(); 242 SSYNC();
243 disable_dma(CH_PPI); 243 disable_dma(CH_PPI);
244 bfin_t350mcqb_stop_timers(); 244 bfin_t350mcqb_stop_timers();
245 memset(fbi->fb_buffer, 0, info->fix.smem_len);
246 } 245 }
247 246
248 spin_unlock(&fbi->lock); 247 spin_unlock(&fbi->lock);
@@ -527,8 +526,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
527 goto out3; 526 goto out3;
528 } 527 }
529 528
530 memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
531
532 fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; 529 fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
533 fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; 530 fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
534 531
@@ -602,7 +599,7 @@ out1:
602 return ret; 599 return ret;
603} 600}
604 601
605static int bfin_t350mcqb_remove(struct platform_device *pdev) 602static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
606{ 603{
607 604
608 struct fb_info *fbinfo = platform_get_drvdata(pdev); 605 struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -637,9 +634,6 @@ static int bfin_t350mcqb_remove(struct platform_device *pdev)
637#ifdef CONFIG_PM 634#ifdef CONFIG_PM
638static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state) 635static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
639{ 636{
640 struct fb_info *fbinfo = platform_get_drvdata(pdev);
641 struct bfin_t350mcqbfb_info *info = fbinfo->par;
642
643 bfin_t350mcqb_disable_ppi(); 637 bfin_t350mcqb_disable_ppi();
644 disable_dma(CH_PPI); 638 disable_dma(CH_PPI);
645 bfin_write_PPI_STATUS(0xFFFF); 639 bfin_write_PPI_STATUS(0xFFFF);
@@ -649,9 +643,6 @@ static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t stat
649 643
650static int bfin_t350mcqb_resume(struct platform_device *pdev) 644static int bfin_t350mcqb_resume(struct platform_device *pdev)
651{ 645{
652 struct fb_info *fbinfo = platform_get_drvdata(pdev);
653 struct bfin_t350mcqbfb_info *info = fbinfo->par;
654
655 enable_dma(CH_PPI); 646 enable_dma(CH_PPI);
656 bfin_t350mcqb_enable_ppi(); 647 bfin_t350mcqb_enable_ppi();
657 648
@@ -664,7 +655,7 @@ static int bfin_t350mcqb_resume(struct platform_device *pdev)
664 655
665static struct platform_driver bfin_t350mcqb_driver = { 656static struct platform_driver bfin_t350mcqb_driver = {
666 .probe = bfin_t350mcqb_probe, 657 .probe = bfin_t350mcqb_probe,
667 .remove = bfin_t350mcqb_remove, 658 .remove = __devexit_p(bfin_t350mcqb_remove),
668 .suspend = bfin_t350mcqb_suspend, 659 .suspend = bfin_t350mcqb_suspend,
669 .resume = bfin_t350mcqb_resume, 660 .resume = bfin_t350mcqb_resume,
670 .driver = { 661 .driver = {
@@ -673,7 +664,7 @@ static struct platform_driver bfin_t350mcqb_driver = {
673 }, 664 },
674}; 665};
675 666
676static int __devinit bfin_t350mcqb_driver_init(void) 667static int __init bfin_t350mcqb_driver_init(void)
677{ 668{
678 return platform_driver_register(&bfin_t350mcqb_driver); 669 return platform_driver_register(&bfin_t350mcqb_driver);
679} 670}
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
index c7ff3c1a266a..0c02f8ec4bf3 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/carminefb.c
@@ -562,7 +562,7 @@ static int __devinit alloc_carmine_fb(void __iomem *regs, void __iomem *smem_bas
562 if (ret < 0) 562 if (ret < 0)
563 goto err_free_fb; 563 goto err_free_fb;
564 564
565 if (fb_mode > ARRAY_SIZE(carmine_modedb)) 565 if (fb_mode >= ARRAY_SIZE(carmine_modedb))
566 fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE; 566 fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
567 567
568 par->cur_mode = par->new_mode = ~0; 568 par->cur_mode = par->new_mode = ~0;
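The carminefb change is a fencepost fix: valid indexes into carmine_modedb[] run from 0 through ARRAY_SIZE - 1, so the old '>' test let fb_mode == ARRAY_SIZE through and read one element past the end. The guard pattern, reduced to a sketch:

#include <linux/kernel.h>

static int example_lookup(unsigned int idx)
{
	static const int table[4] = { 10, 20, 30, 40 };

	if (idx >= ARRAY_SIZE(table))	/* '>' would still allow idx == 4 */
		idx = 0;		/* fall back to a default entry */
	return table[idx];
}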
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index 777389c40988..57b9d276497e 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -414,7 +414,6 @@ chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
414 } 414 }
415 415
416 pci_set_drvdata(dp, p); 416 pci_set_drvdata(dp, p);
417 p->device = &dp->dev;
418 417
419 init_chips(p, addr); 418 init_chips(p, addr);
420 419
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 8dea2bc92705..eb12182b2059 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -280,6 +280,9 @@ static int __init efifb_probe(struct platform_device *dev)
280 info->pseudo_palette = info->par; 280 info->pseudo_palette = info->par;
281 info->par = NULL; 281 info->par = NULL;
282 282
283 info->aperture_base = efifb_fix.smem_start;
284 info->aperture_size = size_total;
285
283 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 286 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
284 if (!info->screen_base) { 287 if (!info->screen_base) {
285 printk(KERN_ERR "efifb: abort, cannot ioremap video memory " 288 printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
@@ -337,7 +340,7 @@ static int __init efifb_probe(struct platform_device *dev)
337 info->fbops = &efifb_ops; 340 info->fbops = &efifb_ops;
338 info->var = efifb_defined; 341 info->var = efifb_defined;
339 info->fix = efifb_fix; 342 info->fix = efifb_fix;
340 info->flags = FBINFO_FLAG_DEFAULT; 343 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE;
341 344
342 if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) { 345 if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) {
343 printk(KERN_ERR "efifb: cannot allocate colormap\n"); 346 printk(KERN_ERR "efifb: cannot allocate colormap\n");
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index d412a1ddc12f..f8a09bf8d0cd 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1462,6 +1462,16 @@ static int fb_check_foreignness(struct fb_info *fi)
1462 return 0; 1462 return 0;
1463} 1463}
1464 1464
1465static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw)
1466{
1467 /* is the generic aperture base the same as the HW one */
1468 if (gen->aperture_base == hw->aperture_base)
1469 return true;
1470 /* is the generic aperture base inside the hw base->hw base+size */
1471 if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size)
1472 return true;
1473 return false;
1474}
1465/** 1475/**
1466 * register_framebuffer - registers a frame buffer device 1476 * register_framebuffer - registers a frame buffer device
1467 * @fb_info: frame buffer info structure 1477 * @fb_info: frame buffer info structure
@@ -1485,6 +1495,23 @@ register_framebuffer(struct fb_info *fb_info)
1485 if (fb_check_foreignness(fb_info)) 1495 if (fb_check_foreignness(fb_info))
1486 return -ENOSYS; 1496 return -ENOSYS;
1487 1497
1498 /* check all firmware fbs and kick off if the base addr overlaps */
1499 for (i = 0 ; i < FB_MAX; i++) {
1500 if (!registered_fb[i])
1501 continue;
1502
1503 if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) {
1504 if (fb_do_apertures_overlap(registered_fb[i], fb_info)) {
1505 printk(KERN_ERR "fb: conflicting fb hw usage "
1506 "%s vs %s - removing generic driver\n",
1507 fb_info->fix.id,
1508 registered_fb[i]->fix.id);
1509 unregister_framebuffer(registered_fb[i]);
1510 break;
1511 }
1512 }
1513 }
1514
1488 num_registered_fb++; 1515 num_registered_fb++;
1489 for (i = 0 ; i < FB_MAX; i++) 1516 for (i = 0 ; i < FB_MAX; i++)
1490 if (!registered_fb[i]) 1517 if (!registered_fb[i])
@@ -1586,6 +1613,10 @@ unregister_framebuffer(struct fb_info *fb_info)
1586 device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1613 device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1587 event.info = fb_info; 1614 event.info = fb_info;
1588 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1615 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
1616
1617 /* this may free fb info */
1618 if (fb_info->fbops->fb_destroy)
1619 fb_info->fbops->fb_destroy(fb_info);
1589done: 1620done:
1590 return ret; 1621 return ret;
1591} 1622}
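The handoff above has two halves: a firmware framebuffer (efifb, vesafb, offb) publishes its aperture and flags itself FBINFO_MISC_FIRMWARE, and register_framebuffer() later evicts it when a hardware driver registers an overlapping aperture, with the new fb_destroy hook letting the evicted driver free its fb_info. A sketch of the firmware-side setup, mirroring the efifb hunk earlier (illustrative wrapper):

static int example_fw_register(struct fb_info *info,
			       unsigned long base, unsigned long size)
{
	info->aperture_base = base;
	info->aperture_size = size;
	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE;

	/* a later hw driver covering [base, base + size) kicks this fb */
	return register_framebuffer(info);
}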
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 3a81060137a2..15d200109446 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -395,17 +395,16 @@ int __init igafb_init(void)
395 /* We leak a reference here but as it cannot be unloaded this is 395 /* We leak a reference here but as it cannot be unloaded this is
396 fine. If you write unload code remember to free it in unload */ 396 fine. If you write unload code remember to free it in unload */
397 397
398 size = sizeof(struct fb_info) + sizeof(struct iga_par) + sizeof(u32)*16; 398 size = sizeof(struct iga_par) + sizeof(u32)*16;
399 399
400 info = kzalloc(size, GFP_ATOMIC); 400 info = framebuffer_alloc(size, &pdev->dev);
401 if (!info) { 401 if (!info) {
402 printk("igafb_init: can't alloc fb_info\n"); 402 printk("igafb_init: can't alloc fb_info\n");
403 pci_dev_put(pdev); 403 pci_dev_put(pdev);
404 return -ENOMEM; 404 return -ENOMEM;
405 } 405 }
406 406
407 par = (struct iga_par *) (info + 1); 407 par = info->par;
408
409 408
410 if ((addr = pdev->resource[0].start) == 0) { 409 if ((addr = pdev->resource[0].start) == 0) {
411 printk("igafb_init: no memory start\n"); 410 printk("igafb_init: no memory start\n");
@@ -526,7 +525,6 @@ int __init igafb_init(void)
526 info->var = default_var; 525 info->var = default_var;
527 info->fix = igafb_fix; 526 info->fix = igafb_fix;
528 info->pseudo_palette = (void *)(par + 1); 527 info->pseudo_palette = (void *)(par + 1);
529 info->device = &pdev->dev;
530 528
531 if (!iga_init(info, par)) { 529 if (!iga_init(info, par)) {
532 iounmap((void *)par->io_base); 530 iounmap((void *)par->io_base);
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index ace14fe02fc4..0cafd642fbc0 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -1365,6 +1365,11 @@ static int intelfb_set_par(struct fb_info *info)
1365 DBG_MSG("intelfb_set_par (%dx%d-%d)\n", info->var.xres, 1365 DBG_MSG("intelfb_set_par (%dx%d-%d)\n", info->var.xres,
1366 info->var.yres, info->var.bits_per_pixel); 1366 info->var.yres, info->var.bits_per_pixel);
1367 1367
1368 /*
1369 * Disable VCO prior to timing register change.
1370 */
1371 OUTREG(DPLL_A, INREG(DPLL_A) & ~DPLL_VCO_ENABLE);
1372
1368 intelfb_blank(FB_BLANK_POWERDOWN, info); 1373 intelfb_blank(FB_BLANK_POWERDOWN, info);
1369 1374
1370 if (ACCEL(dinfo, info)) 1375 if (ACCEL(dinfo, info))
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index b91251d1fe41..3b437813584c 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -37,22 +37,24 @@ extra-y += $(call logo-cfiles,_clut224,ppm)
37# Gray 256 37# Gray 256
38extra-y += $(call logo-cfiles,_gray256,pgm) 38extra-y += $(call logo-cfiles,_gray256,pgm)
39 39
40pnmtologo := scripts/pnmtologo
41
40# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..." 42# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
41quiet_cmd_logo = LOGO $@ 43quiet_cmd_logo = LOGO $@
42 cmd_logo = scripts/pnmtologo \ 44 cmd_logo = $(pnmtologo) \
43 -t $(patsubst $*_%,%,$(notdir $(basename $<))) \ 45 -t $(patsubst $*_%,%,$(notdir $(basename $<))) \
44 -n $(notdir $(basename $<)) -o $@ $< 46 -n $(notdir $(basename $<)) -o $@ $<
45 47
46$(obj)/%_mono.c: $(src)/%_mono.pbm FORCE 48$(obj)/%_mono.c: $(src)/%_mono.pbm $(pnmtologo) FORCE
47 $(call if_changed,logo) 49 $(call if_changed,logo)
48 50
49$(obj)/%_vga16.c: $(src)/%_vga16.ppm FORCE 51$(obj)/%_vga16.c: $(src)/%_vga16.ppm $(pnmtologo) FORCE
50 $(call if_changed,logo) 52 $(call if_changed,logo)
51 53
52$(obj)/%_clut224.c: $(src)/%_clut224.ppm FORCE 54$(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
53 $(call if_changed,logo) 55 $(call if_changed,logo)
54 56
55$(obj)/%_gray256.c: $(src)/%_gray256.pgm FORCE 57$(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
56 $(call if_changed,logo) 58 $(call if_changed,logo)
57 59
58# Files generated that shall be removed upon make clean 60# Files generated that shall be removed upon make clean
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 2e85a2b52d05..ea7a8ccc830c 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,21 +21,6 @@
21#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
22#endif 22#endif
23 23
24extern const struct linux_logo logo_linux_mono;
25extern const struct linux_logo logo_linux_vga16;
26extern const struct linux_logo logo_linux_clut224;
27extern const struct linux_logo logo_blackfin_vga16;
28extern const struct linux_logo logo_blackfin_clut224;
29extern const struct linux_logo logo_dec_clut224;
30extern const struct linux_logo logo_mac_clut224;
31extern const struct linux_logo logo_parisc_clut224;
32extern const struct linux_logo logo_sgi_clut224;
33extern const struct linux_logo logo_sun_clut224;
34extern const struct linux_logo logo_superh_mono;
35extern const struct linux_logo logo_superh_vga16;
36extern const struct linux_logo logo_superh_clut224;
37extern const struct linux_logo logo_m32r_clut224;
38
39static int nologo; 24static int nologo;
40module_param(nologo, bool, 0); 25module_param(nologo, bool, 0);
41MODULE_PARM_DESC(nologo, "Disables startup logo"); 26MODULE_PARM_DESC(nologo, "Disables startup logo");
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index fb64234a3825..a28e3cfbbf70 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -19,7 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#if defined(CONFIG_PPC_OF) 22#if defined(CONFIG_OF)
23#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#endif 24#endif
25#include "mb862xxfb.h" 25#include "mb862xxfb.h"
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 16186240c5f2..34e4e7995169 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -264,6 +264,14 @@ static const struct fb_videomode modedb[] = {
264 /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */ 264 /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
265 NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, 265 NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
266 0, FB_VMODE_NONINTERLACED 266 0, FB_VMODE_NONINTERLACED
267 }, {
268 /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
269 NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
270 0, FB_VMODE_INTERLACED
271 }, {
272 /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
273 NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
274 0, FB_VMODE_INTERLACED
267 }, 275 },
268}; 276};
269 277
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index e1d9eeb1aeaf..4d8c54c23dd7 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -378,7 +378,6 @@ static void __init offb_init_fb(const char *name, const char *full_name,
378 struct fb_fix_screeninfo *fix; 378 struct fb_fix_screeninfo *fix;
379 struct fb_var_screeninfo *var; 379 struct fb_var_screeninfo *var;
380 struct fb_info *info; 380 struct fb_info *info;
381 int size;
382 381
383 if (!request_mem_region(res_start, res_size, "offb")) 382 if (!request_mem_region(res_start, res_size, "offb"))
384 return; 383 return;
@@ -393,15 +392,12 @@ static void __init offb_init_fb(const char *name, const char *full_name,
393 return; 392 return;
394 } 393 }
395 394
396 size = sizeof(struct fb_info) + sizeof(u32) * 16; 395 info = framebuffer_alloc(sizeof(u32) * 16, NULL);
397
398 info = kmalloc(size, GFP_ATOMIC);
399 396
400 if (info == 0) { 397 if (info == 0) {
401 release_mem_region(res_start, res_size); 398 release_mem_region(res_start, res_size);
402 return; 399 return;
403 } 400 }
404 memset(info, 0, size);
405 401
406 fix = &info->fix; 402 fix = &info->fix;
407 var = &info->var; 403 var = &info->var;
@@ -497,7 +493,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
497 iounmap(par->cmap_adr); 493 iounmap(par->cmap_adr);
498 par->cmap_adr = NULL; 494 par->cmap_adr = NULL;
499 iounmap(info->screen_base); 495 iounmap(info->screen_base);
500 kfree(info); 496 framebuffer_release(info);
501 release_mem_region(res_start, res_size); 497 release_mem_region(res_start, res_size);
502 return; 498 return;
503 } 499 }
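framebuffer_alloc()/framebuffer_release(), used in the offb conversion above and the igafb and pm2fb hunks around it, replace open-coded kmalloc/kfree of struct fb_info: the helper allocates the info plus a driver-private area reachable through info->par in one shot, keeping the two lifetimes tied together. A sketch (illustrative, following the offb usage):

#include <linux/fb.h>

static struct fb_info *example_alloc(struct device *dev)
{
	/* fb_info plus room for a 16-entry pseudo palette in info->par */
	struct fb_info *info = framebuffer_alloc(sizeof(u32) * 16, dev);

	if (!info)
		return NULL;
	info->pseudo_palette = info->par;
	/* on any later failure: framebuffer_release(info); */
	return info;
}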
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index c6dd924976a4..36436ee6c1a4 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1748,7 +1748,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
1748 pci_set_drvdata(pdev, NULL); 1748 pci_set_drvdata(pdev, NULL);
1749 fb_dealloc_cmap(&info->cmap); 1749 fb_dealloc_cmap(&info->cmap);
1750 kfree(info->pixmap.addr); 1750 kfree(info->pixmap.addr);
1751 kfree(info); 1751 framebuffer_release(info);
1752} 1752}
1753 1753
1754static struct pci_device_id pm2fb_id_table[] = { 1754static struct pci_device_id pm2fb_id_table[] = {
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index 0726aecf3b7e..0deb0a8867b7 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -2,6 +2,7 @@
2 * 2 *
3 * (c) 2004 Simtec Electronics 3 * (c) 2004 Simtec Electronics
4 * (c) 2005 Thibaut VARENE <varenet@parisc-linux.org> 4 * (c) 2005 Thibaut VARENE <varenet@parisc-linux.org>
5 * (c) 2009 Kristoffer Ericson <kristoffer.ericson@gmail.com>
5 * 6 *
6 * Driver for Epson S1D13xxx series framebuffer chips 7 * Driver for Epson S1D13xxx series framebuffer chips
7 * 8 *
@@ -10,18 +11,10 @@
10 * linux/drivers/video/epson1355fb.c 11 * linux/drivers/video/epson1355fb.c
11 * linux/drivers/video/epson/s1d13xxxfb.c (2.4 driver by Epson) 12 * linux/drivers/video/epson/s1d13xxxfb.c (2.4 driver by Epson)
12 * 13 *
13 * Note, currently only tested on S1D13806 with 16bit CRT.
14 * As such, this driver might still contain some hardcoded bits relating to
15 * S1D13806.
16 * Making it work on other S1D13XXX chips should merely be a matter of adding
17 * a few switch()s, some missing glue here and there maybe, and split header
18 * files.
19 *
20 * TODO: - handle dual screen display (CRT and LCD at the same time). 14 * TODO: - handle dual screen display (CRT and LCD at the same time).
21 * - check_var(), mode change, etc. 15 * - check_var(), mode change, etc.
22 * - PM untested. 16 * - probably not SMP safe :)
23 * - Accelerated interfaces. 17 * - support all bitblt operations on all cards
24 * - Probably not SMP safe :)
25 * 18 *
26 * This file is subject to the terms and conditions of the GNU General Public 19 * This file is subject to the terms and conditions of the GNU General Public
27 * License. See the file COPYING in the main directory of this archive for 20 * License. See the file COPYING in the main directory of this archive for
@@ -31,19 +24,24 @@
31#include <linux/module.h> 24#include <linux/module.h>
32#include <linux/platform_device.h> 25#include <linux/platform_device.h>
33#include <linux/delay.h> 26#include <linux/delay.h>
34
35#include <linux/types.h> 27#include <linux/types.h>
36#include <linux/errno.h> 28#include <linux/errno.h>
37#include <linux/mm.h> 29#include <linux/mm.h>
38#include <linux/mman.h> 30#include <linux/mman.h>
39#include <linux/fb.h> 31#include <linux/fb.h>
32#include <linux/spinlock_types.h>
33#include <linux/spinlock.h>
40 34
41#include <asm/io.h> 35#include <asm/io.h>
42 36
43#include <video/s1d13xxxfb.h> 37#include <video/s1d13xxxfb.h>
44 38
45#define PFX "s1d13xxxfb: " 39#define PFX "s1d13xxxfb: "
40#define BLIT "s1d13xxxfb_bitblt: "
46 41
42/*
43 * set this to enable debugging on general functions
44 */
47#if 0 45#if 0
48#define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0) 46#define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0)
49#else 47#else
@@ -51,7 +49,21 @@
51#endif 49#endif
52 50
53/* 51/*
54 * List of card production ids 52 * set this to enable debugging on 2D acceleration
53 */
54#if 0
55#define dbg_blit(fmt, args...) do { printk(KERN_INFO BLIT fmt, ## args); } while (0)
56#else
57#define dbg_blit(fmt, args...) do { } while (0)
58#endif
59
60/*
61 * we make sure only one bitblt operation is running
62 */
63static DEFINE_SPINLOCK(s1d13xxxfb_bitblt_lock);
64
65/*
66 * list of card production ids
55 */ 67 */
56static const int s1d13xxxfb_prod_ids[] = { 68static const int s1d13xxxfb_prod_ids[] = {
57 S1D13505_PROD_ID, 69 S1D13505_PROD_ID,
@@ -69,7 +81,7 @@ static const char *s1d13xxxfb_prod_names[] = {
69}; 81};
70 82
71/* 83/*
72 * Here we define the default struct fb_fix_screeninfo 84 * here we define the default struct fb_fix_screeninfo
73 */ 85 */
74static struct fb_fix_screeninfo __devinitdata s1d13xxxfb_fix = { 86static struct fb_fix_screeninfo __devinitdata s1d13xxxfb_fix = {
75 .id = S1D_FBID, 87 .id = S1D_FBID,
@@ -145,8 +157,10 @@ crt_enable(struct s1d13xxxfb_par *par, int enable)
145 s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode); 157 s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode);
146} 158}
147 159
148/* framebuffer control routines */
149 160
161/*************************************************************
162 framebuffer control functions
163 *************************************************************/
150static inline void 164static inline void
151s1d13xxxfb_setup_pseudocolour(struct fb_info *info) 165s1d13xxxfb_setup_pseudocolour(struct fb_info *info)
152{ 166{
@@ -242,13 +256,13 @@ s1d13xxxfb_set_par(struct fb_info *info)
242} 256}
243 257
244/** 258/**
245 * s1d13xxxfb_setcolreg - sets a color register. 259 * s1d13xxxfb_setcolreg - sets a color register.
246 * @regno: Which register in the CLUT we are programming 260 * @regno: Which register in the CLUT we are programming
247 * @red: The red value which can be up to 16 bits wide 261 * @red: The red value which can be up to 16 bits wide
248 * @green: The green value which can be up to 16 bits wide 262 * @green: The green value which can be up to 16 bits wide
249 * @blue: The blue value which can be up to 16 bits wide. 263 * @blue: The blue value which can be up to 16 bits wide.
250 * @transp: If supported the alpha value which can be up to 16 bits wide. 264 * @transp: If supported the alpha value which can be up to 16 bits wide.
251 * @info: frame buffer info structure 265 * @info: frame buffer info structure
252 * 266 *
253 * Returns negative errno on error, or zero on success. 267 * Returns negative errno on error, or zero on success.
254 */ 268 */
@@ -351,15 +365,15 @@ s1d13xxxfb_blank(int blank_mode, struct fb_info *info)
351} 365}
352 366
353/** 367/**
354 * s1d13xxxfb_pan_display - Pans the display. 368 * s1d13xxxfb_pan_display - Pans the display.
355 * @var: frame buffer variable screen structure 369 * @var: frame buffer variable screen structure
356 * @info: frame buffer structure that represents a single frame buffer 370 * @info: frame buffer structure that represents a single frame buffer
357 * 371 *
358 * Pan (or wrap, depending on the `vmode' field) the display using the 372 * Pan (or wrap, depending on the `vmode' field) the display using the
359 * `yoffset' field of the `var' structure (`xoffset' not yet supported). 373 * `yoffset' field of the `var' structure (`xoffset' not yet supported).
360 * If the values don't fit, return -EINVAL. 374 * If the values don't fit, return -EINVAL.
361 * 375 *
362 * Returns negative errno on error, or zero on success. 376 * Returns negative errno on error, or zero on success.
363 */ 377 */
364static int 378static int
365s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) 379s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -390,8 +404,259 @@ s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
390 return 0; 404 return 0;
391} 405}
392 406
393/* framebuffer information structures */ 407/************************************************************
408 functions to handle bitblt acceleration
409 ************************************************************/
410
411/**
412 * bltbit_wait_bitset - waits for change in register value
413 * @info : framebuffer structure
414 * @bit : value expected in register
415 * @timeout : number of 10us delay steps before we give up
416 *
417 * waits until value changes INTO bit
418 */
419static u8
420bltbit_wait_bitset(struct fb_info *info, u8 bit, int timeout)
421{
422 while (!(s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit)) {
423 udelay(10);
424 if (!--timeout) {
425 dbg_blit("wait_bitset timeout\n");
426 break;
427 }
428 }
429
430 return timeout;
431}
432
433/**
434 * bltbit_wait_bitclear - waits for change in register value
435 * @info : framebuffer structure
436 * @bit : value currently in register
437 * @timeout : number of 10us delay steps before we give up
438 *
439 * waits until value changes FROM bit
440 *
441 */
442static u8
443bltbit_wait_bitclear(struct fb_info *info, u8 bit, int timeout)
444{
445 while (s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit) {
446 udelay(10);
447 if (!--timeout) {
448 dbg_blit("wait_bitclear timeout\n");
449 break;
450 }
451 }
452
453 return timeout;
454}
455
456/**
457 * bltbit_fifo_status - checks the current status of the fifo
458 * @info : framebuffer structure
459 *
460 * returns number of free words in buffer
461 */
462static u8
463bltbit_fifo_status(struct fb_info *info)
464{
465 u8 status;
394 466
467 status = s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0);
468
469 /* it's empty, so there is room for 16 words */
470 if (status & BBLT_FIFO_EMPTY)
471 return 16;
472
473 /* it's full, so we don't want to add more */
474 if (status & BBLT_FIFO_FULL)
475 return 0;
476
477 /* it's at least half full, but we can add at least one word */
478 if (status & BBLT_FIFO_NOT_FULL)
479 return 1;
480
481 return 0;
482}
483
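
Note that bltbit_fifo_status() is not called by the copyarea/solidfill paths below; it is there for callers that stream data through the BitBLT FIFO. A minimal sketch of such a caller, built only from the helpers in this patch (the function name is hypothetical):

static int
bltbit_fifo_wait_room(struct fb_info *info, int timeout)
{
	/* 0 means the FIFO reported full; 1 or 16 means we may write */
	while (!bltbit_fifo_status(info)) {
		udelay(10);
		if (!--timeout)
			return -ETIMEDOUT;
	}
	return 0;
}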
484/*
485 * s1d13xxxfb_bitblt_copyarea - accelerated copyarea function
486 * @info : framebuffer structure
487 * @area : fb_copyarea structure
488 *
489 * supports (at least) the S1D13506
490 *
491 */
492static void
493s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area)
494{
495 u32 dst, src;
496 u32 stride;
497 u16 reverse = 0;
498 u16 sx = area->sx, sy = area->sy;
499 u16 dx = area->dx, dy = area->dy;
500 u16 width = area->width, height = area->height;
501 u16 bpp;
502
503 spin_lock(&s1d13xxxfb_bitblt_lock);
504
505 /* bytes per xres line */
506 bpp = (info->var.bits_per_pixel >> 3);
507 stride = bpp * info->var.xres;
508
509 /* reverse, calculate the last pixel in rectangle */
510 if ((dy > sy) || ((dy == sy) && (dx >= sx))) {
511 dst = (((dy + height - 1) * stride) + (bpp * (dx + width - 1)));
512 src = (((sy + height - 1) * stride) + (bpp * (sx + width - 1)));
513 reverse = 1;
514 /* not reverse, calculate the first pixel in rectangle */
515 } else { /* (y * xres) + (bpp * x) */
516 dst = (dy * stride) + (bpp * dx);
517 src = (sy * stride) + (bpp * sx);
518 }
519
520 /* set source address */
521 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START0, (src & 0xff));
522 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START1, (src >> 8) & 0x00ff);
523 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START2, (src >> 16) & 0x00ff);
524
525 /* set destination address */
526 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dst & 0xff));
527 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, (dst >> 8) & 0x00ff);
528 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, (dst >> 16) & 0x00ff);
529
530 /* program height and width */
531 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, (width & 0xff) - 1);
532 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (width >> 8));
533
534 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, (height & 0xff) - 1);
535 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (height >> 8));
536
537 /* negative direction ROP */
538 if (reverse == 1) {
539 dbg_blit("(copyarea) negative rop\n");
540 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x03);
541 } else /* positive direction ROP */ {
542 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x02);
543 dbg_blit("(copyarea) positive rop\n");
544 }
545
546 /* set for rectangle mode and not linear */
547 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);
548
549 /* set up the bpp: 1 = 16bpp, 0 = 8bpp */
550 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (bpp >> 1));
551
552 /* set words per xres */
553 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (stride >> 1) & 0xff);
554 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (stride >> 9));
555
556 dbg_blit("(copyarea) dx=%d, dy=%d\n", dx, dy);
557 dbg_blit("(copyarea) sx=%d, sy=%d\n", sx, sy);
558 dbg_blit("(copyarea) width=%d, height=%d\n", width - 1, height - 1);
559 dbg_blit("(copyarea) stride=%d\n", stride);
560 dbg_blit("(copyarea) bpp=%d=0x0%d, mem_offset1=%d, mem_offset2=%d\n", bpp, (bpp >> 1),
561 (stride >> 1) & 0xff, stride >> 9);
562
563 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CC_EXP, 0x0c);
564
565 /* initialize the engine */
566 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);
567
568 /* wait to complete */
569 bltbit_wait_bitclear(info, 0x80, 8000);
570
571 spin_unlock(&s1d13xxxfb_bitblt_lock);
572}
573
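
The direction test above is the same rule memmove() follows: when the destination rectangle starts at or after the source, the two regions may overlap, so the engine has to start at the last pixel and walk backwards. As a worked example (illustrative numbers): at 8bpp with xres = 320 (so stride = 320), copying a 10x10 block from (sx,sy) = (0,0) to (dx,dy) = (5,0) takes the reverse branch (dy == sy and dx > sx), programming src = 9*320 + 9 = 2889 and dst = 9*320 + 14 = 2894, i.e. the bottom-right corners of the two rectangles.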
574/**
575 *
576 * s1d13xxxfb_bitblt_solidfill - accelerated solidfill function
577 * @info : framebuffer structure
578 * @rect : fb_fillrect structure
579 *
580 * supports (at least) the S1D13506
581 *
582 **/
583static void
584s1d13xxxfb_bitblt_solidfill(struct fb_info *info, const struct fb_fillrect *rect)
585{
586 u32 screen_stride, dest;
587 u32 fg;
588 u16 bpp = (info->var.bits_per_pixel >> 3);
589
590 /* grab spinlock */
591 spin_lock(&s1d13xxxfb_bitblt_lock);
592
593 /* bytes per xres line */
594 screen_stride = (bpp * info->var.xres);
595
596 /* bytes to starting point */
597 dest = ((rect->dy * screen_stride) + (bpp * rect->dx));
598
599 dbg_blit("(solidfill) dx=%d, dy=%d, stride=%d, dest=%d\n"
600 "(solidfill) : rect_width=%d, rect_height=%d\n",
601 rect->dx, rect->dy, screen_stride, dest,
602 rect->width - 1, rect->height - 1);
603
604 dbg_blit("(solidfill) : xres=%d, yres=%d, bpp=%d\n",
605 info->var.xres, info->var.yres,
606 info->var.bits_per_pixel);
607 dbg_blit("(solidfill) : rop=%d\n", rect->rop);
608
609 /* We split the destination into the three registers */
610 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dest & 0x00ff));
611 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, ((dest >> 8) & 0x00ff));
612 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, ((dest >> 16) & 0x00ff));
613
614 /* give information regarding rectangle width */
615 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, ((rect->width) & 0x00ff) - 1);
616 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (rect->width >> 8));
617
618 /* give information regarding rectangle height */
619 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, ((rect->height) & 0x00ff) - 1);
620 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (rect->height >> 8));
621
622 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
623 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
624 fg = ((u32 *)info->pseudo_palette)[rect->color];
625 dbg_blit("(solidfill) truecolor/directcolor\n");
626 dbg_blit("(solidfill) pseudo_palette[%d] = %d\n", rect->color, fg);
627 } else {
628 fg = rect->color;
629 dbg_blit("(solidfill) color = %d\n", rect->color);
630 }
631
632 /* set foreground color */
633 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC0, (fg & 0xff));
634 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC1, (fg >> 8) & 0xff);
635
636 /* set rectangular region of memory (rectangle and not linear) */
637 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);
638
639 /* set operation mode SOLID_FILL */
640 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, BBLT_SOLID_FILL);
641
642 /* set bits per pixel (1 = 16bpp, 0 = 8bpp) */
643 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (info->var.bits_per_pixel >> 4));
644
645 /* set the memory offset for the bblt in word sizes */
646 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (screen_stride >> 1) & 0x00ff);
647 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (screen_stride >> 9));
648
649 /* and away we go.... */
650 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);
651
652 /* wait until it's done */
653 bltbit_wait_bitclear(info, 0x80, 8000);
654
655 /* let others play */
656 spin_unlock(&s1d13xxxfb_bitblt_lock);
657}
658
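
The pseudo_palette lookup above relies on the driver's ->fb_setcolreg having stored ready-made pixel values rather than raw component intensities. For a 16bpp 5:6:5 visual, an entry is typically built along these lines (a sketch of the usual fbdev idiom, not code from this patch):

	/* pack 16-bit components down to RGB565; regno indexes the CLUT */
	((u32 *)info->pseudo_palette)[regno] =
		((red   >> 11) << 11) |		/* top 5 bits of red   */
		((green >> 10) <<  5) |		/* top 6 bits of green */
		 (blue  >> 11);			/* top 5 bits of blue  */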
659/* framebuffer information structures */
395static struct fb_ops s1d13xxxfb_fbops = { 660static struct fb_ops s1d13xxxfb_fbops = {
396 .owner = THIS_MODULE, 661 .owner = THIS_MODULE,
397 .fb_set_par = s1d13xxxfb_set_par, 662 .fb_set_par = s1d13xxxfb_set_par,
@@ -400,7 +665,7 @@ static struct fb_ops s1d13xxxfb_fbops = {
400 665
401 .fb_pan_display = s1d13xxxfb_pan_display, 666 .fb_pan_display = s1d13xxxfb_pan_display,
402 667
403 /* to be replaced by any acceleration we can */ 668 /* gets replaced at chip detection time */
404 .fb_fillrect = cfb_fillrect, 669 .fb_fillrect = cfb_fillrect,
405 .fb_copyarea = cfb_copyarea, 670 .fb_copyarea = cfb_copyarea,
406 .fb_imageblit = cfb_imageblit, 671 .fb_imageblit = cfb_imageblit,
@@ -412,9 +677,9 @@ static int s1d13xxxfb_width_tab[2][4] __devinitdata = {
412}; 677};
413 678
414/** 679/**
415 * s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to 680 * s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to
416 * hardware setup. 681 * hardware setup.
417 * @info: frame buffer structure 682 * @info: frame buffer structure
418 * 683 *
419 * We setup the framebuffer structures according to the current 684 * We setup the framebuffer structures according to the current
420 * hardware setup. On some machines, the BIOS will have filled 685 * hardware setup. On some machines, the BIOS will have filled
@@ -569,7 +834,6 @@ s1d13xxxfb_probe(struct platform_device *pdev)
569 if (pdata && pdata->platform_init_video) 834 if (pdata && pdata->platform_init_video)
570 pdata->platform_init_video(); 835 pdata->platform_init_video();
571 836
572
573 if (pdev->num_resources != 2) { 837 if (pdev->num_resources != 2) {
574 dev_err(&pdev->dev, "invalid num_resources: %i\n", 838 dev_err(&pdev->dev, "invalid num_resources: %i\n",
575 pdev->num_resources); 839 pdev->num_resources);
@@ -655,16 +919,27 @@ s1d13xxxfb_probe(struct platform_device *pdev)
655 919
656 info->fix = s1d13xxxfb_fix; 920 info->fix = s1d13xxxfb_fix;
657 info->fix.mmio_start = pdev->resource[1].start; 921 info->fix.mmio_start = pdev->resource[1].start;
658 info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start +1; 922 info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
659 info->fix.smem_start = pdev->resource[0].start; 923 info->fix.smem_start = pdev->resource[0].start;
660 info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start +1; 924 info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;
661 925
662 printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n", 926 printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
663 default_par->regs, info->fix.smem_len / 1024, info->screen_base); 927 default_par->regs, info->fix.smem_len / 1024, info->screen_base);
664 928
665 info->par = default_par; 929 info->par = default_par;
666 info->fbops = &s1d13xxxfb_fbops;
667 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; 930 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
931 info->fbops = &s1d13xxxfb_fbops;
932
933 switch(prod_id) {
934 case S1D13506_PROD_ID: /* activate acceleration */
935 s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
936 s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
937 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
938 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
939 break;
940 default:
941 break;
942 }
668 943
669 /* perform "manual" chip initialization, if needed */ 944 /* perform "manual" chip initialization, if needed */
670 if (pdata && pdata->initregs) 945 if (pdata && pdata->initregs)
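
One caveat about the hunk above: s1d13xxxfb_fbops is a single static structure shared by every probed device, so patching its members for one S1D13506 also changes the ops seen by any other instance. A per-device copy would avoid that; a sketch, not part of this patch:

	struct fb_ops *ops;

	/* duplicate the template so per-chip patching stays local */
	ops = kmemdup(&s1d13xxxfb_fbops, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;
	if (prod_id == S1D13506_PROD_ID) {
		ops->fb_fillrect = s1d13xxxfb_bitblt_solidfill;
		ops->fb_copyarea = s1d13xxxfb_bitblt_copyarea;
	}
	info->fbops = ops;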
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index d3a568e6b169..43680e545427 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -358,9 +358,16 @@ static int s3c_fb_set_par(struct fb_info *info)
358 writel(data, regs + VIDOSD_B(win_no)); 358 writel(data, regs + VIDOSD_B(win_no));
359 359
360 data = var->xres * var->yres; 360 data = var->xres * var->yres;
361
362 u32 osdc_data = 0;
363
364 osdc_data = VIDISD14C_ALPHA1_R(0xf) |
365 VIDISD14C_ALPHA1_G(0xf) |
366 VIDISD14C_ALPHA1_B(0xf);
367
361 if (s3c_fb_has_osd_d(win_no)) { 368 if (s3c_fb_has_osd_d(win_no)) {
362 writel(data, regs + VIDOSD_D(win_no)); 369 writel(data, regs + VIDOSD_D(win_no));
363 writel(0, regs + VIDOSD_C(win_no)); 370 writel(osdc_data, regs + VIDOSD_C(win_no));
364 } else 371 } else
365 writel(data, regs + VIDOSD_C(win_no)); 372 writel(data, regs + VIDOSD_C(win_no));
366 373
@@ -409,8 +416,12 @@ static int s3c_fb_set_par(struct fb_info *info)
409 data |= WINCON1_BPPMODE_19BPP_A1666; 416 data |= WINCON1_BPPMODE_19BPP_A1666;
410 else 417 else
411 data |= WINCON1_BPPMODE_18BPP_666; 418 data |= WINCON1_BPPMODE_18BPP_666;
412 } else if (var->transp.length != 0) 419 } else if (var->transp.length == 1)
413 data |= WINCON1_BPPMODE_25BPP_A1888; 420 data |= WINCON1_BPPMODE_25BPP_A1888
421 | WINCON1_BLD_PIX;
422 else if (var->transp.length == 4)
423 data |= WINCON1_BPPMODE_28BPP_A4888
424 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
414 else 425 else
415 data |= WINCON0_BPPMODE_24BPP_888; 426 data |= WINCON0_BPPMODE_24BPP_888;
416 427
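
The dispatch above keys the window mode off the alpha width the caller asked for: transp.length == 1 selects the 25bpp A1888 layout with per-pixel blending, transp.length == 4 selects 28bpp A4888 with the alpha-select bit, and anything else falls back to opaque 24bpp 888. A sketch of the fb_var_screeninfo settings that would request the new A4888 mode (the offsets are illustrative):

	var->bits_per_pixel = 32;
	var->transp.length  = 4;  var->transp.offset = 24;
	var->red.length     = 8;  var->red.offset    = 16;
	var->green.length   = 8;  var->green.offset  =  8;
	var->blue.length    = 8;  var->blue.offset   =  0;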
@@ -418,6 +429,20 @@ static int s3c_fb_set_par(struct fb_info *info)
418 break; 429 break;
419 } 430 }
420 431
432 /* window 0 has no color key control register */
433 if (win_no > 0) {
434 u32 keycon0_data = 0, keycon1_data = 0;
435
436 keycon0_data = ~(WxKEYCON0_KEYBL_EN |
437 WxKEYCON0_KEYEN_F |
438 WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
439
440 keycon1_data = WxKEYCON1_COLVAL(0xffffff);
441
442 writel(keycon0_data, regs + WxKEYCONy(win_no-1, 0));
443 writel(keycon1_data, regs + WxKEYCONy(win_no-1, 1));
444 }
445
421 writel(data, regs + WINCON(win_no)); 446 writel(data, regs + WINCON(win_no));
422 writel(0x0, regs + WINxMAP(win_no)); 447 writel(0x0, regs + WINxMAP(win_no));
423 448
@@ -700,9 +725,12 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
700 */ 725 */
701static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win) 726static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
702{ 727{
703 fb_dealloc_cmap(&win->fbinfo->cmap); 728 if (win->fbinfo) {
704 unregister_framebuffer(win->fbinfo); 729 unregister_framebuffer(win->fbinfo);
705 s3c_fb_free_memory(sfb, win); 730 fb_dealloc_cmap(&win->fbinfo->cmap);
731 s3c_fb_free_memory(sfb, win);
732 framebuffer_release(win->fbinfo);
733 }
706} 734}
707 735
708/** 736/**
@@ -753,7 +781,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
753 ret = s3c_fb_alloc_memory(sfb, win); 781 ret = s3c_fb_alloc_memory(sfb, win);
754 if (ret) { 782 if (ret) {
755 dev_err(sfb->dev, "failed to allocate display memory\n"); 783 dev_err(sfb->dev, "failed to allocate display memory\n");
756 goto err_framebuffer; 784 return ret;
757 } 785 }
758 786
759 /* setup the r/b/g positions for the window's palette */ 787 /* setup the r/b/g positions for the window's palette */
@@ -776,7 +804,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
776 ret = s3c_fb_check_var(&fbinfo->var, fbinfo); 804 ret = s3c_fb_check_var(&fbinfo->var, fbinfo);
777 if (ret < 0) { 805 if (ret < 0) {
778 dev_err(sfb->dev, "check_var failed on initial video params\n"); 806 dev_err(sfb->dev, "check_var failed on initial video params\n");
779 goto err_alloc_mem; 807 return ret;
780 } 808 }
781 809
782 /* create initial colour map */ 810 /* create initial colour map */
@@ -796,20 +824,13 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
796 ret = register_framebuffer(fbinfo); 824 ret = register_framebuffer(fbinfo);
797 if (ret < 0) { 825 if (ret < 0) {
798 dev_err(sfb->dev, "failed to register framebuffer\n"); 826 dev_err(sfb->dev, "failed to register framebuffer\n");
799 goto err_alloc_mem; 827 return ret;
800 } 828 }
801 829
802 *res = win; 830 *res = win;
803 dev_info(sfb->dev, "window %d: fb %s\n", win_no, fbinfo->fix.id); 831 dev_info(sfb->dev, "window %d: fb %s\n", win_no, fbinfo->fix.id);
804 832
805 return 0; 833 return 0;
806
807err_alloc_mem:
808 s3c_fb_free_memory(sfb, win);
809
810err_framebuffer:
811 unregister_framebuffer(fbinfo);
812 return ret;
813} 834}
814 835
815/** 836/**
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index b0b4513ba537..7da0027e2409 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/cpufreq.h>
27 28
28#include <asm/io.h> 29#include <asm/io.h>
29#include <asm/div64.h> 30#include <asm/div64.h>
@@ -89,7 +90,7 @@ static void s3c2410fb_set_lcdaddr(struct fb_info *info)
89static unsigned int s3c2410fb_calc_pixclk(struct s3c2410fb_info *fbi, 90static unsigned int s3c2410fb_calc_pixclk(struct s3c2410fb_info *fbi,
90 unsigned long pixclk) 91 unsigned long pixclk)
91{ 92{
92 unsigned long clk = clk_get_rate(fbi->clk); 93 unsigned long clk = fbi->clk_rate;
93 unsigned long long div; 94 unsigned long long div;
94 95
95 /* pixclk is in picoseconds, our clock is in Hz 96 /* pixclk is in picoseconds, our clock is in Hz
@@ -758,6 +759,57 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
758 return IRQ_HANDLED; 759 return IRQ_HANDLED;
759} 760}
760 761
762#ifdef CONFIG_CPU_FREQ
763
764static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
765 unsigned long val, void *data)
766{
767 struct cpufreq_freqs *freqs = data;
768 struct s3c2410fb_info *info;
769 struct fb_info *fbinfo;
770 long delta_f;
771
772 info = container_of(nb, struct s3c2410fb_info, freq_transition);
773 fbinfo = platform_get_drvdata(to_platform_device(info->dev));
774
775 /* work out change, <0 for speed-up */
776 delta_f = info->clk_rate - clk_get_rate(info->clk);
777
778 if ((val == CPUFREQ_POSTCHANGE && delta_f > 0) ||
779 (val == CPUFREQ_PRECHANGE && delta_f < 0)) {
780 info->clk_rate = clk_get_rate(info->clk);
781 s3c2410fb_activate_var(fbinfo);
782 }
783
784 return 0;
785}
786
787static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
788{
789 info->freq_transition.notifier_call = s3c2410fb_cpufreq_transition;
790
791 return cpufreq_register_notifier(&info->freq_transition,
792 CPUFREQ_TRANSITION_NOTIFIER);
793}
794
795static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
796{
797 cpufreq_unregister_notifier(&info->freq_transition,
798 CPUFREQ_TRANSITION_NOTIFIER);
799}
800
801#else
802static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
803{
804 return 0;
805}
806
807static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
808{
809}
810#endif
811
812
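
For reference, a transition notifier receives a struct cpufreq_freqs through its data argument, carrying the old and new frequencies, and is called once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE around each transition; the #else stubs above let the probe and remove paths call the register/deregister helpers unconditionally. A stripped-down sketch of the same hook, kept deliberately minimal:

static int my_fb_freq_notify(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/* only act once the new rate is in effect */
	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);
	return 0;
}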
761static char driver_name[] = "s3c2410fb"; 813static char driver_name[] = "s3c2410fb";
762 814
763static int __init s3c24xxfb_probe(struct platform_device *pdev, 815static int __init s3c24xxfb_probe(struct platform_device *pdev,
@@ -875,6 +927,8 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
875 927
876 msleep(1); 928 msleep(1);
877 929
930 info->clk_rate = clk_get_rate(info->clk);
931
878 /* find maximum required memory size for display */ 932 /* find maximum required memory size for display */
879 for (i = 0; i < mach_info->num_displays; i++) { 933 for (i = 0; i < mach_info->num_displays; i++) {
880 unsigned long smem_len = mach_info->displays[i].xres; 934 unsigned long smem_len = mach_info->displays[i].xres;
@@ -904,11 +958,17 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
904 958
905 s3c2410fb_check_var(&fbinfo->var, fbinfo); 959 s3c2410fb_check_var(&fbinfo->var, fbinfo);
906 960
961 ret = s3c2410fb_cpufreq_register(info);
962 if (ret < 0) {
963 dev_err(&pdev->dev, "Failed to register cpufreq\n");
964 goto free_video_memory;
965 }
966
907 ret = register_framebuffer(fbinfo); 967 ret = register_framebuffer(fbinfo);
908 if (ret < 0) { 968 if (ret < 0) {
909 printk(KERN_ERR "Failed to register framebuffer device: %d\n", 969 printk(KERN_ERR "Failed to register framebuffer device: %d\n",
910 ret); 970 ret);
911 goto free_video_memory; 971 goto free_cpufreq;
912 } 972 }
913 973
914 /* create device files */ 974 /* create device files */
@@ -922,6 +982,8 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
922 982
923 return 0; 983 return 0;
924 984
985 free_cpufreq:
986 s3c2410fb_cpufreq_deregister(info);
925free_video_memory: 987free_video_memory:
926 s3c2410fb_unmap_video_memory(fbinfo); 988 s3c2410fb_unmap_video_memory(fbinfo);
927release_clock: 989release_clock:
@@ -961,6 +1023,7 @@ static int s3c2410fb_remove(struct platform_device *pdev)
961 int irq; 1023 int irq;
962 1024
963 unregister_framebuffer(fbinfo); 1025 unregister_framebuffer(fbinfo);
1026 s3c2410fb_cpufreq_deregister(info);
964 1027
965 s3c2410fb_lcd_enable(info, 0); 1028 s3c2410fb_lcd_enable(info, 0);
966 msleep(1); 1029 msleep(1);
diff --git a/drivers/video/s3c2410fb.h b/drivers/video/s3c2410fb.h
index 9a6ba3e9d1b8..47a17bd23011 100644
--- a/drivers/video/s3c2410fb.h
+++ b/drivers/video/s3c2410fb.h
@@ -29,8 +29,13 @@ struct s3c2410fb_info {
29 enum s3c_drv_type drv_type; 29 enum s3c_drv_type drv_type;
30 struct s3c2410fb_hw regs; 30 struct s3c2410fb_hw regs;
31 31
32 unsigned long clk_rate;
32 unsigned int palette_ready; 33 unsigned int palette_ready;
33 34
35#ifdef CONFIG_CPU_FREQ
36 struct notifier_block freq_transition;
37#endif
38
34 /* keep these registers in case we need to re-write palette */ 39 /* keep these registers in case we need to re-write palette */
35 u32 palette_buffer[256]; 40 u32 palette_buffer[256];
36 u32 pseudo_pal[16]; 41 u32 pseudo_pal[16];
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7e17ee95a97a..7072d19080d5 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -5928,7 +5928,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5928 if(pci_enable_device(pdev)) { 5928 if(pci_enable_device(pdev)) {
5929 if(ivideo->nbridge) pci_dev_put(ivideo->nbridge); 5929 if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
5930 pci_set_drvdata(pdev, NULL); 5930 pci_set_drvdata(pdev, NULL);
5931 kfree(sis_fb_info); 5931 framebuffer_release(sis_fb_info);
5932 return -EIO; 5932 return -EIO;
5933 } 5933 }
5934 } 5934 }
@@ -6134,7 +6134,7 @@ error_3: vfree(ivideo->bios_abase);
6134 pci_set_drvdata(pdev, NULL); 6134 pci_set_drvdata(pdev, NULL);
6135 if(!ivideo->sisvga_enabled) 6135 if(!ivideo->sisvga_enabled)
6136 pci_disable_device(pdev); 6136 pci_disable_device(pdev);
6137 kfree(sis_fb_info); 6137 framebuffer_release(sis_fb_info);
6138 return ret; 6138 return ret;
6139 } 6139 }
6140 6140
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index eabaad765aeb..eec9dcb7f599 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1380,7 +1380,7 @@ stifb_cleanup(void)
1380 if (info->screen_base) 1380 if (info->screen_base)
1381 iounmap(info->screen_base); 1381 iounmap(info->screen_base);
1382 fb_dealloc_cmap(&info->cmap); 1382 fb_dealloc_cmap(&info->cmap);
1383 kfree(info); 1383 framebuffer_release(info);
1384 } 1384 }
1385 sti->info = NULL; 1385 sti->info = NULL;
1386 } 1386 }
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index 643afbfe8277..45b883598bf0 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -116,17 +116,16 @@ struct tcx_par {
116 u32 flags; 116 u32 flags;
117#define TCX_FLAG_BLANKED 0x00000001 117#define TCX_FLAG_BLANKED 0x00000001
118 118
119 unsigned long physbase;
120 unsigned long which_io; 119 unsigned long which_io;
121 unsigned long fbsize;
122 120
123 struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES]; 121 struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES];
124 int lowdepth; 122 int lowdepth;
125}; 123};
126 124
127/* Reset control plane so that WID is 8-bit plane. */ 125/* Reset control plane so that WID is 8-bit plane. */
128static void __tcx_set_control_plane(struct tcx_par *par) 126static void __tcx_set_control_plane(struct fb_info *info)
129{ 127{
128 struct tcx_par *par = info->par;
130 u32 __iomem *p, *pend; 129 u32 __iomem *p, *pend;
131 130
132 if (par->lowdepth) 131 if (par->lowdepth)
@@ -135,7 +134,7 @@ static void __tcx_set_control_plane(struct tcx_par *par)
135 p = par->cplane; 134 p = par->cplane;
136 if (p == NULL) 135 if (p == NULL)
137 return; 136 return;
138 for (pend = p + par->fbsize; p < pend; p++) { 137 for (pend = p + info->fix.smem_len; p < pend; p++) {
139 u32 tmp = sbus_readl(p); 138 u32 tmp = sbus_readl(p);
140 139
141 tmp &= 0xffffff; 140 tmp &= 0xffffff;
@@ -149,7 +148,7 @@ static void tcx_reset(struct fb_info *info)
149 unsigned long flags; 148 unsigned long flags;
150 149
151 spin_lock_irqsave(&par->lock, flags); 150 spin_lock_irqsave(&par->lock, flags);
152 __tcx_set_control_plane(par); 151 __tcx_set_control_plane(info);
153 spin_unlock_irqrestore(&par->lock, flags); 152 spin_unlock_irqrestore(&par->lock, flags);
154} 153}
155 154
@@ -304,7 +303,7 @@ static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma)
304 struct tcx_par *par = (struct tcx_par *)info->par; 303 struct tcx_par *par = (struct tcx_par *)info->par;
305 304
306 return sbusfb_mmap_helper(par->mmap_map, 305 return sbusfb_mmap_helper(par->mmap_map,
307 par->physbase, par->fbsize, 306 info->fix.smem_start, info->fix.smem_len,
308 par->which_io, vma); 307 par->which_io, vma);
309} 308}
310 309
@@ -316,7 +315,7 @@ static int tcx_ioctl(struct fb_info *info, unsigned int cmd,
316 return sbusfb_ioctl_helper(cmd, arg, info, 315 return sbusfb_ioctl_helper(cmd, arg, info,
317 FBTYPE_TCXCOLOR, 316 FBTYPE_TCXCOLOR,
318 (par->lowdepth ? 8 : 24), 317 (par->lowdepth ? 8 : 24),
319 par->fbsize); 318 info->fix.smem_len);
320} 319}
321 320
322/* 321/*
@@ -358,10 +357,10 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
358 par->bt, sizeof(struct bt_regs)); 357 par->bt, sizeof(struct bt_regs));
359 if (par->cplane) 358 if (par->cplane)
360 of_iounmap(&op->resource[4], 359 of_iounmap(&op->resource[4],
361 par->cplane, par->fbsize * sizeof(u32)); 360 par->cplane, info->fix.smem_len * sizeof(u32));
362 if (info->screen_base) 361 if (info->screen_base)
363 of_iounmap(&op->resource[0], 362 of_iounmap(&op->resource[0],
364 info->screen_base, par->fbsize); 363 info->screen_base, info->fix.smem_len);
365} 364}
366 365
367static int __devinit tcx_probe(struct of_device *op, 366static int __devinit tcx_probe(struct of_device *op,
@@ -391,7 +390,7 @@ static int __devinit tcx_probe(struct of_device *op,
391 390
392 linebytes = of_getintprop_default(dp, "linebytes", 391 linebytes = of_getintprop_default(dp, "linebytes",
393 info->var.xres); 392 info->var.xres);
394 par->fbsize = PAGE_ALIGN(linebytes * info->var.yres); 393 info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
395 394
396 par->tec = of_ioremap(&op->resource[7], 0, 395 par->tec = of_ioremap(&op->resource[7], 0,
397 sizeof(struct tcx_tec), "tcx tec"); 396 sizeof(struct tcx_tec), "tcx tec");
@@ -400,7 +399,7 @@ static int __devinit tcx_probe(struct of_device *op,
400 par->bt = of_ioremap(&op->resource[8], 0, 399 par->bt = of_ioremap(&op->resource[8], 0,
401 sizeof(struct bt_regs), "tcx dac"); 400 sizeof(struct bt_regs), "tcx dac");
402 info->screen_base = of_ioremap(&op->resource[0], 0, 401 info->screen_base = of_ioremap(&op->resource[0], 0,
403 par->fbsize, "tcx ram"); 402 info->fix.smem_len, "tcx ram");
404 if (!par->tec || !par->thc || 403 if (!par->tec || !par->thc ||
405 !par->bt || !info->screen_base) 404 !par->bt || !info->screen_base)
406 goto out_unmap_regs; 405 goto out_unmap_regs;
@@ -408,7 +407,7 @@ static int __devinit tcx_probe(struct of_device *op,
408 memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map)); 407 memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map));
409 if (!par->lowdepth) { 408 if (!par->lowdepth) {
410 par->cplane = of_ioremap(&op->resource[4], 0, 409 par->cplane = of_ioremap(&op->resource[4], 0,
411 par->fbsize * sizeof(u32), 410 info->fix.smem_len * sizeof(u32),
412 "tcx cplane"); 411 "tcx cplane");
413 if (!par->cplane) 412 if (!par->cplane)
414 goto out_unmap_regs; 413 goto out_unmap_regs;
@@ -419,7 +418,7 @@ static int __devinit tcx_probe(struct of_device *op,
419 par->mmap_map[6].size = SBUS_MMAP_EMPTY; 418 par->mmap_map[6].size = SBUS_MMAP_EMPTY;
420 } 419 }
421 420
422 par->physbase = op->resource[0].start; 421 info->fix.smem_start = op->resource[0].start;
423 par->which_io = op->resource[0].flags & IORESOURCE_BITS; 422 par->which_io = op->resource[0].flags & IORESOURCE_BITS;
424 423
425 for (i = 0; i < TCX_MMAP_ENTRIES; i++) { 424 for (i = 0; i < TCX_MMAP_ENTRIES; i++) {
@@ -473,7 +472,7 @@ static int __devinit tcx_probe(struct of_device *op,
473 printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n", 472 printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n",
474 dp->full_name, 473 dp->full_name,
475 par->which_io, 474 par->which_io,
476 par->physbase, 475 info->fix.smem_start,
477 par->lowdepth ? "8-bit only" : "24-bit depth"); 476 par->lowdepth ? "8-bit only" : "24-bit depth");
478 477
479 return 0; 478 return 0;
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index d6856f43d241..bd37ee1f6a25 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -174,8 +174,17 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
174 return err; 174 return err;
175} 175}
176 176
177static void vesafb_destroy(struct fb_info *info)
178{
179 if (info->screen_base)
180 iounmap(info->screen_base);
181 release_mem_region(info->aperture_base, info->aperture_size);
182 framebuffer_release(info);
183}
184
177static struct fb_ops vesafb_ops = { 185static struct fb_ops vesafb_ops = {
178 .owner = THIS_MODULE, 186 .owner = THIS_MODULE,
187 .fb_destroy = vesafb_destroy,
179 .fb_setcolreg = vesafb_setcolreg, 188 .fb_setcolreg = vesafb_setcolreg,
180 .fb_pan_display = vesafb_pan_display, 189 .fb_pan_display = vesafb_pan_display,
181 .fb_fillrect = cfb_fillrect, 190 .fb_fillrect = cfb_fillrect,
@@ -286,6 +295,10 @@ static int __init vesafb_probe(struct platform_device *dev)
286 info->pseudo_palette = info->par; 295 info->pseudo_palette = info->par;
287 info->par = NULL; 296 info->par = NULL;
288 297
298 /* set vesafb aperture size for generic probing */
299 info->aperture_base = screen_info.lfb_base;
300 info->aperture_size = size_total;
301
289 info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len); 302 info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
290 if (!info->screen_base) { 303 if (!info->screen_base) {
291 printk(KERN_ERR 304 printk(KERN_ERR
@@ -437,7 +450,7 @@ static int __init vesafb_probe(struct platform_device *dev)
437 info->fbops = &vesafb_ops; 450 info->fbops = &vesafb_ops;
438 info->var = vesafb_defined; 451 info->var = vesafb_defined;
439 info->fix = vesafb_fix; 452 info->fix = vesafb_fix;
440 info->flags = FBINFO_FLAG_DEFAULT | 453 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
441 (ypan ? FBINFO_HWACCEL_YPAN : 0); 454 (ypan ? FBINFO_HWACCEL_YPAN : 0);
442 455
443 if (!ypan) 456 if (!ypan)
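
These vesafb hunks are the consumer side of the firmware-handoff mechanism: the driver now flags itself FBINFO_MISC_FIRMWARE, publishes the physical range it occupies via aperture_base/aperture_size, and supplies fb_destroy so the core can tear it down cleanly. Roughly, the check performed when a native driver later registers looks like this (a sketch under those assumptions, not the exact core code; gen and hw are hypothetical names for the firmware and hardware fb_info):

	/* does a registered firmware fb sit inside the new hw aperture? */
	if ((gen->flags & FBINFO_MISC_FIRMWARE) &&
	    gen->aperture_base >= hw->aperture_base &&
	    gen->aperture_base < hw->aperture_base + hw->aperture_size)
		unregister_framebuffer(gen);	/* ends in vesafb_destroy() */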
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
new file mode 100644
index 000000000000..f6542211db48
--- /dev/null
+++ b/drivers/vlynq/Kconfig
@@ -0,0 +1,20 @@
1menu "TI VLYNQ"
2
3config VLYNQ
4 bool "TI VLYNQ bus support"
5 depends on AR7 && EXPERIMENTAL
6 help
7 Support for Texas Instruments(R) VLYNQ bus.
8 The VLYNQ bus is a high-speed, serial and packetized
9 data bus which allows external peripherals of a SoC
10 to appear in the system's main memory.
11
12 If unsure, say N.
13
14config VLYNQ_DEBUG
15 bool "VLYNQ bus debug"
16 depends on VLYNQ && DEBUG_KERNEL
17 help
18 Turn on VLYNQ bus debugging.
19
20endmenu
diff --git a/drivers/vlynq/Makefile b/drivers/vlynq/Makefile
new file mode 100644
index 000000000000..b3f61149b599
--- /dev/null
+++ b/drivers/vlynq/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for kernel vlynq drivers
3#
4
5obj-$(CONFIG_VLYNQ) += vlynq.o
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
new file mode 100644
index 000000000000..7335433b067b
--- /dev/null
+++ b/drivers/vlynq/vlynq.c
@@ -0,0 +1,814 @@
1/*
2 * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 *
18 * Parts of the VLYNQ specification can be found here:
19 * http://www.ti.com/litv/pdf/sprue36a
20 */
21
22#include <linux/init.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/string.h>
26#include <linux/device.h>
27#include <linux/module.h>
28#include <linux/errno.h>
29#include <linux/platform_device.h>
30#include <linux/interrupt.h>
31#include <linux/device.h>
32#include <linux/delay.h>
33#include <linux/io.h>
34
35#include <linux/vlynq.h>
36
37#define VLYNQ_CTRL_PM_ENABLE 0x80000000
38#define VLYNQ_CTRL_CLOCK_INT 0x00008000
39#define VLYNQ_CTRL_CLOCK_DIV(x) (((x) & 7) << 16)
40#define VLYNQ_CTRL_INT_LOCAL 0x00004000
41#define VLYNQ_CTRL_INT_ENABLE 0x00002000
42#define VLYNQ_CTRL_INT_VECTOR(x) (((x) & 0x1f) << 8)
43#define VLYNQ_CTRL_INT2CFG 0x00000080
44#define VLYNQ_CTRL_RESET 0x00000001
45
46#define VLYNQ_CTRL_CLOCK_MASK (0x7 << 16)
47
48#define VLYNQ_INT_OFFSET 0x00000014
49#define VLYNQ_REMOTE_OFFSET 0x00000080
50
51#define VLYNQ_STATUS_LINK 0x00000001
52#define VLYNQ_STATUS_LERROR 0x00000080
53#define VLYNQ_STATUS_RERROR 0x00000100
54
55#define VINT_ENABLE 0x00000100
56#define VINT_TYPE_EDGE 0x00000080
57#define VINT_LEVEL_LOW 0x00000040
58#define VINT_VECTOR(x) ((x) & 0x1f)
59#define VINT_OFFSET(irq) (8 * ((irq) % 4))
60
61#define VLYNQ_AUTONEGO_V2 0x00010000
62
63struct vlynq_regs {
64 u32 revision;
65 u32 control;
66 u32 status;
67 u32 int_prio;
68 u32 int_status;
69 u32 int_pending;
70 u32 int_ptr;
71 u32 tx_offset;
72 struct vlynq_mapping rx_mapping[4];
73 u32 chip;
74 u32 autonego;
75 u32 unused[6];
76 u32 int_device[8];
77};
78
79#ifdef CONFIG_VLYNQ_DEBUG
80static void vlynq_dump_regs(struct vlynq_device *dev)
81{
82 int i;
83
84 printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n",
85 dev->local, dev->remote);
86 for (i = 0; i < 32; i++) {
87 printk(KERN_DEBUG "VLYNQ: local %d: %08x\n",
88 i + 1, ((u32 *)dev->local)[i]);
89 printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n",
90 i + 1, ((u32 *)dev->remote)[i]);
91 }
92}
93
94static void vlynq_dump_mem(u32 *base, int count)
95{
96 int i;
97
98 for (i = 0; i < (count + 3) / 4; i++) {
99 if (i % 4 == 0)
100 printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4);
101 printk(KERN_DEBUG " 0x%08x", *(base + i));
102 }
103 printk(KERN_DEBUG "\n");
104}
105#endif
106
107/* Check the VLYNQ link status with a given device */
108static int vlynq_linked(struct vlynq_device *dev)
109{
110 int i;
111
112 for (i = 0; i < 100; i++)
113 if (readl(&dev->local->status) & VLYNQ_STATUS_LINK)
114 return 1;
115 else
116 cpu_relax();
117
118 return 0;
119}
120
121static void vlynq_reset(struct vlynq_device *dev)
122{
123 writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET,
124 &dev->local->control);
125
126 /* Wait for the devices to finish resetting */
127 msleep(5);
128
129 /* Remove reset bit */
130 writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET,
131 &dev->local->control);
132
133 /* Give some time for the devices to settle */
134 msleep(5);
135}
136
137static void vlynq_irq_unmask(unsigned int irq)
138{
139 u32 val;
140 struct vlynq_device *dev = get_irq_chip_data(irq);
141 int virq;
142
143 BUG_ON(!dev);
144 virq = irq - dev->irq_start;
145 val = readl(&dev->remote->int_device[virq >> 2]);
146 val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq);
147 writel(val, &dev->remote->int_device[virq >> 2]);
148}
149
150static void vlynq_irq_mask(unsigned int irq)
151{
152 u32 val;
153 struct vlynq_device *dev = get_irq_chip_data(irq);
154 int virq;
155
156 BUG_ON(!dev);
157 virq = irq - dev->irq_start;
158 val = readl(&dev->remote->int_device[virq >> 2]);
159 val &= ~(VINT_ENABLE << VINT_OFFSET(virq));
160 writel(val, &dev->remote->int_device[virq >> 2]);
161}
162
163static int vlynq_irq_type(unsigned int irq, unsigned int flow_type)
164{
165 u32 val;
166 struct vlynq_device *dev = get_irq_chip_data(irq);
167 int virq;
168
169 BUG_ON(!dev);
170 virq = irq - dev->irq_start;
171 val = readl(&dev->remote->int_device[virq >> 2]);
172 switch (flow_type & IRQ_TYPE_SENSE_MASK) {
173 case IRQ_TYPE_EDGE_RISING:
174 case IRQ_TYPE_EDGE_FALLING:
175 case IRQ_TYPE_EDGE_BOTH:
176 val |= VINT_TYPE_EDGE << VINT_OFFSET(virq);
177 val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
178 break;
179 case IRQ_TYPE_LEVEL_HIGH:
180 val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
181 val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
182 break;
183 case IRQ_TYPE_LEVEL_LOW:
184 val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
185 val |= VINT_LEVEL_LOW << VINT_OFFSET(virq);
186 break;
187 default:
188 return -EINVAL;
189 }
190 writel(val, &dev->remote->int_device[virq >> 2]);
191 return 0;
192}
193
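
The indexing in the three handlers above follows from the VINT_* macros at the top of the file: each 32-bit int_device word carries the setup for four virtual irqs, with virq >> 2 selecting the word and VINT_OFFSET(virq) = 8 * (virq % 4) giving the bit offset within it. A worked example for virq 6 (illustrative fragment):

	/* virq 6 -> int_device[6 >> 2] == int_device[1], bit offset 16 */
	u32 val = readl(&dev->remote->int_device[6 >> 2]);
	val |= (VINT_ENABLE | 6) << VINT_OFFSET(6);	/* enable, vector 6 */
	writel(val, &dev->remote->int_device[6 >> 2]);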
194static void vlynq_local_ack(unsigned int irq)
195{
196 struct vlynq_device *dev = get_irq_chip_data(irq);
197
198 u32 status = readl(&dev->local->status);
199
200 pr_debug("%s: local status: 0x%08x\n",
201 dev_name(&dev->dev), status);
202 writel(status, &dev->local->status);
203}
204
205static void vlynq_remote_ack(unsigned int irq)
206{
207 struct vlynq_device *dev = get_irq_chip_data(irq);
208
209 u32 status = readl(&dev->remote->status);
210
211 pr_debug("%s: remote status: 0x%08x\n",
212 dev_name(&dev->dev), status);
213 writel(status, &dev->remote->status);
214}
215
216static irqreturn_t vlynq_irq(int irq, void *dev_id)
217{
218 struct vlynq_device *dev = dev_id;
219 u32 status;
220 int virq = 0;
221
222 status = readl(&dev->local->int_status);
223 writel(status, &dev->local->int_status);
224
225 if (unlikely(!status))
226 spurious_interrupt();
227
228 while (status) {
229 if (status & 1)
230 do_IRQ(dev->irq_start + virq);
231 status >>= 1;
232 virq++;
233 }
234
235 return IRQ_HANDLED;
236}
237
238static struct irq_chip vlynq_irq_chip = {
239 .name = "vlynq",
240 .unmask = vlynq_irq_unmask,
241 .mask = vlynq_irq_mask,
242 .set_type = vlynq_irq_type,
243};
244
245static struct irq_chip vlynq_local_chip = {
246 .name = "vlynq local error",
247 .unmask = vlynq_irq_unmask,
248 .mask = vlynq_irq_mask,
249 .ack = vlynq_local_ack,
250};
251
252static struct irq_chip vlynq_remote_chip = {
253 .name = "vlynq remote error",
254 .unmask = vlynq_irq_unmask,
255 .mask = vlynq_irq_mask,
256 .ack = vlynq_remote_ack,
257};
258
259static int vlynq_setup_irq(struct vlynq_device *dev)
260{
261 u32 val;
262 int i, virq;
263
264 if (dev->local_irq == dev->remote_irq) {
265 printk(KERN_ERR
266 "%s: local vlynq irq should be different from remote\n",
267 dev_name(&dev->dev));
268 return -EINVAL;
269 }
270
271 /* Clear local and remote error bits */
272 writel(readl(&dev->local->status), &dev->local->status);
273 writel(readl(&dev->remote->status), &dev->remote->status);
274
275 /* Now setup interrupts */
276 val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq);
277 val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL |
278 VLYNQ_CTRL_INT2CFG;
279 val |= readl(&dev->local->control);
280 writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr);
281 writel(val, &dev->local->control);
282
283 val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq);
284 val |= VLYNQ_CTRL_INT_ENABLE;
285 val |= readl(&dev->remote->control);
286 writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr);
287 writel(val, &dev->remote->int_ptr);
288 writel(val, &dev->remote->control);
289
290 for (i = dev->irq_start; i <= dev->irq_end; i++) {
291 virq = i - dev->irq_start;
292 if (virq == dev->local_irq) {
293 set_irq_chip_and_handler(i, &vlynq_local_chip,
294 handle_level_irq);
295 set_irq_chip_data(i, dev);
296 } else if (virq == dev->remote_irq) {
297 set_irq_chip_and_handler(i, &vlynq_remote_chip,
298 handle_level_irq);
299 set_irq_chip_data(i, dev);
300 } else {
301 set_irq_chip_and_handler(i, &vlynq_irq_chip,
302 handle_simple_irq);
303 set_irq_chip_data(i, dev);
304 writel(0, &dev->remote->int_device[virq >> 2]);
305 }
306 }
307
308 if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) {
309 printk(KERN_ERR "%s: request_irq failed\n",
310 dev_name(&dev->dev));
311 return -EAGAIN;
312 }
313
314 return 0;
315}
316
317static void vlynq_device_release(struct device *dev)
318{
319 struct vlynq_device *vdev = to_vlynq_device(dev);
320 kfree(vdev);
321}
322
323static int vlynq_device_match(struct device *dev,
324 struct device_driver *drv)
325{
326 struct vlynq_device *vdev = to_vlynq_device(dev);
327 struct vlynq_driver *vdrv = to_vlynq_driver(drv);
328 struct vlynq_device_id *ids = vdrv->id_table;
329
330 while (ids->id) {
331 if (ids->id == vdev->dev_id) {
332 vdev->divisor = ids->divisor;
333 vlynq_set_drvdata(vdev, ids);
334 printk(KERN_INFO "Driver found for VLYNQ "
335 "device: %08x\n", vdev->dev_id);
336 return 1;
337 }
338 printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver"
339 " for VLYNQ device: %08x\n", ids->id, vdev->dev_id);
340 ids++;
341 }
342 return 0;
343}
344
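
Matching walks the driver's id_table until ids->id hits the zero terminator, so a client driver only has to list the chip ids it knows. A hypothetical client using only types visible in this file (the device id 0x00000029 is made up, and vlynq_register_driver() is assumed to be the usual THIS_MODULE wrapper around __vlynq_register_driver()):

static struct vlynq_device_id my_ids[] = {
	{ .id = 0x00000029, .divisor = vlynq_div_auto },
	{ 0 },				/* terminator: match stops at id == 0 */
};

static int my_probe(struct vlynq_device *vdev, struct vlynq_device_id *id)
{
	return vlynq_enable_device(vdev);
}

static struct vlynq_driver my_driver = {
	.name		= "my-vlynq-client",
	.id_table	= my_ids,
	.probe		= my_probe,
};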
345static int vlynq_device_probe(struct device *dev)
346{
347 struct vlynq_device *vdev = to_vlynq_device(dev);
348 struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
349 struct vlynq_device_id *id = vlynq_get_drvdata(vdev);
350 int result = -ENODEV;
351
352 if (drv->probe)
353 result = drv->probe(vdev, id);
354 if (result)
355 put_device(dev);
356 return result;
357}
358
359static int vlynq_device_remove(struct device *dev)
360{
361 struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
362
363 if (drv->remove)
364 drv->remove(to_vlynq_device(dev));
365
366 return 0;
367}
368
369int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner)
370{
371 driver->driver.name = driver->name;
372 driver->driver.bus = &vlynq_bus_type;
373 return driver_register(&driver->driver);
374}
375EXPORT_SYMBOL(__vlynq_register_driver);
376
377void vlynq_unregister_driver(struct vlynq_driver *driver)
378{
379 driver_unregister(&driver->driver);
380}
381EXPORT_SYMBOL(vlynq_unregister_driver);
382
383/*
384 * A VLYNQ remote device can clock the VLYNQ bus master
385 * using a dedicated clock line. In that case, both the
386 * remote device and the bus master should have the same
387 * serial clock dividers configured. Iterate through the
388 * 8 possible dividers until we actually link with the
389 * device.
390 */
391static int __vlynq_try_remote(struct vlynq_device *dev)
392{
393 int i;
394
395 vlynq_reset(dev);
396 for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ?
397 i <= vlynq_rdiv8 : i >= vlynq_rdiv2;
398 dev->dev_id ? i++ : i--) {
399
400 if (!vlynq_linked(dev))
401 break;
402
403 writel((readl(&dev->remote->control) &
404 ~VLYNQ_CTRL_CLOCK_MASK) |
405 VLYNQ_CTRL_CLOCK_INT |
406 VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
407 &dev->remote->control);
408 writel((readl(&dev->local->control)
409 & ~(VLYNQ_CTRL_CLOCK_INT |
410 VLYNQ_CTRL_CLOCK_MASK)) |
411 VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
412 &dev->local->control);
413
414 if (vlynq_linked(dev)) {
415 printk(KERN_DEBUG
416 "%s: using remote clock divisor %d\n",
417 dev_name(&dev->dev), i - vlynq_rdiv1 + 1);
418 dev->divisor = i;
419 return 0;
420 } else {
421 vlynq_reset(dev);
422 }
423 }
424
425 return -ENODEV;
426}
427
428/*
429 * A VLYNQ remote device can be clocked by the VLYNQ bus
430 * master using a dedicated clock line. In that case, only
431 * the bus master configures the serial clock divider.
432 * Iterate through the 8 possible dividers until we
433 * actually get a link with the device.
434 */
435static int __vlynq_try_local(struct vlynq_device *dev)
436{
437 int i;
438
439 vlynq_reset(dev);
440
441 for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ?
442 i <= vlynq_ldiv8 : i >= vlynq_ldiv2;
443 dev->dev_id ? i++ : i--) {
444
445 writel((readl(&dev->local->control) &
446 ~VLYNQ_CTRL_CLOCK_MASK) |
447 VLYNQ_CTRL_CLOCK_INT |
448 VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1),
449 &dev->local->control);
450
451 if (vlynq_linked(dev)) {
452 printk(KERN_DEBUG
453 "%s: using local clock divisor %d\n",
454 dev_name(&dev->dev), i - vlynq_ldiv1 + 1);
455 dev->divisor = i;
456 return 0;
457 } else {
458 vlynq_reset(dev);
459 }
460 }
461
462 return -ENODEV;
463}
464
465/*
466 * When using external clocking method, serial clock
467 * is supplied by an external oscillator, therefore we
468 * should mask the local clock bit in the clock control
469 * register for both the bus master and the remote device.
470 */
471static int __vlynq_try_external(struct vlynq_device *dev)
472{
473 vlynq_reset(dev);
474 if (!vlynq_linked(dev))
475 return -ENODEV;
476
477 writel((readl(&dev->remote->control) &
478 ~VLYNQ_CTRL_CLOCK_INT),
479 &dev->remote->control);
480
481 writel((readl(&dev->local->control) &
482 ~VLYNQ_CTRL_CLOCK_INT),
483 &dev->local->control);
484
485 if (vlynq_linked(dev)) {
486 printk(KERN_DEBUG "%s: using external clock\n",
487 dev_name(&dev->dev));
488 dev->divisor = vlynq_div_external;
489 return 0;
490 }
491
492 return -ENODEV;
493}
494
495static int __vlynq_enable_device(struct vlynq_device *dev)
496{
497 int result;
498 struct plat_vlynq_ops *ops = dev->dev.platform_data;
499
500 result = ops->on(dev);
501 if (result)
502 return result;
503
504 switch (dev->divisor) {
505 case vlynq_div_external:
506 case vlynq_div_auto:
507 /* When the device is brought from reset it should have clock
508 * generation negotiated by hardware.
509 * Check which device is generating clocks and perform setup
510 * accordingly */
511 if (vlynq_linked(dev) && readl(&dev->remote->control) &
512 VLYNQ_CTRL_CLOCK_INT) {
513 if (!__vlynq_try_remote(dev) ||
514 !__vlynq_try_local(dev) ||
515 !__vlynq_try_external(dev))
516 return 0;
517 } else {
518 if (!__vlynq_try_external(dev) ||
519 !__vlynq_try_local(dev) ||
520 !__vlynq_try_remote(dev))
521 return 0;
522 }
523 break;
524 case vlynq_ldiv1:
525 case vlynq_ldiv2:
526 case vlynq_ldiv3:
527 case vlynq_ldiv4:
528 case vlynq_ldiv5:
529 case vlynq_ldiv6:
530 case vlynq_ldiv7:
531 case vlynq_ldiv8:
532 writel(VLYNQ_CTRL_CLOCK_INT |
533 VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
534 vlynq_ldiv1), &dev->local->control);
535 writel(0, &dev->remote->control);
536 if (vlynq_linked(dev)) {
537 printk(KERN_DEBUG
538 "%s: using local clock divisor %d\n",
539 dev_name(&dev->dev),
540 dev->divisor - vlynq_ldiv1 + 1);
541 return 0;
542 }
543 break;
544 case vlynq_rdiv1:
545 case vlynq_rdiv2:
546 case vlynq_rdiv3:
547 case vlynq_rdiv4:
548 case vlynq_rdiv5:
549 case vlynq_rdiv6:
550 case vlynq_rdiv7:
551 case vlynq_rdiv8:
552 writel(0, &dev->local->control);
553 writel(VLYNQ_CTRL_CLOCK_INT |
554 VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
555 vlynq_rdiv1), &dev->remote->control);
556 if (vlynq_linked(dev)) {
557 printk(KERN_DEBUG
558 "%s: using remote clock divisor %d\n",
559 dev_name(&dev->dev),
560 dev->divisor - vlynq_rdiv1 + 1);
561 return 0;
562 }
563 break;
564 }
565
566 ops->off(dev);
567 return -ENODEV;
568}
569
570int vlynq_enable_device(struct vlynq_device *dev)
571{
572 struct plat_vlynq_ops *ops = dev->dev.platform_data;
573 int result = -ENODEV;
574
575 result = __vlynq_enable_device(dev);
576 if (result)
577 return result;
578
579 result = vlynq_setup_irq(dev);
580 if (result)
581 ops->off(dev);
582
583 dev->enabled = !result;
584 return result;
585}
586EXPORT_SYMBOL(vlynq_enable_device);
587
588
589void vlynq_disable_device(struct vlynq_device *dev)
590{
591 struct plat_vlynq_ops *ops = dev->dev.platform_data;
592
593 dev->enabled = 0;
594 free_irq(dev->irq, dev);
595 ops->off(dev);
596}
597EXPORT_SYMBOL(vlynq_disable_device);
598
599int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
600 struct vlynq_mapping *mapping)
601{
602 int i;
603
604 if (!dev->enabled)
605 return -ENXIO;
606
607 writel(tx_offset, &dev->local->tx_offset);
608 for (i = 0; i < 4; i++) {
609 writel(mapping[i].offset, &dev->local->rx_mapping[i].offset);
610 writel(mapping[i].size, &dev->local->rx_mapping[i].size);
611 }
612 return 0;
613}
614EXPORT_SYMBOL(vlynq_set_local_mapping);
615
616int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
617 struct vlynq_mapping *mapping)
618{
619 int i;
620
621 if (!dev->enabled)
622 return -ENXIO;
623
624 writel(tx_offset, &dev->remote->tx_offset);
625 for (i = 0; i < 4; i++) {
626 writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset);
627 writel(mapping[i].size, &dev->remote->rx_mapping[i].size);
628 }
629 return 0;
630}
631EXPORT_SYMBOL(vlynq_set_remote_mapping);
632
633int vlynq_set_local_irq(struct vlynq_device *dev, int virq)
634{
635 int irq = dev->irq_start + virq;
636 if (dev->enabled)
637 return -EBUSY;
638
639 if ((irq < dev->irq_start) || (irq > dev->irq_end))
640 return -EINVAL;
641
642 if (virq == dev->remote_irq)
643 return -EINVAL;
644
645 dev->local_irq = virq;
646
647 return 0;
648}
649EXPORT_SYMBOL(vlynq_set_local_irq);
650
651int vlynq_set_remote_irq(struct vlynq_device *dev, int virq)
652{
653 int irq = dev->irq_start + virq;
654 if (dev->enabled)
655 return -EBUSY;
656
657 if ((irq < dev->irq_start) || (irq > dev->irq_end))
658 return -EINVAL;
659
660 if (virq == dev->local_irq)
661 return -EINVAL;
662
663 dev->remote_irq = virq;
664
665 return 0;
666}
667EXPORT_SYMBOL(vlynq_set_remote_irq);
668
669static int vlynq_probe(struct platform_device *pdev)
670{
671 struct vlynq_device *dev;
672 struct resource *regs_res, *mem_res, *irq_res;
673 int len, result;
674
675 regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
676 if (!regs_res)
677 return -ENODEV;
678
679 mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
680 if (!mem_res)
681 return -ENODEV;
682
683 irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq");
684 if (!irq_res)
685 return -ENODEV;
686
687 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
688 if (!dev) {
689 printk(KERN_ERR
690 "vlynq: failed to allocate device structure\n");
691 return -ENOMEM;
692 }
693
694 dev->id = pdev->id;
695 dev->dev.bus = &vlynq_bus_type;
696 dev->dev.parent = &pdev->dev;
697 dev_set_name(&dev->dev, "vlynq%d", dev->id);
698 dev->dev.platform_data = pdev->dev.platform_data;
699 dev->dev.release = vlynq_device_release;
700
701 dev->regs_start = regs_res->start;
702 dev->regs_end = regs_res->end;
703 dev->mem_start = mem_res->start;
704 dev->mem_end = mem_res->end;
705
706 len = regs_res->end - regs_res->start + 1;
707 if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) {
708 printk(KERN_ERR "%s: Can't request vlynq registers\n",
709 dev_name(&dev->dev));
710 result = -ENXIO;
711 goto fail_request;
712 }
713
714 dev->local = ioremap(regs_res->start, len);
715 if (!dev->local) {
716 printk(KERN_ERR "%s: Can't remap vlynq registers\n",
717 dev_name(&dev->dev));
718 result = -ENXIO;
719 goto fail_remap;
720 }
721
722 dev->remote = (struct vlynq_regs *)((void *)dev->local +
723 VLYNQ_REMOTE_OFFSET);
724
725 dev->irq = platform_get_irq_byname(pdev, "irq");
726 dev->irq_start = irq_res->start;
727 dev->irq_end = irq_res->end;
728 dev->local_irq = dev->irq_end - dev->irq_start;
729 dev->remote_irq = dev->local_irq - 1;
730
731 if (device_register(&dev->dev))
732 goto fail_register;
733 platform_set_drvdata(pdev, dev);
734
735 printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n",
736 dev_name(&dev->dev), (void *)dev->regs_start, dev->irq,
737 (void *)dev->mem_start);
738
739 dev->dev_id = 0;
740 dev->divisor = vlynq_div_auto;
741 result = __vlynq_enable_device(dev);
742 if (result == 0) {
743 dev->dev_id = readl(&dev->remote->chip);
744 ((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev);
745 }
746 if (dev->dev_id)
747 printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id);
748
749 return 0;
750
751fail_register:
752 iounmap(dev->local);
753fail_remap:
754fail_request:
755 release_mem_region(regs_res->start, len);
756 kfree(dev);
757 return result;
758}
759
760static int vlynq_remove(struct platform_device *pdev)
761{
762 struct vlynq_device *dev = platform_get_drvdata(pdev);
763
764 device_unregister(&dev->dev);
765 iounmap(dev->local);
766 release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start + 1);
767
768 kfree(dev);
769
770 return 0;
771}
772
773static struct platform_driver vlynq_platform_driver = {
774 .driver.name = "vlynq",
775 .probe = vlynq_probe,
776 .remove = __devexit_p(vlynq_remove),
777};
778
779struct bus_type vlynq_bus_type = {
780 .name = "vlynq",
781 .match = vlynq_device_match,
782 .probe = vlynq_device_probe,
783 .remove = vlynq_device_remove,
784};
785EXPORT_SYMBOL(vlynq_bus_type);
786
787static int __devinit vlynq_init(void)
788{
789 int res = 0;
790
791 res = bus_register(&vlynq_bus_type);
792 if (res)
793 goto fail_bus;
794
795 res = platform_driver_register(&vlynq_platform_driver);
796 if (res)
797 goto fail_platform;
798
799 return 0;
800
801fail_platform:
802 bus_unregister(&vlynq_bus_type);
803fail_bus:
804 return res;
805}
806
807static void __devexit vlynq_exit(void)
808{
809 platform_driver_unregister(&vlynq_platform_driver);
810 bus_unregister(&vlynq_bus_type);
811}
812
813module_init(vlynq_init);
814module_exit(vlynq_exit);
diff --git a/fs/Kconfig b/fs/Kconfig
index 525da2e8f73b..4044f163035f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -39,6 +39,13 @@ config FS_POSIX_ACL
39 bool 39 bool
40 default n 40 default n
41 41
42source "fs/xfs/Kconfig"
43source "fs/gfs2/Kconfig"
44source "fs/ocfs2/Kconfig"
45source "fs/btrfs/Kconfig"
46
47endif # BLOCK
48
42config FILE_LOCKING 49config FILE_LOCKING
43 bool "Enable POSIX file locking API" if EMBEDDED 50 bool "Enable POSIX file locking API" if EMBEDDED
44 default y 51 default y
@@ -47,13 +54,6 @@ config FILE_LOCKING
47 for filesystems like NFS and for the flock() system 54 for filesystems like NFS and for the flock() system
48 call. Disabling this option saves about 11k. 55 call. Disabling this option saves about 11k.
49 56
50source "fs/xfs/Kconfig"
51source "fs/gfs2/Kconfig"
52source "fs/ocfs2/Kconfig"
53source "fs/btrfs/Kconfig"
54
55endif # BLOCK
56
57source "fs/notify/Kconfig" 57source "fs/notify/Kconfig"
58 58
59source "fs/quota/Kconfig" 59source "fs/quota/Kconfig"
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index b6a719a909f8..a2edb7913447 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -24,7 +24,7 @@ static void drop_pagecache_sb(struct super_block *sb)
24 continue; 24 continue;
25 __iget(inode); 25 __iget(inode);
26 spin_unlock(&inode_lock); 26 spin_unlock(&inode_lock);
27 __invalidate_mapping_pages(inode->i_mapping, 0, -1, true); 27 invalidate_mapping_pages(inode->i_mapping, 0, -1);
28 iput(toput_inode); 28 iput(toput_inode);
29 toput_inode = inode; 29 toput_inode = inode;
30 spin_lock(&inode_lock); 30 spin_lock(&inode_lock);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 40308e98c6a4..caf049146ca2 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -321,7 +321,7 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
321 321
322 spin_lock(&inode_lock); 322 spin_lock(&inode_lock);
323 inode->i_state &= ~I_SYNC; 323 inode->i_state &= ~I_SYNC;
324 if (!(inode->i_state & I_FREEING)) { 324 if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
325 if (!(inode->i_state & I_DIRTY) && 325 if (!(inode->i_state & I_DIRTY) &&
326 mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 326 mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
327 /* 327 /*
@@ -492,7 +492,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
492 break; 492 break;
493 } 493 }
494 494
495 if (inode->i_state & I_NEW) { 495 if (inode->i_state & (I_NEW | I_WILL_FREE)) {
496 requeue_io(inode); 496 requeue_io(inode);
497 continue; 497 continue;
498 } 498 }
@@ -523,7 +523,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
523 if (current_is_pdflush() && !writeback_acquire(bdi)) 523 if (current_is_pdflush() && !writeback_acquire(bdi))
524 break; 524 break;
525 525
526 BUG_ON(inode->i_state & I_FREEING); 526 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
527 __iget(inode); 527 __iget(inode);
528 pages_skipped = wbc->pages_skipped; 528 pages_skipped = wbc->pages_skipped;
529 __writeback_single_inode(inode, wbc); 529 __writeback_single_inode(inode, wbc);
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index a2ab2529b5ca..ceda50aad73c 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -31,7 +31,7 @@ static inline void nfs_inc_server_stats(const struct nfs_server *server,
31 cpu = get_cpu(); 31 cpu = get_cpu();
32 iostats = per_cpu_ptr(server->io_stats, cpu); 32 iostats = per_cpu_ptr(server->io_stats, cpu);
33 iostats->events[stat]++; 33 iostats->events[stat]++;
34 put_cpu_no_resched(); 34 put_cpu();
35} 35}
36 36
37static inline void nfs_inc_stats(const struct inode *inode, 37static inline void nfs_inc_stats(const struct inode *inode,
@@ -50,7 +50,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
50 cpu = get_cpu(); 50 cpu = get_cpu();
51 iostats = per_cpu_ptr(server->io_stats, cpu); 51 iostats = per_cpu_ptr(server->io_stats, cpu);
52 iostats->bytes[stat] += addend; 52 iostats->bytes[stat] += addend;
53 put_cpu_no_resched(); 53 put_cpu();
54} 54}
55 55
56static inline void nfs_add_stats(const struct inode *inode, 56static inline void nfs_add_stats(const struct inode *inode,
@@ -71,7 +71,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
71 cpu = get_cpu(); 71 cpu = get_cpu();
72 iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu); 72 iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
73 iostats->fscache[stat] += addend; 73 iostats->fscache[stat] += addend;
74 put_cpu_no_resched(); 74 put_cpu();
75} 75}
76#endif 76#endif
77 77
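
The iostat hunks keep the per-CPU counter pattern but switch the
closing bracket from the removed put_cpu_no_resched() to plain
put_cpu(). A userspace model of that bracket, where get_cpu() pins the
task (preemption disabled) so the per-CPU slot can be bumped without a
lock; the CPU id and counter array here are illustrative:

#include <stdio.h>

#define NR_CPUS 4

static unsigned long events[NR_CPUS];
static int preempt_count;

static int get_cpu(void)
{
	preempt_count++;	/* models preempt_disable() */
	return 0;		/* pretend we always run on CPU 0 */
}

static void put_cpu(void)
{
	preempt_count--;	/* models preempt_enable(); may reschedule */
}

static void nfs_inc_server_stats(void)
{
	int cpu = get_cpu();

	events[cpu]++;		/* safe: no migration while pinned */
	put_cpu();
}

int main(void)
{
	nfs_inc_server_stats();
	printf("cpu0 events = %lu\n", events[0]);
	return 0;
}
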
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 82c5085559c6..9938034762cc 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -27,6 +27,7 @@
27#include <linux/pagemap.h> 27#include <linux/pagemap.h>
28#include <linux/quotaops.h> 28#include <linux/quotaops.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/log2.h>
30 31
31#include "aops.h" 32#include "aops.h"
32#include "attrib.h" 33#include "attrib.h"
@@ -1570,7 +1571,7 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
1570 ntfs_debug("Index collation rule is 0x%x.", 1571 ntfs_debug("Index collation rule is 0x%x.",
1571 le32_to_cpu(ir->collation_rule)); 1572 le32_to_cpu(ir->collation_rule));
1572 ni->itype.index.block_size = le32_to_cpu(ir->index_block_size); 1573 ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
1573 if (ni->itype.index.block_size & (ni->itype.index.block_size - 1)) { 1574 if (!is_power_of_2(ni->itype.index.block_size)) {
1574 ntfs_error(vi->i_sb, "Index block size (%u) is not a power of " 1575 ntfs_error(vi->i_sb, "Index block size (%u) is not a power of "
1575 "two.", ni->itype.index.block_size); 1576 "two.", ni->itype.index.block_size);
1576 goto unm_err_out; 1577 goto unm_err_out;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index d7932e95b1fd..89b02985c054 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -26,6 +26,7 @@
26#include <linux/highmem.h> 26#include <linux/highmem.h>
27#include <linux/buffer_head.h> 27#include <linux/buffer_head.h>
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/log2.h>
29 30
30#include "attrib.h" 31#include "attrib.h"
31#include "aops.h" 32#include "aops.h"
@@ -65,7 +66,7 @@ static bool ntfs_check_restart_page_header(struct inode *vi,
65 logfile_log_page_size < NTFS_BLOCK_SIZE || 66 logfile_log_page_size < NTFS_BLOCK_SIZE ||
66 logfile_system_page_size & 67 logfile_system_page_size &
67 (logfile_system_page_size - 1) || 68 (logfile_system_page_size - 1) ||
68 logfile_log_page_size & (logfile_log_page_size - 1)) { 69 !is_power_of_2(logfile_log_page_size)) {
69 ntfs_error(vi->i_sb, "$LogFile uses unsupported page size."); 70 ntfs_error(vi->i_sb, "$LogFile uses unsupported page size.");
70 return false; 71 return false;
71 } 72 }
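
Both ntfs hunks replace the open-coded x & (x - 1) test with
is_power_of_2() from linux/log2.h. A standalone sketch of the check;
note that the helper form also rejects 0, which the bare bit trick
would accept as a power of two:

#include <stdio.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned long n)
{
	/* n & (n - 1) clears the lowest set bit; a zero result
	 * means at most one bit was set */
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long sizes[] = { 0, 1, 512, 4096, 6144 };

	for (int i = 0; i < 5; i++)
		printf("%5lu -> %s\n", sizes[i],
		       is_power_of_2(sizes[i]) ? "power of 2" : "not");
	return 0;
}
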
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 1539e630c47d..3ce5ae9e3d2d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1006,7 +1006,12 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf,
1006 1006
1007 if (!task) 1007 if (!task)
1008 return -ESRCH; 1008 return -ESRCH;
1009 oom_adjust = task->oomkilladj; 1009 task_lock(task);
1010 if (task->mm)
1011 oom_adjust = task->mm->oom_adj;
1012 else
1013 oom_adjust = OOM_DISABLE;
1014 task_unlock(task);
1010 put_task_struct(task); 1015 put_task_struct(task);
1011 1016
1012 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust); 1017 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
@@ -1035,11 +1040,19 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
1035 task = get_proc_task(file->f_path.dentry->d_inode); 1040 task = get_proc_task(file->f_path.dentry->d_inode);
1036 if (!task) 1041 if (!task)
1037 return -ESRCH; 1042 return -ESRCH;
1038 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) { 1043 task_lock(task);
1044 if (!task->mm) {
1045 task_unlock(task);
1046 put_task_struct(task);
1047 return -EINVAL;
1048 }
1049 if (oom_adjust < task->mm->oom_adj && !capable(CAP_SYS_RESOURCE)) {
1050 task_unlock(task);
1039 put_task_struct(task); 1051 put_task_struct(task);
1040 return -EACCES; 1052 return -EACCES;
1041 } 1053 }
1042 task->oomkilladj = oom_adjust; 1054 task->mm->oom_adj = oom_adjust;
1055 task_unlock(task);
1043 put_task_struct(task); 1056 put_task_struct(task);
1044 if (end - buffer == 0) 1057 if (end - buffer == 0)
1045 return -EIO; 1058 return -EIO;
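
With oom_adj moved from the task to its mm, both the read and write
sides above take task_lock() and must cope with a task whose mm is
gone. A userspace model of that read side, with a pthread mutex
standing in for the task lock (the struct layout and OOM_DISABLE value
mirror the kernel's but are simplified here):

#include <stdio.h>
#include <pthread.h>

#define OOM_DISABLE (-17)

struct mm { int oom_adj; };

struct task {
	pthread_mutex_t lock;	/* stands in for task_lock() */
	struct mm *mm;		/* NULL once the address space is gone */
};

static int read_oom_adj(struct task *t)
{
	int val;

	pthread_mutex_lock(&t->lock);
	val = t->mm ? t->mm->oom_adj : OOM_DISABLE;
	pthread_mutex_unlock(&t->lock);
	return val;
}

int main(void)
{
	struct mm mm = { .oom_adj = 4 };
	struct task t = { PTHREAD_MUTEX_INITIALIZER, &mm };

	printf("oom_adj = %d\n", read_oom_adj(&t));
	t.mm = NULL;		/* task exited: fall back to OOM_DISABLE */
	printf("oom_adj = %d\n", read_oom_adj(&t));
	return 0;
}
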
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index c6b0302af4c4..d5c410d47fae 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -64,10 +64,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
64 "Inactive(anon): %8lu kB\n" 64 "Inactive(anon): %8lu kB\n"
65 "Active(file): %8lu kB\n" 65 "Active(file): %8lu kB\n"
66 "Inactive(file): %8lu kB\n" 66 "Inactive(file): %8lu kB\n"
67#ifdef CONFIG_UNEVICTABLE_LRU
68 "Unevictable: %8lu kB\n" 67 "Unevictable: %8lu kB\n"
69 "Mlocked: %8lu kB\n" 68 "Mlocked: %8lu kB\n"
70#endif
71#ifdef CONFIG_HIGHMEM 69#ifdef CONFIG_HIGHMEM
72 "HighTotal: %8lu kB\n" 70 "HighTotal: %8lu kB\n"
73 "HighFree: %8lu kB\n" 71 "HighFree: %8lu kB\n"
@@ -109,10 +107,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
109 K(pages[LRU_INACTIVE_ANON]), 107 K(pages[LRU_INACTIVE_ANON]),
110 K(pages[LRU_ACTIVE_FILE]), 108 K(pages[LRU_ACTIVE_FILE]),
111 K(pages[LRU_INACTIVE_FILE]), 109 K(pages[LRU_INACTIVE_FILE]),
112#ifdef CONFIG_UNEVICTABLE_LRU
113 K(pages[LRU_UNEVICTABLE]), 110 K(pages[LRU_UNEVICTABLE]),
114 K(global_page_state(NR_MLOCK)), 111 K(global_page_state(NR_MLOCK)),
115#endif
116#ifdef CONFIG_HIGHMEM 112#ifdef CONFIG_HIGHMEM
117 K(i.totalhigh), 113 K(i.totalhigh),
118 K(i.freehigh), 114 K(i.freehigh),
diff --git a/fs/proc/page.c b/fs/proc/page.c
index e9983837d08d..2707c6c7a20f 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -6,11 +6,13 @@
6#include <linux/mmzone.h> 6#include <linux/mmzone.h>
7#include <linux/proc_fs.h> 7#include <linux/proc_fs.h>
8#include <linux/seq_file.h> 8#include <linux/seq_file.h>
9#include <linux/hugetlb.h>
9#include <asm/uaccess.h> 10#include <asm/uaccess.h>
10#include "internal.h" 11#include "internal.h"
11 12
12#define KPMSIZE sizeof(u64) 13#define KPMSIZE sizeof(u64)
13#define KPMMASK (KPMSIZE - 1) 14#define KPMMASK (KPMSIZE - 1)
15
14/* /proc/kpagecount - an array exposing page counts 16/* /proc/kpagecount - an array exposing page counts
15 * 17 *
16 * Each entry is a u64 representing the corresponding 18 * Each entry is a u64 representing the corresponding
@@ -32,20 +34,22 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
32 return -EINVAL; 34 return -EINVAL;
33 35
34 while (count > 0) { 36 while (count > 0) {
35 ppage = NULL;
36 if (pfn_valid(pfn)) 37 if (pfn_valid(pfn))
37 ppage = pfn_to_page(pfn); 38 ppage = pfn_to_page(pfn);
38 pfn++; 39 else
40 ppage = NULL;
39 if (!ppage) 41 if (!ppage)
40 pcount = 0; 42 pcount = 0;
41 else 43 else
42 pcount = page_mapcount(ppage); 44 pcount = page_mapcount(ppage);
43 45
44 if (put_user(pcount, out++)) { 46 if (put_user(pcount, out)) {
45 ret = -EFAULT; 47 ret = -EFAULT;
46 break; 48 break;
47 } 49 }
48 50
51 pfn++;
52 out++;
49 count -= KPMSIZE; 53 count -= KPMSIZE;
50 } 54 }
51 55
@@ -68,19 +72,122 @@ static const struct file_operations proc_kpagecount_operations = {
68 72
69/* These macros are used to decouple internal flags from exported ones */ 73/* These macros are used to decouple internal flags from exported ones */
70 74
71#define KPF_LOCKED 0 75#define KPF_LOCKED 0
72#define KPF_ERROR 1 76#define KPF_ERROR 1
73#define KPF_REFERENCED 2 77#define KPF_REFERENCED 2
74#define KPF_UPTODATE 3 78#define KPF_UPTODATE 3
75#define KPF_DIRTY 4 79#define KPF_DIRTY 4
76#define KPF_LRU 5 80#define KPF_LRU 5
77#define KPF_ACTIVE 6 81#define KPF_ACTIVE 6
78#define KPF_SLAB 7 82#define KPF_SLAB 7
79#define KPF_WRITEBACK 8 83#define KPF_WRITEBACK 8
80#define KPF_RECLAIM 9 84#define KPF_RECLAIM 9
81#define KPF_BUDDY 10 85#define KPF_BUDDY 10
86
87/* 11-20: new additions in 2.6.31 */
88#define KPF_MMAP 11
89#define KPF_ANON 12
90#define KPF_SWAPCACHE 13
91#define KPF_SWAPBACKED 14
92#define KPF_COMPOUND_HEAD 15
93#define KPF_COMPOUND_TAIL 16
94#define KPF_HUGE 17
95#define KPF_UNEVICTABLE 18
96#define KPF_NOPAGE 20
97
 98/* kernel hacking assistance
99 * WARNING: subject to change, never rely on them!
100 */
101#define KPF_RESERVED 32
102#define KPF_MLOCKED 33
103#define KPF_MAPPEDTODISK 34
104#define KPF_PRIVATE 35
105#define KPF_PRIVATE_2 36
106#define KPF_OWNER_PRIVATE 37
107#define KPF_ARCH 38
108#define KPF_UNCACHED 39
109
110static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
111{
112 return ((kflags >> kbit) & 1) << ubit;
113}
82 114
83#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos) 115static u64 get_uflags(struct page *page)
116{
117 u64 k;
118 u64 u;
119
120 /*
121 * pseudo flag: KPF_NOPAGE
122 * it differentiates a memory hole from a page with no flags
123 */
124 if (!page)
125 return 1 << KPF_NOPAGE;
126
127 k = page->flags;
128 u = 0;
129
130 /*
131 * pseudo flags for the well known (anonymous) memory mapped pages
132 *
133 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
134 * simple test in page_mapped() is not enough.
135 */
136 if (!PageSlab(page) && page_mapped(page))
137 u |= 1 << KPF_MMAP;
138 if (PageAnon(page))
139 u |= 1 << KPF_ANON;
140
141 /*
142 * compound pages: export both head/tail info
143 * they together define a compound page's start/end pos and order
144 */
145 if (PageHead(page))
146 u |= 1 << KPF_COMPOUND_HEAD;
147 if (PageTail(page))
148 u |= 1 << KPF_COMPOUND_TAIL;
149 if (PageHuge(page))
150 u |= 1 << KPF_HUGE;
151
152 u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
153
154 /*
155 * Caveats on high order pages:
156 * PG_buddy will only be set on the head page; SLUB/SLQB do the same
157 * for PG_slab; SLOB won't set PG_slab at all on compound pages.
158 */
159 u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
160 u |= kpf_copy_bit(k, KPF_BUDDY, PG_buddy);
161
162 u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
163 u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
164 u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
165 u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);
166
167 u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
168 u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
169 u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
170 u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);
171
172 u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
173 u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);
174
175 u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
176 u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
177
178#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
179 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
180#endif
181
182 u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
183 u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
184 u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
185 u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
186 u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
187 u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);
188
189 return u;
 190}
84 191
85static ssize_t kpageflags_read(struct file *file, char __user *buf, 192static ssize_t kpageflags_read(struct file *file, char __user *buf,
86 size_t count, loff_t *ppos) 193 size_t count, loff_t *ppos)
@@ -90,7 +197,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
90 unsigned long src = *ppos; 197 unsigned long src = *ppos;
91 unsigned long pfn; 198 unsigned long pfn;
92 ssize_t ret = 0; 199 ssize_t ret = 0;
93 u64 kflags, uflags;
94 200
95 pfn = src / KPMSIZE; 201 pfn = src / KPMSIZE;
96 count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src); 202 count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
@@ -98,32 +204,18 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
98 return -EINVAL; 204 return -EINVAL;
99 205
100 while (count > 0) { 206 while (count > 0) {
101 ppage = NULL;
102 if (pfn_valid(pfn)) 207 if (pfn_valid(pfn))
103 ppage = pfn_to_page(pfn); 208 ppage = pfn_to_page(pfn);
104 pfn++;
105 if (!ppage)
106 kflags = 0;
107 else 209 else
108 kflags = ppage->flags; 210 ppage = NULL;
109 211
110 uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) | 212 if (put_user(get_uflags(ppage), out)) {
111 kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
112 kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
113 kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
114 kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
115 kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
116 kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
117 kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
118 kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
119 kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
120 kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);
121
122 if (put_user(uflags, out++)) {
123 ret = -EFAULT; 213 ret = -EFAULT;
124 break; 214 break;
125 } 215 }
126 216
217 pfn++;
218 out++;
127 count -= KPMSIZE; 219 count -= KPMSIZE;
128 } 220 }
129 221
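
get_uflags() above freezes a stable bit layout for /proc/kpageflags,
decoupled from the kernel-internal PG_* numbering. A minimal userspace
consumer of that layout, reading the u64 for pfn 0 and testing a few of
the documented bits (needs root; the chosen pfn is just an example):

#include <stdio.h>
#include <stdint.h>

#define KPF_LRU    5
#define KPF_SLAB   7
#define KPF_BUDDY  10
#define KPF_NOPAGE 20

int main(void)
{
	FILE *f = fopen("/proc/kpageflags", "rb");
	uint64_t flags;

	if (!f) {
		perror("/proc/kpageflags");
		return 1;
	}
	if (fread(&flags, sizeof(flags), 1, f) != 1) {
		perror("read");
		fclose(f);
		return 1;
	}
	printf("pfn 0: lru=%d slab=%d buddy=%d nopage=%d\n",
	       !!(flags & (1ULL << KPF_LRU)),
	       !!(flags & (1ULL << KPF_SLAB)),
	       !!(flags & (1ULL << KPF_BUDDY)),
	       !!(flags & (1ULL << KPF_NOPAGE)));
	fclose(f);
	return 0;
}
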
diff --git a/fs/select.c b/fs/select.c
index 0fe0e1469df3..d870237e42c7 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -168,7 +168,7 @@ static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
168 return table->entry++; 168 return table->entry++;
169} 169}
170 170
171static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) 171static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
172{ 172{
173 struct poll_wqueues *pwq = wait->private; 173 struct poll_wqueues *pwq = wait->private;
174 DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task); 174 DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
@@ -194,6 +194,16 @@ static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
194 return default_wake_function(&dummy_wait, mode, sync, key); 194 return default_wake_function(&dummy_wait, mode, sync, key);
195} 195}
196 196
197static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
198{
199 struct poll_table_entry *entry;
200
201 entry = container_of(wait, struct poll_table_entry, wait);
202 if (key && !((unsigned long)key & entry->key))
203 return 0;
204 return __pollwake(wait, mode, sync, key);
205}
206
197/* Add a new entry */ 207/* Add a new entry */
198static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, 208static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
199 poll_table *p) 209 poll_table *p)
@@ -205,6 +215,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
205 get_file(filp); 215 get_file(filp);
206 entry->filp = filp; 216 entry->filp = filp;
207 entry->wait_address = wait_address; 217 entry->wait_address = wait_address;
218 entry->key = p->key;
208 init_waitqueue_func_entry(&entry->wait, pollwake); 219 init_waitqueue_func_entry(&entry->wait, pollwake);
209 entry->wait.private = pwq; 220 entry->wait.private = pwq;
210 add_wait_queue(wait_address, &entry->wait); 221 add_wait_queue(wait_address, &entry->wait);
@@ -362,6 +373,18 @@ get_max:
362#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR) 373#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
363#define POLLEX_SET (POLLPRI) 374#define POLLEX_SET (POLLPRI)
364 375
376static inline void wait_key_set(poll_table *wait, unsigned long in,
377 unsigned long out, unsigned long bit)
378{
379 if (wait) {
380 wait->key = POLLEX_SET;
381 if (in & bit)
382 wait->key |= POLLIN_SET;
383 if (out & bit)
384 wait->key |= POLLOUT_SET;
385 }
386}
387
365int do_select(int n, fd_set_bits *fds, struct timespec *end_time) 388int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
366{ 389{
367 ktime_t expire, *to = NULL; 390 ktime_t expire, *to = NULL;
@@ -418,20 +441,25 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
418 if (file) { 441 if (file) {
419 f_op = file->f_op; 442 f_op = file->f_op;
420 mask = DEFAULT_POLLMASK; 443 mask = DEFAULT_POLLMASK;
421 if (f_op && f_op->poll) 444 if (f_op && f_op->poll) {
422 mask = (*f_op->poll)(file, retval ? NULL : wait); 445 wait_key_set(wait, in, out, bit);
446 mask = (*f_op->poll)(file, wait);
447 }
423 fput_light(file, fput_needed); 448 fput_light(file, fput_needed);
424 if ((mask & POLLIN_SET) && (in & bit)) { 449 if ((mask & POLLIN_SET) && (in & bit)) {
425 res_in |= bit; 450 res_in |= bit;
426 retval++; 451 retval++;
452 wait = NULL;
427 } 453 }
428 if ((mask & POLLOUT_SET) && (out & bit)) { 454 if ((mask & POLLOUT_SET) && (out & bit)) {
429 res_out |= bit; 455 res_out |= bit;
430 retval++; 456 retval++;
457 wait = NULL;
431 } 458 }
432 if ((mask & POLLEX_SET) && (ex & bit)) { 459 if ((mask & POLLEX_SET) && (ex & bit)) {
433 res_ex |= bit; 460 res_ex |= bit;
434 retval++; 461 retval++;
462 wait = NULL;
435 } 463 }
436 } 464 }
437 } 465 }
@@ -685,8 +713,12 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
685 mask = POLLNVAL; 713 mask = POLLNVAL;
686 if (file != NULL) { 714 if (file != NULL) {
687 mask = DEFAULT_POLLMASK; 715 mask = DEFAULT_POLLMASK;
688 if (file->f_op && file->f_op->poll) 716 if (file->f_op && file->f_op->poll) {
717 if (pwait)
718 pwait->key = pollfd->events |
719 POLLERR | POLLHUP;
689 mask = file->f_op->poll(file, pwait); 720 mask = file->f_op->poll(file, pwait);
721 }
690 /* Mask out unneeded events. */ 722 /* Mask out unneeded events. */
691 mask &= pollfd->events | POLLERR | POLLHUP; 723 mask &= pollfd->events | POLLERR | POLLHUP;
692 fput_light(file, fput_needed); 724 fput_light(file, fput_needed);
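
The select/poll changes store the events a waiter cares about in
entry->key, so pollwake() can drop wakeups whose event mask does not
intersect it instead of waking every poller on the queue. A sketch of
just that filter (the event values match poll.h, but the wake function
and struct are illustrative):

#include <stdio.h>

#define POLLIN  0x001
#define POLLOUT 0x004

struct poll_entry {
	unsigned long key;	/* events this waiter asked for */
};

static int pollwake(struct poll_entry *entry, unsigned long key)
{
	/* key == 0 means the waker passed no event info: always wake */
	if (key && !(key & entry->key))
		return 0;		/* filtered: poller stays asleep */
	printf("waking poller (event mask %#lx)\n", key);
	return 1;
}

int main(void)
{
	struct poll_entry writer = { .key = POLLOUT };

	pollwake(&writer, POLLIN);	/* readable only: filtered out */
	pollwake(&writer, POLLOUT);	/* writable: wakes the poller */
	pollwake(&writer, 0);		/* legacy wake_up(): always wakes */
	return 0;
}
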
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 58c33055c304..54e8b3d956b7 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -1,7 +1,7 @@
1#ifndef _ASM_GENERIC_KMAP_TYPES_H 1#ifndef _ASM_GENERIC_KMAP_TYPES_H
2#define _ASM_GENERIC_KMAP_TYPES_H 2#define _ASM_GENERIC_KMAP_TYPES_H
3 3
4#ifdef CONFIG_DEBUG_HIGHMEM 4#ifdef __WITH_KM_FENCE
5# define D(n) __KM_FENCE_##n , 5# define D(n) __KM_FENCE_##n ,
6#else 6#else
7# define D(n) 7# define D(n)
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 54398d2c6d8d..d276b5510c83 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -1,7 +1,6 @@
1#ifndef _LINUX_BUG_H 1#ifndef _LINUX_BUG_H
2#define _LINUX_BUG_H 2#define _LINUX_BUG_H
3 3
4#include <linux/module.h>
5#include <asm/bug.h> 4#include <asm/bug.h>
6 5
7enum bug_trap_type { 6enum bug_trap_type {
@@ -24,10 +23,6 @@ const struct bug_entry *find_bug(unsigned long bugaddr);
24 23
25enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); 24enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
26 25
27int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
28 struct module *);
29void module_bug_cleanup(struct module *);
30
31/* These are defined by the architecture */ 26/* These are defined by the architecture */
32int is_valid_bugaddr(unsigned long addr); 27int is_valid_bugaddr(unsigned long addr);
33 28
@@ -38,13 +33,6 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
38{ 33{
39 return BUG_TRAP_TYPE_BUG; 34 return BUG_TRAP_TYPE_BUG;
40} 35}
41static inline int module_bug_finalize(const Elf_Ehdr *hdr,
42 const Elf_Shdr *sechdrs,
43 struct module *mod)
44{
45 return 0;
46}
47static inline void module_bug_cleanup(struct module *mod) {}
48 36
49#endif /* CONFIG_GENERIC_BUG */ 37#endif /* CONFIG_GENERIC_BUG */
50#endif /* _LINUX_BUG_H */ 38#endif /* _LINUX_BUG_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 05ea1dd7d681..a5740fc4d04b 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -18,7 +18,6 @@
18 18
19extern int number_of_cpusets; /* How many cpusets are defined in system? */ 19extern int number_of_cpusets; /* How many cpusets are defined in system? */
20 20
21extern int cpuset_init_early(void);
22extern int cpuset_init(void); 21extern int cpuset_init(void);
23extern void cpuset_init_smp(void); 22extern void cpuset_init_smp(void);
24extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); 23extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
@@ -27,7 +26,6 @@ extern void cpuset_cpus_allowed_locked(struct task_struct *p,
27extern nodemask_t cpuset_mems_allowed(struct task_struct *p); 26extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
28#define cpuset_current_mems_allowed (current->mems_allowed) 27#define cpuset_current_mems_allowed (current->mems_allowed)
29void cpuset_init_current_mems_allowed(void); 28void cpuset_init_current_mems_allowed(void);
30void cpuset_update_task_memory_state(void);
31int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); 29int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
32 30
33extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); 31extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
@@ -92,9 +90,13 @@ extern void rebuild_sched_domains(void);
92 90
93extern void cpuset_print_task_mems_allowed(struct task_struct *p); 91extern void cpuset_print_task_mems_allowed(struct task_struct *p);
94 92
93static inline void set_mems_allowed(nodemask_t nodemask)
94{
95 current->mems_allowed = nodemask;
96}
97
95#else /* !CONFIG_CPUSETS */ 98#else /* !CONFIG_CPUSETS */
96 99
97static inline int cpuset_init_early(void) { return 0; }
98static inline int cpuset_init(void) { return 0; } 100static inline int cpuset_init(void) { return 0; }
99static inline void cpuset_init_smp(void) {} 101static inline void cpuset_init_smp(void) {}
100 102
@@ -116,7 +118,6 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
116 118
117#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY]) 119#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
118static inline void cpuset_init_current_mems_allowed(void) {} 120static inline void cpuset_init_current_mems_allowed(void) {}
119static inline void cpuset_update_task_memory_state(void) {}
120 121
121static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) 122static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
122{ 123{
@@ -188,6 +189,10 @@ static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
188{ 189{
189} 190}
190 191
192static inline void set_mems_allowed(nodemask_t nodemask)
193{
194}
195
191#endif /* !CONFIG_CPUSETS */ 196#endif /* !CONFIG_CPUSETS */
192 197
193#endif /* _LINUX_CPUSET_H */ 198#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 330c4b1bfcaa..dd68358996b7 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -677,6 +677,9 @@ struct fb_ops {
677 /* get capability given var */ 677 /* get capability given var */
678 void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps, 678 void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
679 struct fb_var_screeninfo *var); 679 struct fb_var_screeninfo *var);
680
681 /* teardown any resources to do with this framebuffer */
682 void (*fb_destroy)(struct fb_info *info);
680}; 683};
681 684
682#ifdef CONFIG_FB_TILEBLITTING 685#ifdef CONFIG_FB_TILEBLITTING
@@ -786,6 +789,8 @@ struct fb_tile_ops {
786#define FBINFO_MISC_USEREVENT 0x10000 /* event request 789#define FBINFO_MISC_USEREVENT 0x10000 /* event request
787 from userspace */ 790 from userspace */
788#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ 791#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
792#define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware
793 inited framebuffer */
789 794
790/* A driver may set this flag to indicate that it does want a set_par to be 795/* A driver may set this flag to indicate that it does want a set_par to be
791 * called every time when fbcon_switch is executed. The advantage is that with 796 * called every time when fbcon_switch is executed. The advantage is that with
@@ -854,7 +859,12 @@ struct fb_info {
854 u32 state; /* Hardware state i.e suspend */ 859 u32 state; /* Hardware state i.e suspend */
855 void *fbcon_par; /* fbcon use-only private area */ 860 void *fbcon_par; /* fbcon use-only private area */
856 /* From here on everything is device dependent */ 861 /* From here on everything is device dependent */
857 void *par; 862 void *par;
 863 /* we need the PCI or similar aperture base/size not
864 smem_start/size as smem_start may just be an object
865 allocated inside the aperture so may not actually overlap */
866 resource_size_t aperture_base;
867 resource_size_t aperture_size;
858}; 868};
859 869
860#ifdef MODULE 870#ifdef MODULE
@@ -893,7 +903,7 @@ struct fb_info {
893#define fb_writeq sbus_writeq 903#define fb_writeq sbus_writeq
894#define fb_memset sbus_memset_io 904#define fb_memset sbus_memset_io
895 905
896#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) 906#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
897 907
898#define fb_readb __raw_readb 908#define fb_readb __raw_readb
899#define fb_readw __raw_readw 909#define fb_readw __raw_readw
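
The new aperture_base/aperture_size fields let a native driver detect
that a firmware framebuffer sits inside its aperture even when
smem_start is just a small object somewhere within it, and then use the
new fb_destroy hook to retire the generic driver. A sketch of the
overlap test such handoff code might perform (addresses and sizes are
made up):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct aperture {
	uint64_t base;
	uint64_t size;
};

static bool apertures_overlap(const struct aperture *a,
			      const struct aperture *b)
{
	/* half-open ranges [base, base + size) intersect */
	return a->base < b->base + b->size && b->base < a->base + a->size;
}

int main(void)
{
	struct aperture firmware_fb = { 0xe0000000, 0x00800000 };
	struct aperture native_gpu  = { 0xe0000000, 0x10000000 };

	if (apertures_overlap(&firmware_fb, &native_gpu))
		printf("firmware fb overlaps; hand off to native driver\n");
	return 0;
}
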
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
index cca686b39123..875451f1373a 100644
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -24,21 +24,17 @@
24 */ 24 */
25#ifdef CONFIG_FIRMWARE_MEMMAP 25#ifdef CONFIG_FIRMWARE_MEMMAP
26 26
27int firmware_map_add(resource_size_t start, resource_size_t end, 27int firmware_map_add(u64 start, u64 end, const char *type);
28 const char *type); 28int firmware_map_add_early(u64 start, u64 end, const char *type);
29int firmware_map_add_early(resource_size_t start, resource_size_t end,
30 const char *type);
31 29
32#else /* CONFIG_FIRMWARE_MEMMAP */ 30#else /* CONFIG_FIRMWARE_MEMMAP */
33 31
34static inline int firmware_map_add(resource_size_t start, resource_size_t end, 32static inline int firmware_map_add(u64 start, u64 end, const char *type)
35 const char *type)
36{ 33{
37 return 0; 34 return 0;
38} 35}
39 36
40static inline int firmware_map_add_early(resource_size_t start, 37static inline int firmware_map_add_early(u64 start, u64 end, const char *type)
41 resource_size_t end, const char *type)
42{ 38{
43 return 0; 39 return 0;
44} 40}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6d12174fbe11..74a57938c880 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -879,7 +879,7 @@ struct file_ra_state {
879 there are only # of pages ahead */ 879 there are only # of pages ahead */
880 880
881 unsigned int ra_pages; /* Maximum readahead window */ 881 unsigned int ra_pages; /* Maximum readahead window */
882 int mmap_miss; /* Cache miss stat for mmap accesses */ 882 unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
883 loff_t prev_pos; /* Cache last read() position */ 883 loff_t prev_pos; /* Cache last read() position */
884}; 884};
885 885
@@ -2037,9 +2037,6 @@ extern int __invalidate_device(struct block_device *);
2037extern int invalidate_partition(struct gendisk *, int); 2037extern int invalidate_partition(struct gendisk *, int);
2038#endif 2038#endif
2039extern int invalidate_inodes(struct super_block *); 2039extern int invalidate_inodes(struct super_block *);
2040unsigned long __invalidate_mapping_pages(struct address_space *mapping,
2041 pgoff_t start, pgoff_t end,
2042 bool be_atomic);
2043unsigned long invalidate_mapping_pages(struct address_space *mapping, 2040unsigned long invalidate_mapping_pages(struct address_space *mapping,
2044 pgoff_t start, pgoff_t end); 2041 pgoff_t start, pgoff_t end);
2045 2042
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 80e14b8c2e78..cfdb35d71bca 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -5,6 +5,7 @@
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/linkage.h> 6#include <linux/linkage.h>
7#include <linux/topology.h> 7#include <linux/topology.h>
8#include <linux/mmdebug.h>
8 9
9struct vm_area_struct; 10struct vm_area_struct;
10 11
@@ -20,7 +21,8 @@ struct vm_area_struct;
20#define __GFP_DMA ((__force gfp_t)0x01u) 21#define __GFP_DMA ((__force gfp_t)0x01u)
21#define __GFP_HIGHMEM ((__force gfp_t)0x02u) 22#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
22#define __GFP_DMA32 ((__force gfp_t)0x04u) 23#define __GFP_DMA32 ((__force gfp_t)0x04u)
23 24#define __GFP_MOVABLE ((__force gfp_t)0x08u) /* Page is movable */
25#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
24/* 26/*
25 * Action modifiers - doesn't change the zoning 27 * Action modifiers - doesn't change the zoning
26 * 28 *
@@ -50,7 +52,6 @@ struct vm_area_struct;
50#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ 52#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
51#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ 53#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
52#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */ 54#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
53#define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */
54 55
55#ifdef CONFIG_KMEMCHECK 56#ifdef CONFIG_KMEMCHECK
56#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */ 57#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
@@ -127,24 +128,105 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
127 ((gfp_flags & __GFP_RECLAIMABLE) != 0); 128 ((gfp_flags & __GFP_RECLAIMABLE) != 0);
128} 129}
129 130
130static inline enum zone_type gfp_zone(gfp_t flags) 131#ifdef CONFIG_HIGHMEM
131{ 132#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
133#else
134#define OPT_ZONE_HIGHMEM ZONE_NORMAL
135#endif
136
132#ifdef CONFIG_ZONE_DMA 137#ifdef CONFIG_ZONE_DMA
133 if (flags & __GFP_DMA) 138#define OPT_ZONE_DMA ZONE_DMA
134 return ZONE_DMA; 139#else
140#define OPT_ZONE_DMA ZONE_NORMAL
135#endif 141#endif
142
136#ifdef CONFIG_ZONE_DMA32 143#ifdef CONFIG_ZONE_DMA32
137 if (flags & __GFP_DMA32) 144#define OPT_ZONE_DMA32 ZONE_DMA32
138 return ZONE_DMA32; 145#else
146#define OPT_ZONE_DMA32 ZONE_NORMAL
139#endif 147#endif
140 if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) == 148
141 (__GFP_HIGHMEM | __GFP_MOVABLE)) 149/*
142 return ZONE_MOVABLE; 150 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
143#ifdef CONFIG_HIGHMEM 151 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
144 if (flags & __GFP_HIGHMEM) 152 * and there are 16 of them to cover all possible combinations of
145 return ZONE_HIGHMEM; 153 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM
154 *
155 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
156 * But GFP_MOVABLE is not only a zone specifier but also an allocation
157 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 158 * At most one of the lowest 3 bits (DMA, DMA32, HIGHMEM) can be set to "1".
159 *
160 * bit result
161 * =================
162 * 0x0 => NORMAL
163 * 0x1 => DMA or NORMAL
164 * 0x2 => HIGHMEM or NORMAL
165 * 0x3 => BAD (DMA+HIGHMEM)
166 * 0x4 => DMA32 or DMA or NORMAL
167 * 0x5 => BAD (DMA+DMA32)
168 * 0x6 => BAD (HIGHMEM+DMA32)
169 * 0x7 => BAD (HIGHMEM+DMA32+DMA)
170 * 0x8 => NORMAL (MOVABLE+0)
171 * 0x9 => DMA or NORMAL (MOVABLE+DMA)
172 * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
173 * 0xb => BAD (MOVABLE+HIGHMEM+DMA)
174 * 0xc => DMA32 (MOVABLE+HIGHMEM+DMA32)
175 * 0xd => BAD (MOVABLE+DMA32+DMA)
176 * 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
177 * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
178 *
179 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
180 */
181
182#if 16 * ZONES_SHIFT > BITS_PER_LONG
183#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
184#endif
185
186#define GFP_ZONE_TABLE ( \
187 (ZONE_NORMAL << 0 * ZONES_SHIFT) \
188 | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \
189 | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT) \
190 | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT) \
191 | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT) \
192 | (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT) \
193 | (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
194 | (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
195)
196
197/*
198 * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32
199 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
200 * entry starting with bit 0. Bit is set if the combination is not
201 * allowed.
202 */
203#define GFP_ZONE_BAD ( \
204 1 << (__GFP_DMA | __GFP_HIGHMEM) \
205 | 1 << (__GFP_DMA | __GFP_DMA32) \
206 | 1 << (__GFP_DMA32 | __GFP_HIGHMEM) \
207 | 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM) \
208 | 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA) \
209 | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA) \
210 | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM) \
211 | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
212)
213
214static inline enum zone_type gfp_zone(gfp_t flags)
215{
216 enum zone_type z;
217 int bit = flags & GFP_ZONEMASK;
218
219 z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
220 ((1 << ZONES_SHIFT) - 1);
221
222 if (__builtin_constant_p(bit))
223 BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
224 else {
225#ifdef CONFIG_DEBUG_VM
226 BUG_ON((GFP_ZONE_BAD >> bit) & 1);
146#endif 227#endif
147 return ZONE_NORMAL; 228 }
229 return z;
148} 230}
149 231
150/* 232/*
@@ -184,30 +266,19 @@ static inline void arch_alloc_page(struct page *page, int order) { }
184#endif 266#endif
185 267
186struct page * 268struct page *
187__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, 269__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
188 struct zonelist *zonelist, nodemask_t *nodemask); 270 struct zonelist *zonelist, nodemask_t *nodemask);
189 271
190static inline struct page * 272static inline struct page *
191__alloc_pages(gfp_t gfp_mask, unsigned int order, 273__alloc_pages(gfp_t gfp_mask, unsigned int order,
192 struct zonelist *zonelist) 274 struct zonelist *zonelist)
193{ 275{
194 return __alloc_pages_internal(gfp_mask, order, zonelist, NULL); 276 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
195} 277}
196 278
197static inline struct page *
198__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
199 struct zonelist *zonelist, nodemask_t *nodemask)
200{
201 return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
202}
203
204
205static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, 279static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
206 unsigned int order) 280 unsigned int order)
207{ 281{
208 if (unlikely(order >= MAX_ORDER))
209 return NULL;
210
211 /* Unknown node is current node */ 282 /* Unknown node is current node */
212 if (nid < 0) 283 if (nid < 0)
213 nid = numa_node_id(); 284 nid = numa_node_id();
@@ -215,15 +286,20 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
215 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); 286 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
216} 287}
217 288
289static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
290 unsigned int order)
291{
292 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
293
294 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
295}
296
218#ifdef CONFIG_NUMA 297#ifdef CONFIG_NUMA
219extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); 298extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
220 299
221static inline struct page * 300static inline struct page *
222alloc_pages(gfp_t gfp_mask, unsigned int order) 301alloc_pages(gfp_t gfp_mask, unsigned int order)
223{ 302{
224 if (unlikely(order >= MAX_ORDER))
225 return NULL;
226
227 return alloc_pages_current(gfp_mask, order); 303 return alloc_pages_current(gfp_mask, order);
228} 304}
229extern struct page *alloc_page_vma(gfp_t gfp_mask, 305extern struct page *alloc_page_vma(gfp_t gfp_mask,
@@ -260,4 +336,16 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
260void drain_all_pages(void); 336void drain_all_pages(void);
261void drain_local_pages(void *dummy); 337void drain_local_pages(void *dummy);
262 338
339extern bool oom_killer_disabled;
340
341static inline void oom_killer_disable(void)
342{
343 oom_killer_disabled = true;
344}
345
346static inline void oom_killer_enable(void)
347{
348 oom_killer_disabled = false;
349}
350
263#endif /* __LINUX_GFP_H */ 351#endif /* __LINUX_GFP_H */
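
gfp_zone() above turns a chain of #ifdef'd branches into one table
lookup: all 16 combinations of the four zone bits are pre-resolved into
a single word, ZONES_SHIFT bits per entry, with GFP_ZONE_BAD catching
the invalid combinations. A self-contained model of the lookup (the
XGFP_* flag values, zone numbering, and 2-bit entry width are
illustrative; bad combinations are simply left zero here):

#include <stdio.h>

enum zone { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE };

#define XGFP_DMA     0x1u
#define XGFP_HIGHMEM 0x2u
#define XGFP_DMA32   0x4u
#define XGFP_MOVABLE 0x8u
#define ZONES_SHIFT  2

/* entry i (ZONES_SHIFT bits wide) holds the zone for bit combination i */
#define ZONE_TABLE ( \
	((unsigned long)ZONE_NORMAL  << (0            * ZONES_SHIFT)) | \
	((unsigned long)ZONE_DMA     << (XGFP_DMA     * ZONES_SHIFT)) | \
	((unsigned long)ZONE_HIGHMEM << (XGFP_HIGHMEM * ZONES_SHIFT)) | \
	((unsigned long)ZONE_MOVABLE << ((XGFP_MOVABLE | XGFP_HIGHMEM) \
					 * ZONES_SHIFT)))

static enum zone gfp_zone(unsigned int flags)
{
	unsigned int bit = flags & 0xf;	/* the four zone selector bits */

	return (ZONE_TABLE >> (bit * ZONES_SHIFT)) &
	       ((1 << ZONES_SHIFT) - 1);
}

int main(void)
{
	printf("no flag         -> zone %d\n", gfp_zone(0));
	printf("DMA             -> zone %d\n", gfp_zone(XGFP_DMA));
	printf("MOVABLE|HIGHMEM -> zone %d\n",
	       gfp_zone(XGFP_MOVABLE | XGFP_HIGHMEM));
	return 0;
}
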
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 1fcb7126a01f..211ff4497269 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -55,7 +55,9 @@ static inline void *kmap(struct page *page)
55 return page_address(page); 55 return page_address(page);
56} 56}
57 57
58#define kunmap(page) do { (void) (page); } while (0) 58static inline void kunmap(struct page *page)
59{
60}
59 61
60static inline void *kmap_atomic(struct page *page, enum km_type idx) 62static inline void *kmap_atomic(struct page *page, enum km_type idx)
61{ 63{
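
Replacing the no-op kunmap() macro with an empty static inline keeps
the call compiling to nothing while restoring type checking of the
argument. A standalone sketch of the difference (struct page here is a
stand-in):

#include <stdio.h>

struct page { int id; };

static inline void kunmap(struct page *page)
{
	/* nothing to do without highmem, but the argument is
	 * still type-checked and evaluated exactly once */
	(void)page;
}

int main(void)
{
	struct page p = { 1 };

	kunmap(&p);	/* fine */
	/* kunmap(42) would now draw an int-to-pointer warning,
	 * which the old statement macro silently accepted */
	printf("ok\n");
	return 0;
}
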
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 03be7f29ca01..a05a5ef33391 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -11,6 +11,8 @@
11 11
12struct ctl_table; 12struct ctl_table;
13 13
14int PageHuge(struct page *page);
15
14static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) 16static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
15{ 17{
16 return vma->vm_flags & VM_HUGETLB; 18 return vma->vm_flags & VM_HUGETLB;
@@ -61,6 +63,11 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
61 63
62#else /* !CONFIG_HUGETLB_PAGE */ 64#else /* !CONFIG_HUGETLB_PAGE */
63 65
66static inline int PageHuge(struct page *page)
67{
68 return 0;
69}
70
64static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) 71static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
65{ 72{
66 return 0; 73 return 0;
diff --git a/include/linux/init.h b/include/linux/init.h
index b2189803f19a..8c2c9989626d 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -29,7 +29,7 @@
29 * sign followed by value, e.g.: 29 * sign followed by value, e.g.:
30 * 30 *
31 * static int init_variable __initdata = 0; 31 * static int init_variable __initdata = 0;
32 * static char linux_logo[] __initdata = { 0x32, 0x36, ... }; 32 * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
33 * 33 *
34 * Don't forget to initialize data not at file scope, i.e. within a function, 34 * Don't forget to initialize data not at file scope, i.e. within a function,
35 * as gcc otherwise puts the data into the bss section and not into the init 35 * as gcc otherwise puts the data into the bss section and not into the init
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 28b1f30601b5..5368fbdc7801 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,18 +15,6 @@
15extern struct files_struct init_files; 15extern struct files_struct init_files;
16extern struct fs_struct init_fs; 16extern struct fs_struct init_fs;
17 17
18#define INIT_MM(name) \
19{ \
20 .mm_rb = RB_ROOT, \
21 .pgd = swapper_pg_dir, \
22 .mm_users = ATOMIC_INIT(2), \
23 .mm_count = ATOMIC_INIT(1), \
24 .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \
25 .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
26 .mmlist = LIST_HEAD_INIT(name.mmlist), \
27 .cpu_vm_mask = CPU_MASK_ALL, \
28}
29
30#define INIT_SIGNALS(sig) { \ 18#define INIT_SIGNALS(sig) { \
31 .count = ATOMIC_INIT(1), \ 19 .count = ATOMIC_INIT(1), \
32 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ 20 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index 08a92969c76e..ca5bd91d12e1 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -32,6 +32,22 @@ struct linux_logo {
32 const unsigned char *data; 32 const unsigned char *data;
33}; 33};
34 34
35extern const struct linux_logo logo_linux_mono;
36extern const struct linux_logo logo_linux_vga16;
37extern const struct linux_logo logo_linux_clut224;
38extern const struct linux_logo logo_blackfin_vga16;
39extern const struct linux_logo logo_blackfin_clut224;
40extern const struct linux_logo logo_dec_clut224;
41extern const struct linux_logo logo_mac_clut224;
42extern const struct linux_logo logo_parisc_clut224;
43extern const struct linux_logo logo_sgi_clut224;
44extern const struct linux_logo logo_sun_clut224;
45extern const struct linux_logo logo_superh_mono;
46extern const struct linux_logo logo_superh_vga16;
47extern const struct linux_logo logo_superh_clut224;
48extern const struct linux_logo logo_m32r_clut224;
49extern const struct linux_logo logo_spe_clut224;
50
35extern const struct linux_logo *fb_find_logo(int depth); 51extern const struct linux_logo *fb_find_logo(int depth);
36#ifdef CONFIG_FB_LOGO_EXTRA 52#ifdef CONFIG_FB_LOGO_EXTRA
37extern void fb_append_extra_logo(const struct linux_logo *logo, 53extern void fb_append_extra_logo(const struct linux_logo *logo,
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
new file mode 100644
index 000000000000..ad651f4e45ac
--- /dev/null
+++ b/include/linux/lis3lv02d.h
@@ -0,0 +1,39 @@
1#ifndef __LIS3LV02D_H_
2#define __LIS3LV02D_H_
3
4struct lis3lv02d_platform_data {
5 /* please note: the 'click' feature is only supported for
6 * LIS[32]02DL variants of the chip and will be ignored for
7 * others */
8#define LIS3_CLICK_SINGLE_X (1 << 0)
9#define LIS3_CLICK_DOUBLE_X (1 << 1)
10#define LIS3_CLICK_SINGLE_Y (1 << 2)
11#define LIS3_CLICK_DOUBLE_Y (1 << 3)
12#define LIS3_CLICK_SINGLE_Z (1 << 4)
13#define LIS3_CLICK_DOUBLE_Z (1 << 5)
14 unsigned char click_flags;
15 unsigned char click_thresh_x;
16 unsigned char click_thresh_y;
17 unsigned char click_thresh_z;
18 unsigned char click_time_limit;
19 unsigned char click_latency;
20 unsigned char click_window;
21
22#define LIS3_IRQ1_DISABLE (0 << 0)
23#define LIS3_IRQ1_FF_WU_1 (1 << 0)
24#define LIS3_IRQ1_FF_WU_2 (2 << 0)
25#define LIS3_IRQ1_FF_WU_12 (3 << 0)
26#define LIS3_IRQ1_DATA_READY (4 << 0)
27#define LIS3_IRQ1_CLICK (7 << 0)
28#define LIS3_IRQ2_DISABLE (0 << 3)
29#define LIS3_IRQ2_FF_WU_1 (1 << 3)
30#define LIS3_IRQ2_FF_WU_2 (2 << 3)
31#define LIS3_IRQ2_FF_WU_12 (3 << 3)
32#define LIS3_IRQ2_DATA_READY (4 << 3)
33#define LIS3_IRQ2_CLICK (7 << 3)
34#define LIS3_IRQ_OPEN_DRAIN (1 << 6)
35#define LIS3_IRQ_ACTIVE_HIGH (1 << 7)
36 unsigned char irq_cfg;
37};
38
39#endif /* __LIS3LV02D_H_ */
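
The new platform data packs the interrupt routing for the two IRQ lines
into one byte: bits 0-2 select the source for line 1, bits 3-5 for line
2, and bits 6-7 carry drive/polarity flags. A sketch of how board code
might fill it (the struct is trimmed to the two flag fields; the macro
values mirror the header above):

#include <stdio.h>

#define LIS3_CLICK_SINGLE_X	(1 << 0)
#define LIS3_CLICK_SINGLE_Y	(1 << 2)
#define LIS3_IRQ1_CLICK		(7 << 0)
#define LIS3_IRQ2_DATA_READY	(4 << 3)
#define LIS3_IRQ_ACTIVE_HIGH	(1 << 7)

struct lis3lv02d_platform_data {
	unsigned char click_flags;
	unsigned char irq_cfg;
};

int main(void)
{
	struct lis3lv02d_platform_data pdata = {
		.click_flags = LIS3_CLICK_SINGLE_X | LIS3_CLICK_SINGLE_Y,
		.irq_cfg     = LIS3_IRQ1_CLICK | LIS3_IRQ2_DATA_READY |
			       LIS3_IRQ_ACTIVE_HIGH,
	};

	printf("click_flags=%#x irq_cfg=%#x\n",
	       pdata.click_flags, pdata.irq_cfg);
	return 0;
}
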
diff --git a/include/linux/major.h b/include/linux/major.h
index 058ec15dd060..6a8ca98c9a96 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -145,6 +145,7 @@
145#define UNIX98_PTY_MAJOR_COUNT 8 145#define UNIX98_PTY_MAJOR_COUNT 8
146#define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT) 146#define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT)
147 147
148#define DRBD_MAJOR 147
148#define RTF_MAJOR 150 149#define RTF_MAJOR 150
149#define RAW_MAJOR 162 150#define RAW_MAJOR 162
150 151
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 25b9ca93d232..45add35dda1b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -94,6 +94,7 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
94extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, 94extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
95 int priority); 95 int priority);
96int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); 96int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
97int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
97unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, 98unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
98 struct zone *zone, 99 struct zone *zone,
99 enum lru_list lru); 100 enum lru_list lru);
@@ -239,6 +240,12 @@ mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
239 return 1; 240 return 1;
240} 241}
241 242
243static inline int
244mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
245{
246 return 1;
247}
248
242static inline unsigned long 249static inline unsigned long
243mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, 250mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
244 enum lru_list lru) 251 enum lru_list lru)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ad613ed66ab0..d88d6fc530ad 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -7,7 +7,6 @@
7 7
8#include <linux/gfp.h> 8#include <linux/gfp.h>
9#include <linux/list.h> 9#include <linux/list.h>
10#include <linux/mmdebug.h>
11#include <linux/mmzone.h> 10#include <linux/mmzone.h>
12#include <linux/rbtree.h> 11#include <linux/rbtree.h>
13#include <linux/prio_tree.h> 12#include <linux/prio_tree.h>
@@ -725,7 +724,7 @@ static inline int shmem_lock(struct file *file, int lock,
725 return 0; 724 return 0;
726} 725}
727#endif 726#endif
728struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); 727struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
729 728
730int shmem_zero_setup(struct vm_area_struct *); 729int shmem_zero_setup(struct vm_area_struct *);
731 730
@@ -793,6 +792,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
793 struct vm_area_struct *vma); 792 struct vm_area_struct *vma);
794void unmap_mapping_range(struct address_space *mapping, 793void unmap_mapping_range(struct address_space *mapping,
795 loff_t const holebegin, loff_t const holelen, int even_cows); 794 loff_t const holebegin, loff_t const holelen, int even_cows);
795int follow_pfn(struct vm_area_struct *vma, unsigned long address,
796 unsigned long *pfn);
796int follow_phys(struct vm_area_struct *vma, unsigned long address, 797int follow_phys(struct vm_area_struct *vma, unsigned long address,
797 unsigned int flags, unsigned long *prot, resource_size_t *phys); 798 unsigned int flags, unsigned long *prot, resource_size_t *phys);
798int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 799int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -824,8 +825,11 @@ static inline int handle_mm_fault(struct mm_struct *mm,
824extern int make_pages_present(unsigned long addr, unsigned long end); 825extern int make_pages_present(unsigned long addr, unsigned long end);
825extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); 826extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
826 827
827int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, 828int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
828 int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); 829 unsigned long start, int len, int write, int force,
830 struct page **pages, struct vm_area_struct **vmas);
831int get_user_pages_fast(unsigned long start, int nr_pages, int write,
832 struct page **pages);
829 833
830extern int try_to_release_page(struct page * page, gfp_t gfp_mask); 834extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
831extern void do_invalidatepage(struct page *page, unsigned long offset); 835extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -850,19 +854,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
850 unsigned long end, unsigned long newflags); 854 unsigned long end, unsigned long newflags);
851 855
852/* 856/*
853 * get_user_pages_fast provides equivalent functionality to get_user_pages,
854 * operating on current and current->mm (force=0 and doesn't return any vmas).
855 *
856 * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
857 * can be made about locking. get_user_pages_fast is to be implemented in a
858 * way that is advantageous (vs get_user_pages()) when the user memory area is
859 * already faulted in and present in ptes. However if the pages have to be
860 * faulted in, it may turn out to be slightly slower).
861 */
862int get_user_pages_fast(unsigned long start, int nr_pages, int write,
863 struct page **pages);
864
865/*
866 * A callback you can register to apply pressure to ageable caches. 857 * A callback you can register to apply pressure to ageable caches.
867 * 858 *
868 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should 859 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
@@ -1061,7 +1052,8 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn);
1061extern void set_dma_reserve(unsigned long new_dma_reserve); 1052extern void set_dma_reserve(unsigned long new_dma_reserve);
1062extern void memmap_init_zone(unsigned long, int, unsigned long, 1053extern void memmap_init_zone(unsigned long, int, unsigned long,
1063 unsigned long, enum memmap_context); 1054 unsigned long, enum memmap_context);
1064extern void setup_per_zone_pages_min(void); 1055extern void setup_per_zone_wmarks(void);
1056extern void calculate_zone_inactive_ratio(struct zone *zone);
1065extern void mem_init(void); 1057extern void mem_init(void);
1066extern void __init mmap_init(void); 1058extern void __init mmap_init(void);
1067extern void show_mem(void); 1059extern void show_mem(void);
@@ -1178,8 +1170,6 @@ void task_dirty_inc(struct task_struct *tsk);
1178#define VM_MAX_READAHEAD 128 /* kbytes */ 1170#define VM_MAX_READAHEAD 128 /* kbytes */
1179#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ 1171#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
1180 1172
1181int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
1182 pgoff_t offset, unsigned long nr_to_read);
1183int force_page_cache_readahead(struct address_space *mapping, struct file *filp, 1173int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
1184 pgoff_t offset, unsigned long nr_to_read); 1174 pgoff_t offset, unsigned long nr_to_read);
1185 1175
@@ -1197,6 +1187,9 @@ void page_cache_async_readahead(struct address_space *mapping,
1197 unsigned long size); 1187 unsigned long size);
1198 1188
1199unsigned long max_sane_readahead(unsigned long nr); 1189unsigned long max_sane_readahead(unsigned long nr);
1190unsigned long ra_submit(struct file_ra_state *ra,
1191 struct address_space *mapping,
1192 struct file *filp);
1200 1193
1201/* Do stack extension */ 1194/* Do stack extension */
1202extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 1195extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0042090a4d70..7acc8439d9b3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -240,6 +240,8 @@ struct mm_struct {
240 240
241 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ 241 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
242 242
243 s8 oom_adj; /* OOM kill score adjustment (bit shift) */
244
243 cpumask_t cpu_vm_mask; 245 cpumask_t cpu_vm_mask;
244 246
245 /* Architecture-specific MM context */ 247 /* Architecture-specific MM context */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a47c879e1304..889598537370 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -50,9 +50,6 @@ extern int page_group_by_mobility_disabled;
50 50
51static inline int get_pageblock_migratetype(struct page *page) 51static inline int get_pageblock_migratetype(struct page *page)
52{ 52{
53 if (unlikely(page_group_by_mobility_disabled))
54 return MIGRATE_UNMOVABLE;
55
56 return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); 53 return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
57} 54}
58 55
@@ -86,13 +83,8 @@ enum zone_stat_item {
86 NR_ACTIVE_ANON, /* " " " " " */ 83 NR_ACTIVE_ANON, /* " " " " " */
87 NR_INACTIVE_FILE, /* " " " " " */ 84 NR_INACTIVE_FILE, /* " " " " " */
88 NR_ACTIVE_FILE, /* " " " " " */ 85 NR_ACTIVE_FILE, /* " " " " " */
89#ifdef CONFIG_UNEVICTABLE_LRU
90 NR_UNEVICTABLE, /* " " " " " */ 86 NR_UNEVICTABLE, /* " " " " " */
91 NR_MLOCK, /* mlock()ed pages found and moved off LRU */ 87 NR_MLOCK, /* mlock()ed pages found and moved off LRU */
92#else
93 NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
94 NR_MLOCK = NR_ACTIVE_FILE,
95#endif
96 NR_ANON_PAGES, /* Mapped anonymous pages */ 88 NR_ANON_PAGES, /* Mapped anonymous pages */
97 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 89 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
98 only modified from process context */ 90 only modified from process context */
@@ -135,11 +127,7 @@ enum lru_list {
135 LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, 127 LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
136 LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, 128 LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
137 LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, 129 LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
138#ifdef CONFIG_UNEVICTABLE_LRU
139 LRU_UNEVICTABLE, 130 LRU_UNEVICTABLE,
140#else
141 LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
142#endif
143 NR_LRU_LISTS 131 NR_LRU_LISTS
144}; 132};
145 133
@@ -159,13 +147,20 @@ static inline int is_active_lru(enum lru_list l)
159 147
160static inline int is_unevictable_lru(enum lru_list l) 148static inline int is_unevictable_lru(enum lru_list l)
161{ 149{
162#ifdef CONFIG_UNEVICTABLE_LRU
163 return (l == LRU_UNEVICTABLE); 150 return (l == LRU_UNEVICTABLE);
164#else
165 return 0;
166#endif
167} 151}
168 152
153enum zone_watermarks {
154 WMARK_MIN,
155 WMARK_LOW,
156 WMARK_HIGH,
157 NR_WMARK
158};
159
160#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
161#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
162#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
163
169struct per_cpu_pages { 164struct per_cpu_pages {
170 int count; /* number of pages in the list */ 165 int count; /* number of pages in the list */
171 int high; /* high watermark, emptying needed */ 166 int high; /* high watermark, emptying needed */
@@ -278,7 +273,10 @@ struct zone_reclaim_stat {
278 273
279struct zone { 274struct zone {
280 /* Fields commonly accessed by the page allocator */ 275 /* Fields commonly accessed by the page allocator */
281 unsigned long pages_min, pages_low, pages_high; 276
277 /* zone watermarks, access with *_wmark_pages(zone) macros */
278 unsigned long watermark[NR_WMARK];
279
282 /* 280 /*
283 * We don't know if the memory that we're going to allocate will be freeable 281 * We don't know if the memory that we're going to allocate will be freeable
284 * or/and it will be released eventually, so to avoid totally wasting several 282 * or/and it will be released eventually, so to avoid totally wasting several
@@ -323,9 +321,9 @@ struct zone {
323 321
324 /* Fields commonly accessed by the page reclaim scanner */ 322 /* Fields commonly accessed by the page reclaim scanner */
325 spinlock_t lru_lock; 323 spinlock_t lru_lock;
326 struct { 324 struct zone_lru {
327 struct list_head list; 325 struct list_head list;
328 unsigned long nr_scan; 326 unsigned long nr_saved_scan; /* accumulated for batching */
329 } lru[NR_LRU_LISTS]; 327 } lru[NR_LRU_LISTS];
330 328
331 struct zone_reclaim_stat reclaim_stat; 329 struct zone_reclaim_stat reclaim_stat;
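The three separate pages_min/pages_low/pages_high fields collapse into one watermark[NR_WMARK] array, and callers are expected to go through the *_wmark_pages() accessors. A minimal sketch of the new style, assuming <linux/mmzone.h> and <linux/vmstat.h>; the helper name is illustrative (the real consumer is zone_watermark_ok() in mm/page_alloc.c):

	/* illustrative helper: is this zone short of its low watermark? */
	static inline int zone_below_low_wmark(struct zone *z)
	{
		/* low_wmark_pages(z) expands to z->watermark[WMARK_LOW] */
		return zone_page_state(z, NR_FREE_PAGES) < low_wmark_pages(z);
	}
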
diff --git a/include/linux/module.h b/include/linux/module.h
index a7bc6e7b43a7..505f20dcc1c7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -697,4 +697,21 @@ static inline void module_remove_modinfo_attrs(struct module *mod)
697 697
698#define __MODULE_STRING(x) __stringify(x) 698#define __MODULE_STRING(x) __stringify(x)
699 699
700
701#ifdef CONFIG_GENERIC_BUG
702int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
703 struct module *);
704void module_bug_cleanup(struct module *);
705
706#else /* !CONFIG_GENERIC_BUG */
707
708static inline int module_bug_finalize(const Elf_Ehdr *hdr,
709 const Elf_Shdr *sechdrs,
710 struct module *mod)
711{
712 return 0;
713}
714static inline void module_bug_cleanup(struct module *mod) {}
715#endif /* CONFIG_GENERIC_BUG */
716
700#endif /* _LINUX_MODULE_H */ 717#endif /* _LINUX_MODULE_H */
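With the declarations and the !CONFIG_GENERIC_BUG stubs now living in module.h, the generic module loader can call them unconditionally. A rough sketch of the call site, assuming <linux/module.h>; the wrapper is invented for illustration (the real callers are in kernel/module.c):

	/* hypothetical wrapper: register a module's __bug_table entries */
	static int demo_module_bugs(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
				    struct module *mod)
	{
		/* with CONFIG_GENERIC_BUG this parses the bug table and may
		 * fail; without it the inline stub above returns 0 */
		return module_bug_finalize(hdr, sechdrs, mod);
	}
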
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 848025cd7087..829b94b156f2 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -408,6 +408,19 @@ static inline int num_node_state(enum node_states state)
408#define next_online_node(nid) next_node((nid), node_states[N_ONLINE]) 408#define next_online_node(nid) next_node((nid), node_states[N_ONLINE])
409 409
410extern int nr_node_ids; 410extern int nr_node_ids;
411extern int nr_online_nodes;
412
413static inline void node_set_online(int nid)
414{
415 node_set_state(nid, N_ONLINE);
416 nr_online_nodes = num_node_state(N_ONLINE);
417}
418
419static inline void node_set_offline(int nid)
420{
421 node_clear_state(nid, N_ONLINE);
422 nr_online_nodes = num_node_state(N_ONLINE);
423}
411#else 424#else
412 425
413static inline int node_state(int node, enum node_states state) 426static inline int node_state(int node, enum node_states state)
@@ -434,7 +447,10 @@ static inline int num_node_state(enum node_states state)
434#define first_online_node 0 447#define first_online_node 0
435#define next_online_node(nid) (MAX_NUMNODES) 448#define next_online_node(nid) (MAX_NUMNODES)
436#define nr_node_ids 1 449#define nr_node_ids 1
450#define nr_online_nodes 1
437 451
452#define node_set_online(node) node_set_state((node), N_ONLINE)
453#define node_set_offline(node) node_clear_state((node), N_ONLINE)
438#endif 454#endif
439 455
440#define node_online_map node_states[N_ONLINE] 456#define node_online_map node_states[N_ONLINE]
@@ -454,9 +470,6 @@ static inline int num_node_state(enum node_states state)
454#define node_online(node) node_state((node), N_ONLINE) 470#define node_online(node) node_state((node), N_ONLINE)
455#define node_possible(node) node_state((node), N_POSSIBLE) 471#define node_possible(node) node_state((node), N_POSSIBLE)
456 472
457#define node_set_online(node) node_set_state((node), N_ONLINE)
458#define node_set_offline(node) node_clear_state((node), N_ONLINE)
459
460#define for_each_node(node) for_each_node_state(node, N_POSSIBLE) 473#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
461#define for_each_online_node(node) for_each_node_state(node, N_ONLINE) 474#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
462 475
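nr_online_nodes caches num_node_state(N_ONLINE), so hot paths can branch on the node count with a plain load instead of weighing a nodemask; node_set_online()/node_set_offline() keep the cache coherent. Illustrative use, assuming <linux/nodemask.h> (the helper is hypothetical):

	/* hypothetical predicate for NUMA-aware placement decisions */
	static inline int demo_numa_matters(void)
	{
		/* cheap load instead of hweight over node_states[N_ONLINE] */
		return nr_online_nodes > 1;
	}
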
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 62214c7d2d93..d6792f88a176 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -95,9 +95,7 @@ enum pageflags {
95 PG_reclaim, /* To be reclaimed asap */ 95 PG_reclaim, /* To be reclaimed asap */
96 PG_buddy, /* Page is free, on buddy lists */ 96 PG_buddy, /* Page is free, on buddy lists */
97 PG_swapbacked, /* Page is backed by RAM/swap */ 97 PG_swapbacked, /* Page is backed by RAM/swap */
98#ifdef CONFIG_UNEVICTABLE_LRU
99 PG_unevictable, /* Page is "unevictable" */ 98 PG_unevictable, /* Page is "unevictable" */
100#endif
101#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 99#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
102 PG_mlocked, /* Page is vma mlocked */ 100 PG_mlocked, /* Page is vma mlocked */
103#endif 101#endif
@@ -248,14 +246,8 @@ PAGEFLAG_FALSE(SwapCache)
248 SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache) 246 SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
249#endif 247#endif
250 248
251#ifdef CONFIG_UNEVICTABLE_LRU
252PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) 249PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
253 TESTCLEARFLAG(Unevictable, unevictable) 250 TESTCLEARFLAG(Unevictable, unevictable)
254#else
255PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
256 SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
257 __CLEARPAGEFLAG_NOOP(Unevictable)
258#endif
259 251
260#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 252#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
261#define MLOCK_PAGES 1 253#define MLOCK_PAGES 1
@@ -382,12 +374,6 @@ static inline void __ClearPageTail(struct page *page)
382 374
383#endif /* !PAGEFLAGS_EXTENDED */ 375#endif /* !PAGEFLAGS_EXTENDED */
384 376
385#ifdef CONFIG_UNEVICTABLE_LRU
386#define __PG_UNEVICTABLE (1 << PG_unevictable)
387#else
388#define __PG_UNEVICTABLE 0
389#endif
390
391#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 377#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
392#define __PG_MLOCKED (1 << PG_mlocked) 378#define __PG_MLOCKED (1 << PG_mlocked)
393#else 379#else
@@ -403,7 +389,7 @@ static inline void __ClearPageTail(struct page *page)
403 1 << PG_private | 1 << PG_private_2 | \ 389 1 << PG_private | 1 << PG_private_2 | \
404 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 390 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
405 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 391 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
406 __PG_UNEVICTABLE | __PG_MLOCKED) 392 1 << PG_unevictable | __PG_MLOCKED)
407 393
408/* 394/*
409 * Flags checked when a page is prepped for return by the page allocator. 395 * Flags checked when a page is prepped for return by the page allocator.
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 34da5230faab..aec3252afcf5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -22,9 +22,7 @@ enum mapping_flags {
22 AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ 22 AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
23 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ 23 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
24 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ 24 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
25#ifdef CONFIG_UNEVICTABLE_LRU
26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ 25 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
27#endif
28}; 26};
29 27
30static inline void mapping_set_error(struct address_space *mapping, int error) 28static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -37,8 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
37 } 35 }
38} 36}
39 37
40#ifdef CONFIG_UNEVICTABLE_LRU
41
42static inline void mapping_set_unevictable(struct address_space *mapping) 38static inline void mapping_set_unevictable(struct address_space *mapping)
43{ 39{
44 set_bit(AS_UNEVICTABLE, &mapping->flags); 40 set_bit(AS_UNEVICTABLE, &mapping->flags);
@@ -55,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
55 return test_bit(AS_UNEVICTABLE, &mapping->flags); 51 return test_bit(AS_UNEVICTABLE, &mapping->flags);
56 return !!mapping; 52 return !!mapping;
57} 53}
58#else
59static inline void mapping_set_unevictable(struct address_space *mapping) { }
60static inline void mapping_clear_unevictable(struct address_space *mapping) { }
61static inline int mapping_unevictable(struct address_space *mapping)
62{
63 return 0;
64}
65#endif
66 54
67static inline gfp_t mapping_gfp_mask(struct address_space * mapping) 55static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
68{ 56{
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 8c24ef8d9976..fa287f25138d 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -32,6 +32,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
32 32
33typedef struct poll_table_struct { 33typedef struct poll_table_struct {
34 poll_queue_proc qproc; 34 poll_queue_proc qproc;
35 unsigned long key;
35} poll_table; 36} poll_table;
36 37
37static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) 38static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -43,10 +44,12 @@ static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_addres
43static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) 44static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
44{ 45{
45 pt->qproc = qproc; 46 pt->qproc = qproc;
47 pt->key = ~0UL; /* all events enabled */
46} 48}
47 49
48struct poll_table_entry { 50struct poll_table_entry {
49 struct file *filp; 51 struct file *filp;
52 unsigned long key;
50 wait_queue_t wait; 53 wait_queue_t wait;
51 wait_queue_head_t *wait_address; 54 wait_queue_head_t *wait_address;
52}; 55};
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 355f6e80db0d..c5da74918096 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -167,6 +167,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
167 unsigned long first_index, unsigned int max_items); 167 unsigned long first_index, unsigned int max_items);
168unsigned long radix_tree_next_hole(struct radix_tree_root *root, 168unsigned long radix_tree_next_hole(struct radix_tree_root *root,
169 unsigned long index, unsigned long max_scan); 169 unsigned long index, unsigned long max_scan);
170unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
171 unsigned long index, unsigned long max_scan);
170int radix_tree_preload(gfp_t gfp_mask); 172int radix_tree_preload(gfp_t gfp_mask);
171void radix_tree_init(void); 173void radix_tree_init(void);
172void *radix_tree_tag_set(struct radix_tree_root *root, 174void *radix_tree_tag_set(struct radix_tree_root *root,
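radix_tree_prev_hole() is the downward-scanning counterpart of radix_tree_next_hole(): it returns the first empty slot at or below index. Readahead uses it to size a run of already-cached pages ending at the current offset, roughly like this sketch (assuming <linux/radix-tree.h>, <linux/rcupdate.h> and <linux/fs.h>; the helper is illustrative):

	/* hypothetical helper: count cached pages immediately behind index */
	static unsigned long demo_history_pages(struct address_space *mapping,
						pgoff_t index, unsigned long max)
	{
		pgoff_t head;

		rcu_read_lock();
		head = radix_tree_prev_hole(&mapping->page_tree, index - 1, max);
		rcu_read_unlock();

		return index - 1 - head;	/* pages present in (head, index) */
	}
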
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b35bc0e19cd9..216d024f830d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
83/* 83/*
84 * Called from mm/vmscan.c to handle paging out 84 * Called from mm/vmscan.c to handle paging out
85 */ 85 */
86int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt); 86int page_referenced(struct page *, int is_locked,
87 struct mem_cgroup *cnt, unsigned long *vm_flags);
87int try_to_unmap(struct page *, int ignore_refs); 88int try_to_unmap(struct page *, int ignore_refs);
88 89
89/* 90/*
@@ -105,18 +106,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
105 */ 106 */
106int page_mkclean(struct page *); 107int page_mkclean(struct page *);
107 108
108#ifdef CONFIG_UNEVICTABLE_LRU
109/* 109/*
110 * called in munlock()/munmap() path to check for other vmas holding 110 * called in munlock()/munmap() path to check for other vmas holding
111 * the page mlocked. 111 * the page mlocked.
112 */ 112 */
113int try_to_munlock(struct page *); 113int try_to_munlock(struct page *);
114#else
115static inline int try_to_munlock(struct page *page)
116{
117 return 0; /* a.k.a. SWAP_SUCCESS */
118}
119#endif
120 114
121#else /* !CONFIG_MMU */ 115#else /* !CONFIG_MMU */
122 116
@@ -124,7 +118,7 @@ static inline int try_to_munlock(struct page *page)
124#define anon_vma_prepare(vma) (0) 118#define anon_vma_prepare(vma) (0)
125#define anon_vma_link(vma) do {} while (0) 119#define anon_vma_link(vma) do {} while (0)
126 120
127#define page_referenced(page,l,cnt) TestClearPageReferenced(page) 121#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
128#define try_to_unmap(page, refs) SWAP_FAIL 122#define try_to_unmap(page, refs) SWAP_FAIL
129 123
130static inline int page_mkclean(struct page *page) 124static inline int page_mkclean(struct page *page)
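page_referenced() grows an out-parameter that ORs together the vm_flags of the VMAs found to reference the page; vmscan uses it to keep executable pages active longer. Sketch of the new calling convention, assuming <linux/rmap.h> and <linux/mm.h> (the helper is hypothetical):

	/* hypothetical helper: was the page referenced via an executable VMA? */
	static int demo_page_is_hot_text(struct page *page)
	{
		unsigned long vm_flags;
		int referenced;

		referenced = page_referenced(page, 1, NULL, &vm_flags);
		return referenced && (vm_flags & VM_EXEC);
	}
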
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7531b1c28201..02042e7f2196 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1178,7 +1178,6 @@ struct task_struct {
1178 * a short time 1178 * a short time
1179 */ 1179 */
1180 unsigned char fpu_counter; 1180 unsigned char fpu_counter;
1181 s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
1182#ifdef CONFIG_BLK_DEV_IO_TRACE 1181#ifdef CONFIG_BLK_DEV_IO_TRACE
1183 unsigned int btrace_seq; 1182 unsigned int btrace_seq;
1184#endif 1183#endif
@@ -1318,7 +1317,8 @@ struct task_struct {
1318/* Thread group tracking */ 1317/* Thread group tracking */
1319 u32 parent_exec_id; 1318 u32 parent_exec_id;
1320 u32 self_exec_id; 1319 u32 self_exec_id;
1321/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ 1320/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1321 * mempolicy */
1322 spinlock_t alloc_lock; 1322 spinlock_t alloc_lock;
1323 1323
1324#ifdef CONFIG_GENERIC_HARDIRQS 1324#ifdef CONFIG_GENERIC_HARDIRQS
@@ -1386,8 +1386,7 @@ struct task_struct {
1386 cputime_t acct_timexpd; /* stime + utime since last update */ 1386 cputime_t acct_timexpd; /* stime + utime since last update */
1387#endif 1387#endif
1388#ifdef CONFIG_CPUSETS 1388#ifdef CONFIG_CPUSETS
1389 nodemask_t mems_allowed; 1389 nodemask_t mems_allowed; /* Protected by alloc_lock */
1390 int cpuset_mems_generation;
1391 int cpuset_mem_spread_rotor; 1390 int cpuset_mem_spread_rotor;
1392#endif 1391#endif
1393#ifdef CONFIG_CGROUPS 1392#ifdef CONFIG_CGROUPS
@@ -1410,7 +1409,7 @@ struct task_struct {
1410 struct list_head perf_counter_list; 1409 struct list_head perf_counter_list;
1411#endif 1410#endif
1412#ifdef CONFIG_NUMA 1411#ifdef CONFIG_NUMA
1413 struct mempolicy *mempolicy; 1412 struct mempolicy *mempolicy; /* Protected by alloc_lock */
1414 short il_next; 1413 short il_next;
1415#endif 1414#endif
1416 atomic_t fs_excl; /* holding fs exclusive resources */ 1415 atomic_t fs_excl; /* holding fs exclusive resources */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index a69db820eed6..9e3d8af09207 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -177,7 +177,6 @@ static inline void init_call_single_data(void)
177 177
178#define get_cpu() ({ preempt_disable(); smp_processor_id(); }) 178#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
179#define put_cpu() preempt_enable() 179#define put_cpu() preempt_enable()
180#define put_cpu_no_resched() preempt_enable_no_resched()
181 180
182/* 181/*
183 * Callback to arch code if there's nosmp or maxcpus=0 on the 182 * Callback to arch code if there's nosmp or maxcpus=0 on the
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d476aad3ff57..0cedf31af0b0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -129,9 +129,10 @@ enum {
129 129
130#define SWAP_CLUSTER_MAX 32 130#define SWAP_CLUSTER_MAX 32
131 131
132#define SWAP_MAP_MAX 0x7fff 132#define SWAP_MAP_MAX 0x7ffe
133#define SWAP_MAP_BAD 0x8000 133#define SWAP_MAP_BAD 0x7fff
134 134#define SWAP_HAS_CACHE 0x8000 /* the slot's page is in the swap cache */
135#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE)
135/* 136/*
136 * The in-memory structure used to track swap areas. 137 * The in-memory structure used to track swap areas.
137 */ 138 */
@@ -235,7 +236,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
235} 236}
236#endif 237#endif
237 238
238#ifdef CONFIG_UNEVICTABLE_LRU
239extern int page_evictable(struct page *page, struct vm_area_struct *vma); 239extern int page_evictable(struct page *page, struct vm_area_struct *vma);
240extern void scan_mapping_unevictable_pages(struct address_space *); 240extern void scan_mapping_unevictable_pages(struct address_space *);
241 241
@@ -244,24 +244,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
244 void __user *, size_t *, loff_t *); 244 void __user *, size_t *, loff_t *);
245extern int scan_unevictable_register_node(struct node *node); 245extern int scan_unevictable_register_node(struct node *node);
246extern void scan_unevictable_unregister_node(struct node *node); 246extern void scan_unevictable_unregister_node(struct node *node);
247#else
248static inline int page_evictable(struct page *page,
249 struct vm_area_struct *vma)
250{
251 return 1;
252}
253
254static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
255{
256}
257
258static inline int scan_unevictable_register_node(struct node *node)
259{
260 return 0;
261}
262
263static inline void scan_unevictable_unregister_node(struct node *node) { }
264#endif
265 247
266extern int kswapd_run(int nid); 248extern int kswapd_run(int nid);
267 249
@@ -274,7 +256,7 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
274 256
275#ifdef CONFIG_SWAP 257#ifdef CONFIG_SWAP
276/* linux/mm/page_io.c */ 258/* linux/mm/page_io.c */
277extern int swap_readpage(struct file *, struct page *); 259extern int swap_readpage(struct page *);
278extern int swap_writepage(struct page *page, struct writeback_control *wbc); 260extern int swap_writepage(struct page *page, struct writeback_control *wbc);
279extern void end_swap_bio_read(struct bio *bio, int err); 261extern void end_swap_bio_read(struct bio *bio, int err);
280 262
@@ -300,9 +282,11 @@ extern long total_swap_pages;
300extern void si_swapinfo(struct sysinfo *); 282extern void si_swapinfo(struct sysinfo *);
301extern swp_entry_t get_swap_page(void); 283extern swp_entry_t get_swap_page(void);
302extern swp_entry_t get_swap_page_of_type(int); 284extern swp_entry_t get_swap_page_of_type(int);
303extern int swap_duplicate(swp_entry_t); 285extern void swap_duplicate(swp_entry_t);
286extern int swapcache_prepare(swp_entry_t);
304extern int valid_swaphandles(swp_entry_t, unsigned long *); 287extern int valid_swaphandles(swp_entry_t, unsigned long *);
305extern void swap_free(swp_entry_t); 288extern void swap_free(swp_entry_t);
289extern void swapcache_free(swp_entry_t, struct page *page);
306extern int free_swap_and_cache(swp_entry_t); 290extern int free_swap_and_cache(swp_entry_t);
307extern int swap_type_of(dev_t, sector_t, struct block_device **); 291extern int swap_type_of(dev_t, sector_t, struct block_device **);
308extern unsigned int count_swap_pages(int, int); 292extern unsigned int count_swap_pages(int, int);
@@ -370,12 +354,20 @@ static inline void show_swap_cache_info(void)
370} 354}
371 355
372#define free_swap_and_cache(swp) is_migration_entry(swp) 356#define free_swap_and_cache(swp) is_migration_entry(swp)
373#define swap_duplicate(swp) is_migration_entry(swp) 357#define swapcache_prepare(swp) is_migration_entry(swp)
358
359static inline void swap_duplicate(swp_entry_t swp)
360{
361}
374 362
375static inline void swap_free(swp_entry_t swp) 363static inline void swap_free(swp_entry_t swp)
376{ 364{
377} 365}
378 366
367static inline void swapcache_free(swp_entry_t swp, struct page *page)
368{
369}
370
379static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, 371static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
380 struct vm_area_struct *vma, unsigned long addr) 372 struct vm_area_struct *vma, unsigned long addr)
381{ 373{
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 418d90f5effe..fa4242cdade8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -434,6 +434,7 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
434asmlinkage long sys_fcntl64(unsigned int fd, 434asmlinkage long sys_fcntl64(unsigned int fd,
435 unsigned int cmd, unsigned long arg); 435 unsigned int cmd, unsigned long arg);
436#endif 436#endif
437asmlinkage long sys_pipe(int __user *fildes);
437asmlinkage long sys_pipe2(int __user *fildes, int flags); 438asmlinkage long sys_pipe2(int __user *fildes, int flags);
438asmlinkage long sys_dup(unsigned int fildes); 439asmlinkage long sys_dup(unsigned int fildes);
439asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); 440asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
@@ -751,8 +752,6 @@ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
751asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, 752asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
752 struct timespec __user *, const sigset_t __user *, 753 struct timespec __user *, const sigset_t __user *,
753 size_t); 754 size_t);
754asmlinkage long sys_pipe2(int __user *, int);
755asmlinkage long sys_pipe(int __user *);
756 755
757int kernel_execve(const char *filename, char *const argv[], char *const envp[]); 756int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
758 757
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 9910e3bd5b31..e6967d10d9e5 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -280,6 +280,9 @@ extern int do_adjtimex(struct timex *);
280 280
281int read_current_timer(unsigned long *timer_val); 281int read_current_timer(unsigned long *timer_val);
282 282
283/* The clock frequency of the i8253/i8254 PIT */
284#define PIT_TICK_RATE 1193182ul
285
283#endif /* KERNEL */ 286#endif /* KERNEL */
284 287
285#endif /* LINUX_TIMEX_H */ 288#endif /* LINUX_TIMEX_H */
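PIT_TICK_RATE moves out of the per-arch 8253pit.h headers and into timex.h. Its classic consumer is the PIT latch value; roughly, assuming <linux/timex.h> and a defined HZ (the macro name is illustrative):

	/* divisor programmed into the i8253/i8254 for a HZ-rate interrupt,
	 * rounded to the nearest integer */
	#define DEMO_PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
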
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 11232676bfff..3656b300de3a 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -22,12 +22,12 @@ struct old_utsname {
22}; 22};
23 23
24struct new_utsname { 24struct new_utsname {
25 char sysname[65]; 25 char sysname[__NEW_UTS_LEN + 1];
26 char nodename[65]; 26 char nodename[__NEW_UTS_LEN + 1];
27 char release[65]; 27 char release[__NEW_UTS_LEN + 1];
28 char version[65]; 28 char version[__NEW_UTS_LEN + 1];
29 char machine[65]; 29 char machine[__NEW_UTS_LEN + 1];
30 char domainname[65]; 30 char domainname[__NEW_UTS_LEN + 1];
31}; 31};
32 32
33#ifdef __KERNEL__ 33#ifdef __KERNEL__
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h
new file mode 100644
index 000000000000..8f6a95882b09
--- /dev/null
+++ b/include/linux/vlynq.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19#ifndef __VLYNQ_H__
20#define __VLYNQ_H__
21
22#include <linux/device.h>
23#include <linux/module.h>
24#include <linux/types.h>
25
26#define VLYNQ_NUM_IRQS 32
27
28struct vlynq_mapping {
29 u32 size;
30 u32 offset;
31};
32
33enum vlynq_divisor {
34 vlynq_div_auto = 0,
35 vlynq_ldiv1,
36 vlynq_ldiv2,
37 vlynq_ldiv3,
38 vlynq_ldiv4,
39 vlynq_ldiv5,
40 vlynq_ldiv6,
41 vlynq_ldiv7,
42 vlynq_ldiv8,
43 vlynq_rdiv1,
44 vlynq_rdiv2,
45 vlynq_rdiv3,
46 vlynq_rdiv4,
47 vlynq_rdiv5,
48 vlynq_rdiv6,
49 vlynq_rdiv7,
50 vlynq_rdiv8,
51 vlynq_div_external
52};
53
54struct vlynq_device_id {
55 u32 id;
56 enum vlynq_divisor divisor;
57 unsigned long driver_data;
58};
59
60struct vlynq_regs;
61struct vlynq_device {
62 u32 id, dev_id;
63 int local_irq;
64 int remote_irq;
65 enum vlynq_divisor divisor;
66 u32 regs_start, regs_end;
67 u32 mem_start, mem_end;
68 u32 irq_start, irq_end;
69 int irq;
70 int enabled;
71 struct vlynq_regs *local;
72 struct vlynq_regs *remote;
73 struct device dev;
74};
75
76struct vlynq_driver {
77 char *name;
78 struct vlynq_device_id *id_table;
79 int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id);
80 void (*remove)(struct vlynq_device *dev);
81 struct device_driver driver;
82};
83
84struct plat_vlynq_ops {
85 int (*on)(struct vlynq_device *dev);
86 void (*off)(struct vlynq_device *dev);
87};
88
89static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv)
90{
91 return container_of(drv, struct vlynq_driver, driver);
92}
93
94static inline struct vlynq_device *to_vlynq_device(struct device *device)
95{
96 return container_of(device, struct vlynq_device, dev);
97}
98
99extern struct bus_type vlynq_bus_type;
100
101extern int __vlynq_register_driver(struct vlynq_driver *driver,
102 struct module *owner);
103
104static inline int vlynq_register_driver(struct vlynq_driver *driver)
105{
106 return __vlynq_register_driver(driver, THIS_MODULE);
107}
108
109static inline void *vlynq_get_drvdata(struct vlynq_device *dev)
110{
111 return dev_get_drvdata(&dev->dev);
112}
113
114static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data)
115{
116 dev_set_drvdata(&dev->dev, data);
117}
118
119static inline u32 vlynq_mem_start(struct vlynq_device *dev)
120{
121 return dev->mem_start;
122}
123
124static inline u32 vlynq_mem_end(struct vlynq_device *dev)
125{
126 return dev->mem_end;
127}
128
129static inline u32 vlynq_mem_len(struct vlynq_device *dev)
130{
131 return dev->mem_end - dev->mem_start + 1;
132}
133
134static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq)
135{
136 int irq = dev->irq_start + virq;
137 if ((irq < dev->irq_start) || (irq > dev->irq_end))
138 return -EINVAL;
139
140 return irq;
141}
142
143static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq)
144{
145 if ((irq < dev->irq_start) || (irq > dev->irq_end))
146 return -EINVAL;
147
148 return irq - dev->irq_start;
149}
150
151extern void vlynq_unregister_driver(struct vlynq_driver *driver);
152extern int vlynq_enable_device(struct vlynq_device *dev);
153extern void vlynq_disable_device(struct vlynq_device *dev);
154extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
155 struct vlynq_mapping *mapping);
156extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
157 struct vlynq_mapping *mapping);
158extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq);
159extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq);
160
161#endif /* __VLYNQ_H__ */
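A minimal, hypothetical client of the new bus API; the device ID, names and callbacks below are invented for illustration:

	static struct vlynq_device_id demo_ids[] = {
		{ .id = 0x0029, .divisor = vlynq_div_auto },	/* placeholder ID */
		{ }
	};

	static int demo_probe(struct vlynq_device *dev, struct vlynq_device_id *id)
	{
		if (vlynq_enable_device(dev))
			return -ENODEV;
		vlynq_set_drvdata(dev, NULL);	/* stash per-device state here */
		return 0;
	}

	static void demo_remove(struct vlynq_device *dev)
	{
		vlynq_disable_device(dev);
	}

	static struct vlynq_driver demo_driver = {
		.name		= "demo",
		.id_table	= demo_ids,
		.probe		= demo_probe,
		.remove		= demo_remove,
	};

	/* call vlynq_register_driver(&demo_driver) from the module init path
	 * and vlynq_unregister_driver(&demo_driver) on exit */
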
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 524cd1b28ecb..81a97cf8f0a0 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -36,12 +36,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
36 FOR_ALL_ZONES(PGSTEAL), 36 FOR_ALL_ZONES(PGSTEAL),
37 FOR_ALL_ZONES(PGSCAN_KSWAPD), 37 FOR_ALL_ZONES(PGSCAN_KSWAPD),
38 FOR_ALL_ZONES(PGSCAN_DIRECT), 38 FOR_ALL_ZONES(PGSCAN_DIRECT),
39#ifdef CONFIG_NUMA
40 PGSCAN_ZONE_RECLAIM_FAILED,
41#endif
39 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, 42 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
40 PAGEOUTRUN, ALLOCSTALL, PGROTATED, 43 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
41#ifdef CONFIG_HUGETLB_PAGE 44#ifdef CONFIG_HUGETLB_PAGE
42 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, 45 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
43#endif 46#endif
44#ifdef CONFIG_UNEVICTABLE_LRU
45 UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ 47 UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
46 UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ 48 UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
47 UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ 49 UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
@@ -50,7 +52,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
50 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ 52 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
51 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ 53 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
52 UNEVICTABLE_MLOCKFREED, 54 UNEVICTABLE_MLOCKFREED,
53#endif
54 NR_VM_EVENT_ITEMS 55 NR_VM_EVENT_ITEMS
55}; 56};
56 57
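PGSCAN_ZONE_RECLAIM_FAILED gives NUMA zone reclaim a visible failure count. The intended accounting pattern, sketched (the wrapper is hypothetical; the real site is in mm/vmscan.c and is only compiled under CONFIG_NUMA):

	/* hypothetical wrapper: account a failed zone-reclaim attempt */
	static int demo_zone_reclaim(struct zone *zone, gfp_t mask,
				     unsigned int order)
	{
		int ret = zone_reclaim(zone, mask, order);

		if (!ret)
			count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
		return ret;
	}
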
diff --git a/include/video/s1d13xxxfb.h b/include/video/s1d13xxxfb.h
index c3b2a2aa7140..f0736cff2ca3 100644
--- a/include/video/s1d13xxxfb.h
+++ b/include/video/s1d13xxxfb.h
@@ -136,6 +136,15 @@
136#define S1DREG_DELAYOFF 0xFFFE 136#define S1DREG_DELAYOFF 0xFFFE
137#define S1DREG_DELAYON 0xFFFF 137#define S1DREG_DELAYON 0xFFFF
138 138
139#define BBLT_FIFO_EMPTY 0x00
140#define BBLT_FIFO_NOT_EMPTY 0x40
141#define BBLT_FIFO_NOT_FULL 0x30
142#define BBLT_FIFO_HALF_FULL 0x20
143#define BBLT_FIFO_FULL 0x10
144
145#define BBLT_SOLID_FILL 0x0c
146
147
139/* Note: all above defines should go in separate header files 148/* Note: all above defines should go in separate header files
140 when implementing other S1D13xxx chip support. */ 149 when implementing other S1D13xxx chip support. */
141 150
diff --git a/init/main.c b/init/main.c
index 7becd8b5c5bf..7756ddad3c85 100644
--- a/init/main.c
+++ b/init/main.c
@@ -671,7 +671,6 @@ asmlinkage void __init start_kernel(void)
671 initrd_start = 0; 671 initrd_start = 0;
672 } 672 }
673#endif 673#endif
674 cpuset_init_early();
675 page_cgroup_init(); 674 page_cgroup_init();
676 enable_debug_pagealloc(); 675 enable_debug_pagealloc();
677 cpu_hotplug_init(); 676 cpu_hotplug_init();
@@ -868,6 +867,11 @@ static noinline int init_post(void)
868static int __init kernel_init(void * unused) 867static int __init kernel_init(void * unused)
869{ 868{
870 lock_kernel(); 869 lock_kernel();
870
871 /*
872 * init can allocate pages on any node
873 */
874 set_mems_allowed(node_possible_map);
871 /* 875 /*
872 * init can run on any cpu. 876 * init can run on any cpu.
873 */ 877 */
diff --git a/kernel/Makefile b/kernel/Makefile
index 90b53f6dc226..9df4501cb921 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,6 +11,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ 12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
13 async.o 13 async.o
14obj-y += groups.o
14 15
15ifdef CONFIG_FUNCTION_TRACER 16ifdef CONFIG_FUNCTION_TRACER
16# Do not trace debug files and internal ftrace files 17# Do not trace debug files and internal ftrace files
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d5a7e17474ee..7e75a41bd508 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -97,12 +97,6 @@ struct cpuset {
97 97
98 struct cpuset *parent; /* my parent */ 98 struct cpuset *parent; /* my parent */
99 99
100 /*
101 * Copy of global cpuset_mems_generation as of the most
102 * recent time this cpuset changed its mems_allowed.
103 */
104 int mems_generation;
105
106 struct fmeter fmeter; /* memory_pressure filter */ 100 struct fmeter fmeter; /* memory_pressure filter */
107 101
108 /* partition number for rebuild_sched_domains() */ 102 /* partition number for rebuild_sched_domains() */
@@ -176,27 +170,6 @@ static inline int is_spread_slab(const struct cpuset *cs)
176 return test_bit(CS_SPREAD_SLAB, &cs->flags); 170 return test_bit(CS_SPREAD_SLAB, &cs->flags);
177} 171}
178 172
179/*
180 * Increment this integer everytime any cpuset changes its
181 * mems_allowed value. Users of cpusets can track this generation
182 * number, and avoid having to lock and reload mems_allowed unless
183 * the cpuset they're using changes generation.
184 *
185 * A single, global generation is needed because cpuset_attach_task() could
186 * reattach a task to a different cpuset, which must not have its
187 * generation numbers aliased with those of that tasks previous cpuset.
188 *
189 * Generations are needed for mems_allowed because one task cannot
190 * modify another's memory placement. So we must enable every task,
191 * on every visit to __alloc_pages(), to efficiently check whether
192 * its current->cpuset->mems_allowed has changed, requiring an update
193 * of its current->mems_allowed.
194 *
195 * Since writes to cpuset_mems_generation are guarded by the cgroup lock
196 * there is no need to mark it atomic.
197 */
198static int cpuset_mems_generation;
199
200static struct cpuset top_cpuset = { 173static struct cpuset top_cpuset = {
201 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), 174 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
202}; 175};
@@ -228,8 +201,9 @@ static struct cpuset top_cpuset = {
228 * If a task is only holding callback_mutex, then it has read-only 201 * If a task is only holding callback_mutex, then it has read-only
229 * access to cpusets. 202 * access to cpusets.
230 * 203 *
231 * The task_struct fields mems_allowed and mems_generation may only 204 * The task_struct fields mems_allowed and mempolicy may be changed
232 * be accessed in the context of that task, so require no locks. 205 * by another task, so we use alloc_lock in the task_struct to
 206 * protect them.
233 * 207 *
234 * The cpuset_common_file_read() handlers only hold callback_mutex across 208 * The cpuset_common_file_read() handlers only hold callback_mutex across
235 * small pieces of code, such as when reading out possibly multi-word 209 * small pieces of code, such as when reading out possibly multi-word
@@ -331,75 +305,22 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
331 BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY])); 305 BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
332} 306}
333 307
334/** 308/*
335 * cpuset_update_task_memory_state - update task memory placement 309 * update task's spread flag if cpuset's page/slab spread flag is set
336 * 310 *
337 * If the current tasks cpusets mems_allowed changed behind our 311 * Called with callback_mutex/cgroup_mutex held
338 * backs, update current->mems_allowed, mems_generation and task NUMA
339 * mempolicy to the new value.
340 *
341 * Task mempolicy is updated by rebinding it relative to the
342 * current->cpuset if a task has its memory placement changed.
343 * Do not call this routine if in_interrupt().
344 *
345 * Call without callback_mutex or task_lock() held. May be
346 * called with or without cgroup_mutex held. Thanks in part to
347 * 'the_top_cpuset_hack', the task's cpuset pointer will never
348 * be NULL. This routine also might acquire callback_mutex during
349 * call.
350 *
351 * Reading current->cpuset->mems_generation doesn't need task_lock
352 * to guard the current->cpuset derefence, because it is guarded
353 * from concurrent freeing of current->cpuset using RCU.
354 *
355 * The rcu_dereference() is technically probably not needed,
356 * as I don't actually mind if I see a new cpuset pointer but
357 * an old value of mems_generation. However this really only
358 * matters on alpha systems using cpusets heavily. If I dropped
359 * that rcu_dereference(), it would save them a memory barrier.
360 * For all other arch's, rcu_dereference is a no-op anyway, and for
361 * alpha systems not using cpusets, another planned optimization,
362 * avoiding the rcu critical section for tasks in the root cpuset
363 * which is statically allocated, so can't vanish, will make this
364 * irrelevant. Better to use RCU as intended, than to engage in
365 * some cute trick to save a memory barrier that is impossible to
366 * test, for alpha systems using cpusets heavily, which might not
367 * even exist.
368 *
369 * This routine is needed to update the per-task mems_allowed data,
370 * within the tasks context, when it is trying to allocate memory
371 * (in various mm/mempolicy.c routines) and notices that some other
372 * task has been modifying its cpuset.
373 */ 312 */
374 313static void cpuset_update_task_spread_flag(struct cpuset *cs,
375void cpuset_update_task_memory_state(void) 314 struct task_struct *tsk)
376{ 315{
377 int my_cpusets_mem_gen; 316 if (is_spread_page(cs))
378 struct task_struct *tsk = current; 317 tsk->flags |= PF_SPREAD_PAGE;
379 struct cpuset *cs; 318 else
380 319 tsk->flags &= ~PF_SPREAD_PAGE;
381 rcu_read_lock(); 320 if (is_spread_slab(cs))
382 my_cpusets_mem_gen = task_cs(tsk)->mems_generation; 321 tsk->flags |= PF_SPREAD_SLAB;
383 rcu_read_unlock(); 322 else
384 323 tsk->flags &= ~PF_SPREAD_SLAB;
385 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
386 mutex_lock(&callback_mutex);
387 task_lock(tsk);
388 cs = task_cs(tsk); /* Maybe changed when task not locked */
389 guarantee_online_mems(cs, &tsk->mems_allowed);
390 tsk->cpuset_mems_generation = cs->mems_generation;
391 if (is_spread_page(cs))
392 tsk->flags |= PF_SPREAD_PAGE;
393 else
394 tsk->flags &= ~PF_SPREAD_PAGE;
395 if (is_spread_slab(cs))
396 tsk->flags |= PF_SPREAD_SLAB;
397 else
398 tsk->flags &= ~PF_SPREAD_SLAB;
399 task_unlock(tsk);
400 mutex_unlock(&callback_mutex);
401 mpol_rebind_task(tsk, &tsk->mems_allowed);
402 }
403} 324}
404 325
405/* 326/*
@@ -1007,14 +928,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
1007 * other task, the task_struct mems_allowed that we are hacking 928 * other task, the task_struct mems_allowed that we are hacking
1008 * is for our current task, which must allocate new pages for that 929 * is for our current task, which must allocate new pages for that
1009 * migrating memory region. 930 * migrating memory region.
1010 *
1011 * We call cpuset_update_task_memory_state() before hacking
1012 * our tasks mems_allowed, so that we are assured of being in
1013 * sync with our tasks cpuset, and in particular, callbacks to
1014 * cpuset_update_task_memory_state() from nested page allocations
1015 * won't see any mismatch of our cpuset and task mems_generation
1016 * values, so won't overwrite our hacked tasks mems_allowed
1017 * nodemask.
1018 */ 931 */
1019 932
1020static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, 933static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
@@ -1022,22 +935,37 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1022{ 935{
1023 struct task_struct *tsk = current; 936 struct task_struct *tsk = current;
1024 937
1025 cpuset_update_task_memory_state();
1026
1027 mutex_lock(&callback_mutex);
1028 tsk->mems_allowed = *to; 938 tsk->mems_allowed = *to;
1029 mutex_unlock(&callback_mutex);
1030 939
1031 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); 940 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
1032 941
1033 mutex_lock(&callback_mutex);
1034 guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed); 942 guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
1035 mutex_unlock(&callback_mutex);
1036} 943}
1037 944
1038/* 945/*
1039 * Rebind task's vmas to cpuset's new mems_allowed, and migrate pages to new 946 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1040 * nodes if memory_migrate flag is set. Called with cgroup_mutex held. 947 * @tsk: the task to change
 948 * @newmems: the nodes that the task will be allowed to use
949 *
950 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
951 * we structure updates as setting all new allowed nodes, then clearing newly
952 * disallowed ones.
953 *
954 * Called with task's alloc_lock held
955 */
956static void cpuset_change_task_nodemask(struct task_struct *tsk,
957 nodemask_t *newmems)
958{
959 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
960 mpol_rebind_task(tsk, &tsk->mems_allowed);
961 mpol_rebind_task(tsk, newmems);
962 tsk->mems_allowed = *newmems;
963}
964
965/*
966 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
967 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
968 * memory_migrate flag is set. Called with cgroup_mutex held.
1041 */ 969 */
1042static void cpuset_change_nodemask(struct task_struct *p, 970static void cpuset_change_nodemask(struct task_struct *p,
1043 struct cgroup_scanner *scan) 971 struct cgroup_scanner *scan)
@@ -1046,12 +974,19 @@ static void cpuset_change_nodemask(struct task_struct *p,
1046 struct cpuset *cs; 974 struct cpuset *cs;
1047 int migrate; 975 int migrate;
1048 const nodemask_t *oldmem = scan->data; 976 const nodemask_t *oldmem = scan->data;
977 nodemask_t newmems;
978
979 cs = cgroup_cs(scan->cg);
980 guarantee_online_mems(cs, &newmems);
981
982 task_lock(p);
983 cpuset_change_task_nodemask(p, &newmems);
984 task_unlock(p);
1049 985
1050 mm = get_task_mm(p); 986 mm = get_task_mm(p);
1051 if (!mm) 987 if (!mm)
1052 return; 988 return;
1053 989
1054 cs = cgroup_cs(scan->cg);
1055 migrate = is_memory_migrate(cs); 990 migrate = is_memory_migrate(cs);
1056 991
1057 mpol_rebind_mm(mm, &cs->mems_allowed); 992 mpol_rebind_mm(mm, &cs->mems_allowed);
@@ -1104,10 +1039,10 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1104/* 1039/*
1105 * Handle user request to change the 'mems' memory placement 1040 * Handle user request to change the 'mems' memory placement
1106 * of a cpuset. Needs to validate the request, update the 1041 * of a cpuset. Needs to validate the request, update the
1107 * cpusets mems_allowed and mems_generation, and for each 1042 * cpusets mems_allowed, and for each task in the cpuset,
1108 * task in the cpuset, rebind any vma mempolicies and if 1043 * update mems_allowed and rebind task's mempolicy and any vma
1109 * the cpuset is marked 'memory_migrate', migrate the tasks 1044 * mempolicies and if the cpuset is marked 'memory_migrate',
1110 * pages to the new memory. 1045 * migrate the tasks pages to the new memory.
1111 * 1046 *
1112 * Call with cgroup_mutex held. May take callback_mutex during call. 1047 * Call with cgroup_mutex held. May take callback_mutex during call.
1113 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, 1048 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
@@ -1160,7 +1095,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1160 1095
1161 mutex_lock(&callback_mutex); 1096 mutex_lock(&callback_mutex);
1162 cs->mems_allowed = trialcs->mems_allowed; 1097 cs->mems_allowed = trialcs->mems_allowed;
1163 cs->mems_generation = cpuset_mems_generation++;
1164 mutex_unlock(&callback_mutex); 1098 mutex_unlock(&callback_mutex);
1165 1099
1166 update_tasks_nodemask(cs, &oldmem, &heap); 1100 update_tasks_nodemask(cs, &oldmem, &heap);
@@ -1193,6 +1127,46 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
1193} 1127}
1194 1128
1195/* 1129/*
1130 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1131 * @tsk: task to be updated
1132 * @scan: struct cgroup_scanner containing the cgroup of the task
1133 *
1134 * Called by cgroup_scan_tasks() for each task in a cgroup.
1135 *
1136 * We don't need to re-check for the cgroup/cpuset membership, since we're
1137 * holding cgroup_lock() at this point.
1138 */
1139static void cpuset_change_flag(struct task_struct *tsk,
1140 struct cgroup_scanner *scan)
1141{
1142 cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
1143}
1144
1145/*
1146 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1147 * @cs: the cpuset in which each task's spread flags needs to be changed
1148 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1149 *
1150 * Called with cgroup_mutex held
1151 *
1152 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1153 * calling callback functions for each.
1154 *
1155 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1156 * if @heap != NULL.
1157 */
1158static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1159{
1160 struct cgroup_scanner scan;
1161
1162 scan.cg = cs->css.cgroup;
1163 scan.test_task = NULL;
1164 scan.process_task = cpuset_change_flag;
1165 scan.heap = heap;
1166 cgroup_scan_tasks(&scan);
1167}
1168
1169/*
1196 * update_flag - read a 0 or a 1 in a file and update associated flag 1170 * update_flag - read a 0 or a 1 in a file and update associated flag
1197 * bit: the bit to update (see cpuset_flagbits_t) 1171 * bit: the bit to update (see cpuset_flagbits_t)
1198 * cs: the cpuset to update 1172 * cs: the cpuset to update
@@ -1205,8 +1179,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1205 int turning_on) 1179 int turning_on)
1206{ 1180{
1207 struct cpuset *trialcs; 1181 struct cpuset *trialcs;
1208 int err;
1209 int balance_flag_changed; 1182 int balance_flag_changed;
1183 int spread_flag_changed;
1184 struct ptr_heap heap;
1185 int err;
1210 1186
1211 trialcs = alloc_trial_cpuset(cs); 1187 trialcs = alloc_trial_cpuset(cs);
1212 if (!trialcs) 1188 if (!trialcs)
@@ -1221,9 +1197,16 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1221 if (err < 0) 1197 if (err < 0)
1222 goto out; 1198 goto out;
1223 1199
1200 err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1201 if (err < 0)
1202 goto out;
1203
1224 balance_flag_changed = (is_sched_load_balance(cs) != 1204 balance_flag_changed = (is_sched_load_balance(cs) !=
1225 is_sched_load_balance(trialcs)); 1205 is_sched_load_balance(trialcs));
1226 1206
1207 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1208 || (is_spread_page(cs) != is_spread_page(trialcs)));
1209
1227 mutex_lock(&callback_mutex); 1210 mutex_lock(&callback_mutex);
1228 cs->flags = trialcs->flags; 1211 cs->flags = trialcs->flags;
1229 mutex_unlock(&callback_mutex); 1212 mutex_unlock(&callback_mutex);
@@ -1231,6 +1214,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1231 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 1214 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1232 async_rebuild_sched_domains(); 1215 async_rebuild_sched_domains();
1233 1216
1217 if (spread_flag_changed)
1218 update_tasks_flags(cs, &heap);
1219 heap_free(&heap);
1234out: 1220out:
1235 free_trial_cpuset(trialcs); 1221 free_trial_cpuset(trialcs);
1236 return err; 1222 return err;
@@ -1372,15 +1358,20 @@ static void cpuset_attach(struct cgroup_subsys *ss,
1372 1358
1373 if (cs == &top_cpuset) { 1359 if (cs == &top_cpuset) {
1374 cpumask_copy(cpus_attach, cpu_possible_mask); 1360 cpumask_copy(cpus_attach, cpu_possible_mask);
1361 to = node_possible_map;
1375 } else { 1362 } else {
1376 mutex_lock(&callback_mutex);
1377 guarantee_online_cpus(cs, cpus_attach); 1363 guarantee_online_cpus(cs, cpus_attach);
1378 mutex_unlock(&callback_mutex); 1364 guarantee_online_mems(cs, &to);
1379 } 1365 }
1380 err = set_cpus_allowed_ptr(tsk, cpus_attach); 1366 err = set_cpus_allowed_ptr(tsk, cpus_attach);
1381 if (err) 1367 if (err)
1382 return; 1368 return;
1383 1369
1370 task_lock(tsk);
1371 cpuset_change_task_nodemask(tsk, &to);
1372 task_unlock(tsk);
1373 cpuset_update_task_spread_flag(cs, tsk);
1374
1384 from = oldcs->mems_allowed; 1375 from = oldcs->mems_allowed;
1385 to = cs->mems_allowed; 1376 to = cs->mems_allowed;
1386 mm = get_task_mm(tsk); 1377 mm = get_task_mm(tsk);
@@ -1442,11 +1433,9 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1442 break; 1433 break;
1443 case FILE_SPREAD_PAGE: 1434 case FILE_SPREAD_PAGE:
1444 retval = update_flag(CS_SPREAD_PAGE, cs, val); 1435 retval = update_flag(CS_SPREAD_PAGE, cs, val);
1445 cs->mems_generation = cpuset_mems_generation++;
1446 break; 1436 break;
1447 case FILE_SPREAD_SLAB: 1437 case FILE_SPREAD_SLAB:
1448 retval = update_flag(CS_SPREAD_SLAB, cs, val); 1438 retval = update_flag(CS_SPREAD_SLAB, cs, val);
1449 cs->mems_generation = cpuset_mems_generation++;
1450 break; 1439 break;
1451 default: 1440 default:
1452 retval = -EINVAL; 1441 retval = -EINVAL;
@@ -1786,8 +1775,6 @@ static struct cgroup_subsys_state *cpuset_create(
1786 struct cpuset *parent; 1775 struct cpuset *parent;
1787 1776
1788 if (!cont->parent) { 1777 if (!cont->parent) {
1789 /* This is early initialization for the top cgroup */
1790 top_cpuset.mems_generation = cpuset_mems_generation++;
1791 return &top_cpuset.css; 1778 return &top_cpuset.css;
1792 } 1779 }
1793 parent = cgroup_cs(cont->parent); 1780 parent = cgroup_cs(cont->parent);
@@ -1799,7 +1786,6 @@ static struct cgroup_subsys_state *cpuset_create(
1799 return ERR_PTR(-ENOMEM); 1786 return ERR_PTR(-ENOMEM);
1800 } 1787 }
1801 1788
1802 cpuset_update_task_memory_state();
1803 cs->flags = 0; 1789 cs->flags = 0;
1804 if (is_spread_page(parent)) 1790 if (is_spread_page(parent))
1805 set_bit(CS_SPREAD_PAGE, &cs->flags); 1791 set_bit(CS_SPREAD_PAGE, &cs->flags);
@@ -1808,7 +1794,6 @@ static struct cgroup_subsys_state *cpuset_create(
1808 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1794 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1809 cpumask_clear(cs->cpus_allowed); 1795 cpumask_clear(cs->cpus_allowed);
1810 nodes_clear(cs->mems_allowed); 1796 nodes_clear(cs->mems_allowed);
1811 cs->mems_generation = cpuset_mems_generation++;
1812 fmeter_init(&cs->fmeter); 1797 fmeter_init(&cs->fmeter);
1813 cs->relax_domain_level = -1; 1798 cs->relax_domain_level = -1;
1814 1799
@@ -1827,8 +1812,6 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
1827{ 1812{
1828 struct cpuset *cs = cgroup_cs(cont); 1813 struct cpuset *cs = cgroup_cs(cont);
1829 1814
1830 cpuset_update_task_memory_state();
1831
1832 if (is_sched_load_balance(cs)) 1815 if (is_sched_load_balance(cs))
1833 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 1816 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1834 1817
@@ -1849,21 +1832,6 @@ struct cgroup_subsys cpuset_subsys = {
1849 .early_init = 1, 1832 .early_init = 1,
1850}; 1833};
1851 1834
1852/*
1853 * cpuset_init_early - just enough so that the calls to
1854 * cpuset_update_task_memory_state() in early init code
1855 * are harmless.
1856 */
1857
1858int __init cpuset_init_early(void)
1859{
1860 alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_NOWAIT);
1861
1862 top_cpuset.mems_generation = cpuset_mems_generation++;
1863 return 0;
1864}
1865
1866
1867/** 1835/**
1868 * cpuset_init - initialize cpusets at system boot 1836 * cpuset_init - initialize cpusets at system boot
1869 * 1837 *
@@ -1874,11 +1842,13 @@ int __init cpuset_init(void)
1874{ 1842{
1875 int err = 0; 1843 int err = 0;
1876 1844
1845 if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
1846 BUG();
1847
1877 cpumask_setall(top_cpuset.cpus_allowed); 1848 cpumask_setall(top_cpuset.cpus_allowed);
1878 nodes_setall(top_cpuset.mems_allowed); 1849 nodes_setall(top_cpuset.mems_allowed);
1879 1850
1880 fmeter_init(&top_cpuset.fmeter); 1851 fmeter_init(&top_cpuset.fmeter);
1881 top_cpuset.mems_generation = cpuset_mems_generation++;
1882 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); 1852 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1883 top_cpuset.relax_domain_level = -1; 1853 top_cpuset.relax_domain_level = -1;
1884 1854
diff --git a/kernel/groups.c b/kernel/groups.c
new file mode 100644
index 000000000000..2b45b2ee3964
--- /dev/null
+++ b/kernel/groups.c
@@ -0,0 +1,288 @@
1/*
2 * Supplementary group IDs
3 */
4#include <linux/cred.h>
5#include <linux/module.h>
6#include <linux/slab.h>
7#include <linux/security.h>
8#include <linux/syscalls.h>
9#include <asm/uaccess.h>
10
11/* init to 2 - one for init_task, one to ensure it is never freed */
12struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
13
14struct group_info *groups_alloc(int gidsetsize)
15{
16 struct group_info *group_info;
17 int nblocks;
18 int i;
19
20 nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
21 /* Make sure we always allocate at least one indirect block pointer */
22 nblocks = nblocks ? : 1;
23 group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
24 if (!group_info)
25 return NULL;
26 group_info->ngroups = gidsetsize;
27 group_info->nblocks = nblocks;
28 atomic_set(&group_info->usage, 1);
29
30 if (gidsetsize <= NGROUPS_SMALL)
31 group_info->blocks[0] = group_info->small_block;
32 else {
33 for (i = 0; i < nblocks; i++) {
34 gid_t *b;
35 b = (void *)__get_free_page(GFP_USER);
36 if (!b)
37 goto out_undo_partial_alloc;
38 group_info->blocks[i] = b;
39 }
40 }
41 return group_info;
42
43out_undo_partial_alloc:
44 while (--i >= 0) {
45 free_page((unsigned long)group_info->blocks[i]);
46 }
47 kfree(group_info);
48 return NULL;
49}
50
51EXPORT_SYMBOL(groups_alloc);
52
53void groups_free(struct group_info *group_info)
54{
55 if (group_info->blocks[0] != group_info->small_block) {
56 int i;
57 for (i = 0; i < group_info->nblocks; i++)
58 free_page((unsigned long)group_info->blocks[i]);
59 }
60 kfree(group_info);
61}
62
63EXPORT_SYMBOL(groups_free);
64
65/* export the group_info to a user-space array */
66static int groups_to_user(gid_t __user *grouplist,
67 const struct group_info *group_info)
68{
69 int i;
70 unsigned int count = group_info->ngroups;
71
72 for (i = 0; i < group_info->nblocks; i++) {
73 unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
74 unsigned int len = cp_count * sizeof(*grouplist);
75
76 if (copy_to_user(grouplist, group_info->blocks[i], len))
77 return -EFAULT;
78
79 grouplist += NGROUPS_PER_BLOCK;
80 count -= cp_count;
81 }
82 return 0;
83}
84
85/* fill a group_info from a user-space array - it must be allocated already */
86static int groups_from_user(struct group_info *group_info,
87 gid_t __user *grouplist)
88{
89 int i;
90 unsigned int count = group_info->ngroups;
91
92 for (i = 0; i < group_info->nblocks; i++) {
93 unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
94 unsigned int len = cp_count * sizeof(*grouplist);
95
96 if (copy_from_user(group_info->blocks[i], grouplist, len))
97 return -EFAULT;
98
99 grouplist += NGROUPS_PER_BLOCK;
100 count -= cp_count;
101 }
102 return 0;
103}
104
105/* a simple Shell sort */
106static void groups_sort(struct group_info *group_info)
107{
108 int base, max, stride;
109 int gidsetsize = group_info->ngroups;
110
111 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
112 ; /* nothing */
113 stride /= 3;
114
115 while (stride) {
116 max = gidsetsize - stride;
117 for (base = 0; base < max; base++) {
118 int left = base;
119 int right = left + stride;
120 gid_t tmp = GROUP_AT(group_info, right);
121
122 while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
123 GROUP_AT(group_info, right) =
124 GROUP_AT(group_info, left);
125 right = left;
126 left -= stride;
127 }
128 GROUP_AT(group_info, right) = tmp;
129 }
130 stride /= 3;
131 }
132}
133
134/* a simple bsearch */
135int groups_search(const struct group_info *group_info, gid_t grp)
136{
137 unsigned int left, right;
138
139 if (!group_info)
140 return 0;
141
142 left = 0;
143 right = group_info->ngroups;
144 while (left < right) {
145 unsigned int mid = (left+right)/2;
146 int cmp = grp - GROUP_AT(group_info, mid);
147 if (cmp > 0)
148 left = mid + 1;
149 else if (cmp < 0)
150 right = mid;
151 else
152 return 1;
153 }
154 return 0;
155}
156
157/**
158 * set_groups - Change a group subscription in a set of credentials
159 * @new: The newly prepared set of credentials to alter
160 * @group_info: The group list to install
161 *
162 * Validate a group subscription and, if valid, insert it into a set
163 * of credentials.
164 */
165int set_groups(struct cred *new, struct group_info *group_info)
166{
167 int retval;
168
169 retval = security_task_setgroups(group_info);
170 if (retval)
171 return retval;
172
173 put_group_info(new->group_info);
174 groups_sort(group_info);
175 get_group_info(group_info);
176 new->group_info = group_info;
177 return 0;
178}
179
180EXPORT_SYMBOL(set_groups);
181
182/**
183 * set_current_groups - Change current's group subscription
184 * @group_info: The group list to impose
185 *
186 * Validate a group subscription and, if valid, impose it upon current's task
187 * security record.
188 */
189int set_current_groups(struct group_info *group_info)
190{
191 struct cred *new;
192 int ret;
193
194 new = prepare_creds();
195 if (!new)
196 return -ENOMEM;
197
198 ret = set_groups(new, group_info);
199 if (ret < 0) {
200 abort_creds(new);
201 return ret;
202 }
203
204 return commit_creds(new);
205}
206
207EXPORT_SYMBOL(set_current_groups);
208
209SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
210{
211 const struct cred *cred = current_cred();
212 int i;
213
214 if (gidsetsize < 0)
215 return -EINVAL;
216
217 /* no need to grab task_lock here; it cannot change */
218 i = cred->group_info->ngroups;
219 if (gidsetsize) {
220 if (i > gidsetsize) {
221 i = -EINVAL;
222 goto out;
223 }
224 if (groups_to_user(grouplist, cred->group_info)) {
225 i = -EFAULT;
226 goto out;
227 }
228 }
229out:
230 return i;
231}
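From user space, the gidsetsize == 0 branch above is the standard size-query convention: call once with a zero size to learn the count, then call again with a suitably sized buffer. A minimal illustrative program:

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int i, n = getgroups(0, NULL);	/* query: returns the count */
	gid_t *list;

	if (n < 0)
		return 1;
	list = malloc((size_t)n * sizeof(*list));
	if (!list)
		return 1;
	if (getgroups(n, list) < 0) {	/* fetch: fills the array */
		free(list);
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("%u\n", (unsigned int)list[i]);
	free(list);
	return 0;
}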
232
233/*
234 * SMP: Our groups are copy-on-write. We can set them safely
235 * without another task interfering.
236 */
237
238SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
239{
240 struct group_info *group_info;
241 int retval;
242
243 if (!capable(CAP_SETGID))
244 return -EPERM;
245 if ((unsigned)gidsetsize > NGROUPS_MAX)
246 return -EINVAL;
247
248 group_info = groups_alloc(gidsetsize);
249 if (!group_info)
250 return -ENOMEM;
251 retval = groups_from_user(group_info, grouplist);
252 if (retval) {
253 put_group_info(group_info);
254 return retval;
255 }
256
257 retval = set_current_groups(group_info);
258 put_group_info(group_info);
259
260 return retval;
261}
262
263/*
 264 * Check whether we're fsgid/egid or in the supplementary groups.
265 */
266int in_group_p(gid_t grp)
267{
268 const struct cred *cred = current_cred();
269 int retval = 1;
270
271 if (grp != cred->fsgid)
272 retval = groups_search(cred->group_info, grp);
273 return retval;
274}
275
276EXPORT_SYMBOL(in_group_p);
277
278int in_egroup_p(gid_t grp)
279{
280 const struct cred *cred = current_cred();
281 int retval = 1;
282
283 if (grp != cred->egid)
284 retval = groups_search(cred->group_info, grp);
285 return retval;
286}
287
288EXPORT_SYMBOL(in_egroup_p);
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index bc41ad0f24f8..26539e3228e5 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -72,9 +72,9 @@ struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
72 72
73 /* 73 /*
74 * round up to the next power of 2, since our 'let the indices 74 * round up to the next power of 2, since our 'let the indices
75 * wrap' tachnique works only in this case. 75 * wrap' technique works only in this case.
76 */ 76 */
77 if (size & (size - 1)) { 77 if (!is_power_of_2(size)) {
78 BUG_ON(size > 0x80000000); 78 BUG_ON(size > 0x80000000);
79 size = roundup_pow_of_two(size); 79 size = roundup_pow_of_two(size);
80 } 80 }
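The replacement test is equivalent to the open-coded bit trick for any nonzero size: a power of two has exactly one bit set, so size & (size - 1) is zero. (The two differ only for size == 0, which is_power_of_2() rejects; callers are presumably expected to pass a nonzero size.) A user-space sketch of both helpers, assuming 32-bit sizes and illustrative names:

static int is_pow2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int r = 1;

	/* double until we reach or pass n */
	while (r < n)
		r <<= 1;
	return r;
}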
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 41c88fe40500..7fa441333529 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -9,6 +9,7 @@
9#include <linux/kthread.h> 9#include <linux/kthread.h>
10#include <linux/completion.h> 10#include <linux/completion.h>
11#include <linux/err.h> 11#include <linux/err.h>
12#include <linux/cpuset.h>
12#include <linux/unistd.h> 13#include <linux/unistd.h>
13#include <linux/file.h> 14#include <linux/file.h>
14#include <linux/module.h> 15#include <linux/module.h>
@@ -236,6 +237,7 @@ int kthreadd(void *unused)
236 ignore_signals(tsk); 237 ignore_signals(tsk);
237 set_user_nice(tsk, KTHREAD_NICE_LEVEL); 238 set_user_nice(tsk, KTHREAD_NICE_LEVEL);
238 set_cpus_allowed_ptr(tsk, cpu_all_mask); 239 set_cpus_allowed_ptr(tsk, cpu_all_mask);
240 set_mems_allowed(node_possible_map);
239 241
240 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; 242 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
241 243
diff --git a/kernel/power/process.c b/kernel/power/process.c
index ca634019497a..da2072d73811 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -117,9 +117,12 @@ int freeze_processes(void)
117 if (error) 117 if (error)
118 goto Exit; 118 goto Exit;
119 printk("done."); 119 printk("done.");
120
121 oom_killer_disable();
120 Exit: 122 Exit:
121 BUG_ON(in_atomic()); 123 BUG_ON(in_atomic());
122 printk("\n"); 124 printk("\n");
125
123 return error; 126 return error;
124} 127}
125 128
@@ -145,6 +148,8 @@ static void thaw_tasks(bool nosig_only)
145 148
146void thaw_processes(void) 149void thaw_processes(void)
147{ 150{
151 oom_killer_enable();
152
148 printk("Restarting tasks ... "); 153 printk("Restarting tasks ... ");
149 thaw_tasks(true); 154 thaw_tasks(true);
150 thaw_tasks(false); 155 thaw_tasks(false);
diff --git a/kernel/profile.c b/kernel/profile.c
index 28cf26ad2d24..69911b5745eb 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -365,7 +365,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
365 node = cpu_to_node(cpu); 365 node = cpu_to_node(cpu);
366 per_cpu(cpu_profile_flip, cpu) = 0; 366 per_cpu(cpu_profile_flip, cpu) = 0;
367 if (!per_cpu(cpu_profile_hits, cpu)[1]) { 367 if (!per_cpu(cpu_profile_hits, cpu)[1]) {
368 page = alloc_pages_node(node, 368 page = alloc_pages_exact_node(node,
369 GFP_KERNEL | __GFP_ZERO, 369 GFP_KERNEL | __GFP_ZERO,
370 0); 370 0);
371 if (!page) 371 if (!page)
@@ -373,7 +373,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
373 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); 373 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
374 } 374 }
375 if (!per_cpu(cpu_profile_hits, cpu)[0]) { 375 if (!per_cpu(cpu_profile_hits, cpu)[0]) {
376 page = alloc_pages_node(node, 376 page = alloc_pages_exact_node(node,
377 GFP_KERNEL | __GFP_ZERO, 377 GFP_KERNEL | __GFP_ZERO,
378 0); 378 0);
379 if (!page) 379 if (!page)
@@ -564,14 +564,14 @@ static int create_hash_tables(void)
564 int node = cpu_to_node(cpu); 564 int node = cpu_to_node(cpu);
565 struct page *page; 565 struct page *page;
566 566
567 page = alloc_pages_node(node, 567 page = alloc_pages_exact_node(node,
568 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 568 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
569 0); 569 0);
570 if (!page) 570 if (!page)
571 goto out_cleanup; 571 goto out_cleanup;
572 per_cpu(cpu_profile_hits, cpu)[1] 572 per_cpu(cpu_profile_hits, cpu)[1]
573 = (struct profile_hit *)page_address(page); 573 = (struct profile_hit *)page_address(page);
574 page = alloc_pages_node(node, 574 page = alloc_pages_exact_node(node,
575 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 575 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
576 0); 576 0);
577 if (!page) 577 if (!page)
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 521ed2004d63..09d7519557d3 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -319,6 +319,15 @@ cant_get_ref:
319EXPORT_SYMBOL(slow_work_enqueue); 319EXPORT_SYMBOL(slow_work_enqueue);
320 320
321/* 321/*
322 * Schedule a cull of the thread pool at some time in the near future
323 */
324static void slow_work_schedule_cull(void)
325{
326 mod_timer(&slow_work_cull_timer,
327 round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
328}
329
330/*
322 * Worker thread culling algorithm 331 * Worker thread culling algorithm
323 */ 332 */
324static bool slow_work_cull_thread(void) 333static bool slow_work_cull_thread(void)
@@ -335,8 +344,7 @@ static bool slow_work_cull_thread(void)
335 list_empty(&vslow_work_queue) && 344 list_empty(&vslow_work_queue) &&
336 atomic_read(&slow_work_thread_count) > 345 atomic_read(&slow_work_thread_count) >
337 slow_work_min_threads) { 346 slow_work_min_threads) {
338 mod_timer(&slow_work_cull_timer, 347 slow_work_schedule_cull();
339 jiffies + SLOW_WORK_CULL_TIMEOUT);
340 do_cull = true; 348 do_cull = true;
341 } 349 }
342 } 350 }
@@ -393,8 +401,7 @@ static int slow_work_thread(void *_data)
393 list_empty(&vslow_work_queue) && 401 list_empty(&vslow_work_queue) &&
394 atomic_read(&slow_work_thread_count) > 402 atomic_read(&slow_work_thread_count) >
395 slow_work_min_threads) 403 slow_work_min_threads)
396 mod_timer(&slow_work_cull_timer, 404 slow_work_schedule_cull();
397 jiffies + SLOW_WORK_CULL_TIMEOUT);
398 continue; 405 continue;
399 } 406 }
400 407
@@ -458,7 +465,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
458 if (atomic_dec_and_test(&slow_work_thread_count)) 465 if (atomic_dec_and_test(&slow_work_thread_count))
459 BUG(); /* we're running on a slow work thread... */ 466 BUG(); /* we're running on a slow work thread... */
460 mod_timer(&slow_work_oom_timer, 467 mod_timer(&slow_work_oom_timer,
461 jiffies + SLOW_WORK_OOM_TIMEOUT); 468 round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
462 } else { 469 } else {
463 /* ratelimit the starting of new threads */ 470 /* ratelimit the starting of new threads */
464 mod_timer(&slow_work_oom_timer, jiffies + 1); 471 mod_timer(&slow_work_oom_timer, jiffies + 1);
@@ -502,8 +509,7 @@ static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
502 if (n < 0 && !slow_work_may_not_start_new_thread) 509 if (n < 0 && !slow_work_may_not_start_new_thread)
503 slow_work_enqueue(&slow_work_new_thread); 510 slow_work_enqueue(&slow_work_new_thread);
504 else if (n > 0) 511 else if (n > 0)
505 mod_timer(&slow_work_cull_timer, 512 slow_work_schedule_cull();
506 jiffies + SLOW_WORK_CULL_TIMEOUT);
507 } 513 }
508 mutex_unlock(&slow_work_user_lock); 514 mutex_unlock(&slow_work_user_lock);
509 } 515 }
@@ -529,8 +535,7 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
529 atomic_read(&slow_work_thread_count); 535 atomic_read(&slow_work_thread_count);
530 536
531 if (n < 0) 537 if (n < 0)
532 mod_timer(&slow_work_cull_timer, 538 slow_work_schedule_cull();
533 jiffies + SLOW_WORK_CULL_TIMEOUT);
534 } 539 }
535 mutex_unlock(&slow_work_user_lock); 540 mutex_unlock(&slow_work_user_lock);
536 } 541 }
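The repeated mod_timer() calls collapse into the single slow_work_schedule_cull() helper, and the expiry is now passed through round_jiffies(), which nudges the deadline to a whole-second boundary so that timers firing around the same time can batch their wakeups. A rough user-space analogy of the rounding, assuming 1000 ticks per second; the real helper rounds to the nearest second and skews the result per CPU to spread wakeups across processors:

#define TICKS_PER_SEC 1000UL

static unsigned long round_to_second(unsigned long deadline)
{
	/* round up to the next whole-second tick boundary */
	return ((deadline + TICKS_PER_SEC - 1) / TICKS_PER_SEC)
		* TICKS_PER_SEC;
}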
diff --git a/kernel/sys.c b/kernel/sys.c
index 438d99a38c87..b3f1097c76fa 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1113,289 +1113,6 @@ out:
1113 return err; 1113 return err;
1114} 1114}
1115 1115
1116/*
1117 * Supplementary group IDs
1118 */
1119
1120/* init to 2 - one for init_task, one to ensure it is never freed */
1121struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
1122
1123struct group_info *groups_alloc(int gidsetsize)
1124{
1125 struct group_info *group_info;
1126 int nblocks;
1127 int i;
1128
1129 nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
1130 /* Make sure we always allocate at least one indirect block pointer */
1131 nblocks = nblocks ? : 1;
1132 group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
1133 if (!group_info)
1134 return NULL;
1135 group_info->ngroups = gidsetsize;
1136 group_info->nblocks = nblocks;
1137 atomic_set(&group_info->usage, 1);
1138
1139 if (gidsetsize <= NGROUPS_SMALL)
1140 group_info->blocks[0] = group_info->small_block;
1141 else {
1142 for (i = 0; i < nblocks; i++) {
1143 gid_t *b;
1144 b = (void *)__get_free_page(GFP_USER);
1145 if (!b)
1146 goto out_undo_partial_alloc;
1147 group_info->blocks[i] = b;
1148 }
1149 }
1150 return group_info;
1151
1152out_undo_partial_alloc:
1153 while (--i >= 0) {
1154 free_page((unsigned long)group_info->blocks[i]);
1155 }
1156 kfree(group_info);
1157 return NULL;
1158}
1159
1160EXPORT_SYMBOL(groups_alloc);
1161
1162void groups_free(struct group_info *group_info)
1163{
1164 if (group_info->blocks[0] != group_info->small_block) {
1165 int i;
1166 for (i = 0; i < group_info->nblocks; i++)
1167 free_page((unsigned long)group_info->blocks[i]);
1168 }
1169 kfree(group_info);
1170}
1171
1172EXPORT_SYMBOL(groups_free);
1173
1174/* export the group_info to a user-space array */
1175static int groups_to_user(gid_t __user *grouplist,
1176 const struct group_info *group_info)
1177{
1178 int i;
1179 unsigned int count = group_info->ngroups;
1180
1181 for (i = 0; i < group_info->nblocks; i++) {
1182 unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
1183 unsigned int len = cp_count * sizeof(*grouplist);
1184
1185 if (copy_to_user(grouplist, group_info->blocks[i], len))
1186 return -EFAULT;
1187
1188 grouplist += NGROUPS_PER_BLOCK;
1189 count -= cp_count;
1190 }
1191 return 0;
1192}
1193
1194/* fill a group_info from a user-space array - it must be allocated already */
1195static int groups_from_user(struct group_info *group_info,
1196 gid_t __user *grouplist)
1197{
1198 int i;
1199 unsigned int count = group_info->ngroups;
1200
1201 for (i = 0; i < group_info->nblocks; i++) {
1202 unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
1203 unsigned int len = cp_count * sizeof(*grouplist);
1204
1205 if (copy_from_user(group_info->blocks[i], grouplist, len))
1206 return -EFAULT;
1207
1208 grouplist += NGROUPS_PER_BLOCK;
1209 count -= cp_count;
1210 }
1211 return 0;
1212}
1213
1214/* a simple Shell sort */
1215static void groups_sort(struct group_info *group_info)
1216{
1217 int base, max, stride;
1218 int gidsetsize = group_info->ngroups;
1219
1220 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
1221 ; /* nothing */
1222 stride /= 3;
1223
1224 while (stride) {
1225 max = gidsetsize - stride;
1226 for (base = 0; base < max; base++) {
1227 int left = base;
1228 int right = left + stride;
1229 gid_t tmp = GROUP_AT(group_info, right);
1230
1231 while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
1232 GROUP_AT(group_info, right) =
1233 GROUP_AT(group_info, left);
1234 right = left;
1235 left -= stride;
1236 }
1237 GROUP_AT(group_info, right) = tmp;
1238 }
1239 stride /= 3;
1240 }
1241}
1242
1243/* a simple bsearch */
1244int groups_search(const struct group_info *group_info, gid_t grp)
1245{
1246 unsigned int left, right;
1247
1248 if (!group_info)
1249 return 0;
1250
1251 left = 0;
1252 right = group_info->ngroups;
1253 while (left < right) {
1254 unsigned int mid = (left+right)/2;
1255 int cmp = grp - GROUP_AT(group_info, mid);
1256 if (cmp > 0)
1257 left = mid + 1;
1258 else if (cmp < 0)
1259 right = mid;
1260 else
1261 return 1;
1262 }
1263 return 0;
1264}
1265
1266/**
1267 * set_groups - Change a group subscription in a set of credentials
1268 * @new: The newly prepared set of credentials to alter
1269 * @group_info: The group list to install
1270 *
1271 * Validate a group subscription and, if valid, insert it into a set
1272 * of credentials.
1273 */
1274int set_groups(struct cred *new, struct group_info *group_info)
1275{
1276 int retval;
1277
1278 retval = security_task_setgroups(group_info);
1279 if (retval)
1280 return retval;
1281
1282 put_group_info(new->group_info);
1283 groups_sort(group_info);
1284 get_group_info(group_info);
1285 new->group_info = group_info;
1286 return 0;
1287}
1288
1289EXPORT_SYMBOL(set_groups);
1290
1291/**
1292 * set_current_groups - Change current's group subscription
1293 * @group_info: The group list to impose
1294 *
1295 * Validate a group subscription and, if valid, impose it upon current's task
1296 * security record.
1297 */
1298int set_current_groups(struct group_info *group_info)
1299{
1300 struct cred *new;
1301 int ret;
1302
1303 new = prepare_creds();
1304 if (!new)
1305 return -ENOMEM;
1306
1307 ret = set_groups(new, group_info);
1308 if (ret < 0) {
1309 abort_creds(new);
1310 return ret;
1311 }
1312
1313 return commit_creds(new);
1314}
1315
1316EXPORT_SYMBOL(set_current_groups);
1317
1318SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
1319{
1320 const struct cred *cred = current_cred();
1321 int i;
1322
1323 if (gidsetsize < 0)
1324 return -EINVAL;
1325
1326 /* no need to grab task_lock here; it cannot change */
1327 i = cred->group_info->ngroups;
1328 if (gidsetsize) {
1329 if (i > gidsetsize) {
1330 i = -EINVAL;
1331 goto out;
1332 }
1333 if (groups_to_user(grouplist, cred->group_info)) {
1334 i = -EFAULT;
1335 goto out;
1336 }
1337 }
1338out:
1339 return i;
1340}
1341
1342/*
1343 * SMP: Our groups are copy-on-write. We can set them safely
1344 * without another task interfering.
1345 */
1346
1347SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
1348{
1349 struct group_info *group_info;
1350 int retval;
1351
1352 if (!capable(CAP_SETGID))
1353 return -EPERM;
1354 if ((unsigned)gidsetsize > NGROUPS_MAX)
1355 return -EINVAL;
1356
1357 group_info = groups_alloc(gidsetsize);
1358 if (!group_info)
1359 return -ENOMEM;
1360 retval = groups_from_user(group_info, grouplist);
1361 if (retval) {
1362 put_group_info(group_info);
1363 return retval;
1364 }
1365
1366 retval = set_current_groups(group_info);
1367 put_group_info(group_info);
1368
1369 return retval;
1370}
1371
1372/*
1373 * Check whether we're fsgid/egid or in the supplemental group..
1374 */
1375int in_group_p(gid_t grp)
1376{
1377 const struct cred *cred = current_cred();
1378 int retval = 1;
1379
1380 if (grp != cred->fsgid)
1381 retval = groups_search(cred->group_info, grp);
1382 return retval;
1383}
1384
1385EXPORT_SYMBOL(in_group_p);
1386
1387int in_egroup_p(gid_t grp)
1388{
1389 const struct cred *cred = current_cred();
1390 int retval = 1;
1391
1392 if (grp != cred->egid)
1393 retval = groups_search(cred->group_info, grp);
1394 return retval;
1395}
1396
1397EXPORT_SYMBOL(in_egroup_p);
1398
1399DECLARE_RWSEM(uts_sem); 1116DECLARE_RWSEM(uts_sem);
1400 1117
1401SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) 1118SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f5c76b6cd616..ab462b9968d5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1337,7 +1337,6 @@ static struct ctl_table vm_table[] = {
1337 .extra2 = &one, 1337 .extra2 = &one,
1338 }, 1338 },
1339#endif 1339#endif
1340#ifdef CONFIG_UNEVICTABLE_LRU
1341 { 1340 {
1342 .ctl_name = CTL_UNNUMBERED, 1341 .ctl_name = CTL_UNNUMBERED,
1343 .procname = "scan_unevictable_pages", 1342 .procname = "scan_unevictable_pages",
@@ -1346,7 +1345,6 @@ static struct ctl_table vm_table[] = {
1346 .mode = 0644, 1345 .mode = 0644,
1347 .proc_handler = &scan_unevictable_handler, 1346 .proc_handler = &scan_unevictable_handler,
1348 }, 1347 },
1349#endif
1350/* 1348/*
1351 * NOTE: do not add new entries to this table unless you have read 1349 * NOTE: do not add new entries to this table unless you have read
1352 * Documentation/sysctl/ctl_unnumbered.txt 1350 * Documentation/sysctl/ctl_unnumbered.txt
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c31455541..e73822aa6e9a 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
19 */ 19 */
20int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) 20int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
21{ 21{
22#ifdef CONFIG_SMP
23 /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ 22 /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
24 if (atomic_add_unless(atomic, -1, 1)) 23 if (atomic_add_unless(atomic, -1, 1))
25 return 0; 24 return 0;
26#endif 25
27 /* Otherwise do it the slow way */ 26 /* Otherwise do it the slow way */
28 spin_lock(lock); 27 spin_lock(lock);
29 if (atomic_dec_and_test(atomic)) 28 if (atomic_dec_and_test(atomic))
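With the #ifdef CONFIG_SMP removed, uniprocessor builds now take the same lockless fast path as SMP ones. The resulting function, paraphrased (atomic_add_unless() returns nonzero when it performed the add, i.e. when the counter was not 1):

int dec_and_lock_sketch(atomic_t *atomic, spinlock_t *lock)
{
	/* fast path: drop a reference that is provably not the last one */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* slow path: might be the last reference, take the lock first */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;	/* caller now holds lock and owned the last ref */
	spin_unlock(lock);
	return 0;
}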
diff --git a/lib/genalloc.c b/lib/genalloc.c
index f6d276db2d58..eed2bdb865e7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
85 int bit, end_bit; 85 int bit, end_bit;
86 86
87 87
88 write_lock(&pool->lock);
89 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { 88 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
90 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 89 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
91 list_del(&chunk->next_chunk); 90 list_del(&chunk->next_chunk);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f07c0db81d26..39af2560f765 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
65 65
66 for (j = 0; j < ngroups; j++) 66 for (j = 0; j < ngroups; j++)
67 lx += scnprintf(linebuf + lx, linebuflen - lx, 67 lx += scnprintf(linebuf + lx, linebuflen - lx,
68 "%16.16llx ", (unsigned long long)*(ptr8 + j)); 68 "%s%16.16llx", j ? " " : "",
69 (unsigned long long)*(ptr8 + j));
69 ascii_column = 17 * ngroups + 2; 70 ascii_column = 17 * ngroups + 2;
70 break; 71 break;
71 } 72 }
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
76 77
77 for (j = 0; j < ngroups; j++) 78 for (j = 0; j < ngroups; j++)
78 lx += scnprintf(linebuf + lx, linebuflen - lx, 79 lx += scnprintf(linebuf + lx, linebuflen - lx,
79 "%8.8x ", *(ptr4 + j)); 80 "%s%8.8x", j ? " " : "", *(ptr4 + j));
80 ascii_column = 9 * ngroups + 2; 81 ascii_column = 9 * ngroups + 2;
81 break; 82 break;
82 } 83 }
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
87 88
88 for (j = 0; j < ngroups; j++) 89 for (j = 0; j < ngroups; j++)
89 lx += scnprintf(linebuf + lx, linebuflen - lx, 90 lx += scnprintf(linebuf + lx, linebuflen - lx,
90 "%4.4x ", *(ptr2 + j)); 91 "%s%4.4x", j ? " " : "", *(ptr2 + j));
91 ascii_column = 5 * ngroups + 2; 92 ascii_column = 5 * ngroups + 2;
92 break; 93 break;
93 } 94 }
94 95
95 default: 96 default:
96 for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen; 97 for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
97 j++) {
98 ch = ptr[j]; 98 ch = ptr[j];
99 linebuf[lx++] = hex_asc_hi(ch); 99 linebuf[lx++] = hex_asc_hi(ch);
100 linebuf[lx++] = hex_asc_lo(ch); 100 linebuf[lx++] = hex_asc_lo(ch);
101 linebuf[lx++] = ' '; 101 linebuf[lx++] = ' ';
102 } 102 }
103 if (j)
104 lx--;
105
103 ascii_column = 3 * rowsize + 2; 106 ascii_column = 3 * rowsize + 2;
104 break; 107 break;
105 } 108 }
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
108 111
109 while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) 112 while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
110 linebuf[lx++] = ' '; 113 linebuf[lx++] = ' ';
111 for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++) 114 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
112 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] 115 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
113 : '.'; 116 : '.';
114nil: 117nil:
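The format-string change is the leading-separator idiom: emit a space before every group except the first, so a row never ends in a trailing blank. In isolation:

#include <stdio.h>

int main(void)
{
	unsigned int groups[] = { 0xdeadbeef, 0xcafe, 0x1234 };
	int j;

	for (j = 0; j < 3; j++)
		printf("%s%8.8x", j ? " " : "", groups[j]);
	printf("\n");	/* prints: deadbeef 0000cafe 00001234 */
	return 0;
}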
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 4bb42a0344ec..23abbd93cae1 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
351} 351}
352EXPORT_SYMBOL(radix_tree_insert); 352EXPORT_SYMBOL(radix_tree_insert);
353 353
354/** 354/*
355 * radix_tree_lookup_slot - lookup a slot in a radix tree 355 * is_slot == 1 : search for the slot.
356 * @root: radix tree root 356 * is_slot == 0 : search for the node.
357 * @index: index key
358 *
359 * Returns: the slot corresponding to the position @index in the
360 * radix tree @root. This is useful for update-if-exists operations.
361 *
362 * This function can be called under rcu_read_lock iff the slot is not
363 * modified by radix_tree_replace_slot, otherwise it must be called
364 * exclusive from other writers. Any dereference of the slot must be done
365 * using radix_tree_deref_slot.
366 */ 357 */
367void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) 358static void *radix_tree_lookup_element(struct radix_tree_root *root,
359 unsigned long index, int is_slot)
368{ 360{
369 unsigned int height, shift; 361 unsigned int height, shift;
370 struct radix_tree_node *node, **slot; 362 struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
376 if (!radix_tree_is_indirect_ptr(node)) { 368 if (!radix_tree_is_indirect_ptr(node)) {
377 if (index > 0) 369 if (index > 0)
378 return NULL; 370 return NULL;
379 return (void **)&root->rnode; 371 return is_slot ? (void *)&root->rnode : node;
380 } 372 }
381 node = radix_tree_indirect_to_ptr(node); 373 node = radix_tree_indirect_to_ptr(node);
382 374
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
397 height--; 389 height--;
398 } while (height > 0); 390 } while (height > 0);
399 391
400 return (void **)slot; 392 return is_slot ? (void *)slot:node;
393}
394
395/**
396 * radix_tree_lookup_slot - lookup a slot in a radix tree
397 * @root: radix tree root
398 * @index: index key
399 *
400 * Returns: the slot corresponding to the position @index in the
401 * radix tree @root. This is useful for update-if-exists operations.
402 *
403 * This function can be called under rcu_read_lock iff the slot is not
404 * modified by radix_tree_replace_slot, otherwise it must be called
405 * exclusive from other writers. Any dereference of the slot must be done
406 * using radix_tree_deref_slot.
407 */
408void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
409{
410 return (void **)radix_tree_lookup_element(root, index, 1);
401} 411}
402EXPORT_SYMBOL(radix_tree_lookup_slot); 412EXPORT_SYMBOL(radix_tree_lookup_slot);
403 413
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
415 */ 425 */
416void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) 426void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
417{ 427{
418 unsigned int height, shift; 428 return radix_tree_lookup_element(root, index, 0);
419 struct radix_tree_node *node, **slot;
420
421 node = rcu_dereference(root->rnode);
422 if (node == NULL)
423 return NULL;
424
425 if (!radix_tree_is_indirect_ptr(node)) {
426 if (index > 0)
427 return NULL;
428 return node;
429 }
430 node = radix_tree_indirect_to_ptr(node);
431
432 height = node->height;
433 if (index > radix_tree_maxindex(height))
434 return NULL;
435
436 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
437
438 do {
439 slot = (struct radix_tree_node **)
440 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
441 node = rcu_dereference(*slot);
442 if (node == NULL)
443 return NULL;
444
445 shift -= RADIX_TREE_MAP_SHIFT;
446 height--;
447 } while (height > 0);
448
449 return node;
450} 429}
451EXPORT_SYMBOL(radix_tree_lookup); 430EXPORT_SYMBOL(radix_tree_lookup);
452 431
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
666} 645}
667EXPORT_SYMBOL(radix_tree_next_hole); 646EXPORT_SYMBOL(radix_tree_next_hole);
668 647
648/**
649 * radix_tree_prev_hole - find the prev hole (not-present entry)
650 * @root: tree root
651 * @index: index key
652 * @max_scan: maximum range to search
653 *
654 * Search backwards in the range [max(index-max_scan+1, 0), index]
655 * for the first hole.
656 *
657 * Returns: the index of the hole if found, otherwise returns an index
658 * outside of the set specified (in which case 'index - return >= max_scan'
659 * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
660 *
 661 * radix_tree_prev_hole may be called under rcu_read_lock. However, like
662 * radix_tree_gang_lookup, this will not atomically search a snapshot of
663 * the tree at a single point in time. For example, if a hole is created
664 * at index 10, then subsequently a hole is created at index 5,
665 * radix_tree_prev_hole covering both indexes may return 5 if called under
666 * rcu_read_lock.
667 */
668unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
669 unsigned long index, unsigned long max_scan)
670{
671 unsigned long i;
672
673 for (i = 0; i < max_scan; i++) {
674 if (!radix_tree_lookup(root, index))
675 break;
676 index--;
677 if (index == LONG_MAX)
678 break;
679 }
680
681 return index;
682}
683EXPORT_SYMBOL(radix_tree_prev_hole);
684
669static unsigned int 685static unsigned int
670__lookup(struct radix_tree_node *slot, void ***results, unsigned long index, 686__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
671 unsigned int max_items, unsigned long *next_index) 687 unsigned int max_items, unsigned long *next_index)
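The two exported lookup functions previously duplicated the whole tree walk; now a single static worker walks once and an is_slot flag selects whether the caller gets the slot (an address usable for in-place update) or the element stored there. The pattern, reduced to a self-contained toy with a flat slot array standing in for the tree:

#include <stddef.h>

struct toy_tree {
	void *slots[16];
};

static void *toy_lookup_element(struct toy_tree *t, unsigned long index,
				int is_slot)
{
	if (index >= 16)
		return NULL;
	/* slot: address for in-place update; element: the stored value */
	return is_slot ? (void *)&t->slots[index] : t->slots[index];
}

void **toy_lookup_slot(struct toy_tree *t, unsigned long index)
{
	return (void **)toy_lookup_element(t, index, 1);
}

void *toy_lookup(struct toy_tree *t, unsigned long index)
{
	return toy_lookup_element(t, index, 0);
}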
diff --git a/lib/rbtree.c b/lib/rbtree.c
index f653659e0bc1..e2aa3be29858 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
231 node = node->rb_right; 231 node = node->rb_right;
232 while ((left = node->rb_left) != NULL) 232 while ((left = node->rb_left) != NULL)
233 node = left; 233 node = left;
234
235 if (rb_parent(old)) {
236 if (rb_parent(old)->rb_left == old)
237 rb_parent(old)->rb_left = node;
238 else
239 rb_parent(old)->rb_right = node;
240 } else
241 root->rb_node = node;
242
234 child = node->rb_right; 243 child = node->rb_right;
235 parent = rb_parent(node); 244 parent = rb_parent(node);
236 color = rb_color(node); 245 color = rb_color(node);
237 246
238 if (child)
239 rb_set_parent(child, parent);
240 if (parent == old) { 247 if (parent == old) {
241 parent->rb_right = child;
242 parent = node; 248 parent = node;
243 } else 249 } else {
250 if (child)
251 rb_set_parent(child, parent);
244 parent->rb_left = child; 252 parent->rb_left = child;
245 253
254 node->rb_right = old->rb_right;
255 rb_set_parent(old->rb_right, node);
256 }
257
246 node->rb_parent_color = old->rb_parent_color; 258 node->rb_parent_color = old->rb_parent_color;
247 node->rb_right = old->rb_right;
248 node->rb_left = old->rb_left; 259 node->rb_left = old->rb_left;
249
250 if (rb_parent(old))
251 {
252 if (rb_parent(old)->rb_left == old)
253 rb_parent(old)->rb_left = node;
254 else
255 rb_parent(old)->rb_right = node;
256 } else
257 root->rb_node = node;
258
259 rb_set_parent(old->rb_left, node); 260 rb_set_parent(old->rb_left, node);
260 if (old->rb_right) 261
261 rb_set_parent(old->rb_right, node);
262 goto color; 262 goto color;
263 } 263 }
264 264
diff --git a/mm/Kconfig b/mm/Kconfig
index 6f4610a9ce55..c948d4ca8bde 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -203,25 +203,13 @@ config VIRT_TO_BUS
203 def_bool y 203 def_bool y
204 depends on !ARCH_NO_VIRT_TO_BUS 204 depends on !ARCH_NO_VIRT_TO_BUS
205 205
206config UNEVICTABLE_LRU
207 bool "Add LRU list to track non-evictable pages"
208 default y
209 help
210 Keeps unevictable pages off of the active and inactive pageout
211 lists, so kswapd will not waste CPU time or have its balancing
212 algorithms thrown off by scanning these pages. Selecting this
213 will use one page flag and increase the code size a little,
214 say Y unless you know what you are doing.
215
216 See Documentation/vm/unevictable-lru.txt for more information.
217
218config HAVE_MLOCK 206config HAVE_MLOCK
219 bool 207 bool
220 default y if MMU=y 208 default y if MMU=y
221 209
222config HAVE_MLOCKED_PAGE_BIT 210config HAVE_MLOCKED_PAGE_BIT
223 bool 211 bool
224 default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y 212 default y if HAVE_MLOCK=y
225 213
226config MMU_NOTIFIER 214config MMU_NOTIFIER
227 bool 215 bool
diff --git a/mm/Makefile b/mm/Makefile
index c379ce08354a..5e0bd6426693 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -12,6 +12,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
12 readahead.o swap.o truncate.o vmscan.o shmem.o \ 12 readahead.o swap.o truncate.o vmscan.o shmem.o \
13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ 13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
14 page_isolation.o mm_init.o $(mmu-y) 14 page_isolation.o mm_init.o $(mmu-y)
15obj-y += init-mm.o
15 16
16obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o 17obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
17obj-$(CONFIG_BOUNCE) += bounce.o 18obj-$(CONFIG_BOUNCE) += bounce.o
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 54a0f8040afa..e43359214f6f 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -101,7 +101,7 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
101 101
102 ret = force_page_cache_readahead(mapping, file, 102 ret = force_page_cache_readahead(mapping, file,
103 start_index, 103 start_index,
104 max_sane_readahead(nrpages)); 104 nrpages);
105 if (ret > 0) 105 if (ret > 0)
106 ret = 0; 106 ret = 0;
107 break; 107 break;
diff --git a/mm/filemap.c b/mm/filemap.c
index 1b60f30cebfa..22396713feb9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -521,7 +521,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
521{ 521{
522 if (cpuset_do_page_mem_spread()) { 522 if (cpuset_do_page_mem_spread()) {
523 int n = cpuset_mem_spread_node(); 523 int n = cpuset_mem_spread_node();
524 return alloc_pages_node(n, gfp, 0); 524 return alloc_pages_exact_node(n, gfp, 0);
525 } 525 }
526 return alloc_pages(gfp, 0); 526 return alloc_pages(gfp, 0);
527} 527}
@@ -1004,9 +1004,6 @@ EXPORT_SYMBOL(grab_cache_page_nowait);
1004static void shrink_readahead_size_eio(struct file *filp, 1004static void shrink_readahead_size_eio(struct file *filp,
1005 struct file_ra_state *ra) 1005 struct file_ra_state *ra)
1006{ 1006{
1007 if (!ra->ra_pages)
1008 return;
1009
1010 ra->ra_pages /= 4; 1007 ra->ra_pages /= 4;
1011} 1008}
1012 1009
@@ -1390,8 +1387,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
1390 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage) 1387 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1391 return -EINVAL; 1388 return -EINVAL;
1392 1389
1393 force_page_cache_readahead(mapping, filp, index, 1390 force_page_cache_readahead(mapping, filp, index, nr);
1394 max_sane_readahead(nr));
1395 return 0; 1391 return 0;
1396} 1392}
1397 1393
@@ -1457,6 +1453,73 @@ static int page_cache_read(struct file *file, pgoff_t offset)
1457 1453
1458#define MMAP_LOTSAMISS (100) 1454#define MMAP_LOTSAMISS (100)
1459 1455
1456/*
1457 * Synchronous readahead happens when we don't even find
1458 * a page in the page cache at all.
1459 */
1460static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1461 struct file_ra_state *ra,
1462 struct file *file,
1463 pgoff_t offset)
1464{
1465 unsigned long ra_pages;
1466 struct address_space *mapping = file->f_mapping;
1467
1468 /* If we don't want any read-ahead, don't bother */
1469 if (VM_RandomReadHint(vma))
1470 return;
1471
1472 if (VM_SequentialReadHint(vma) ||
1473 offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
1474 page_cache_sync_readahead(mapping, ra, file, offset,
1475 ra->ra_pages);
1476 return;
1477 }
1478
1479 if (ra->mmap_miss < INT_MAX)
1480 ra->mmap_miss++;
1481
1482 /*
1483 * Do we miss much more than hit in this file? If so,
1484 * stop bothering with read-ahead. It will only hurt.
1485 */
1486 if (ra->mmap_miss > MMAP_LOTSAMISS)
1487 return;
1488
1489 /*
1490 * mmap read-around
1491 */
1492 ra_pages = max_sane_readahead(ra->ra_pages);
1493 if (ra_pages) {
1494 ra->start = max_t(long, 0, offset - ra_pages/2);
1495 ra->size = ra_pages;
1496 ra->async_size = 0;
1497 ra_submit(ra, mapping, file);
1498 }
1499}
1500
1501/*
 1502/*
 1503 * Asynchronous readahead happens when we find the page with PG_readahead
 1504 * set, so we may want to extend the readahead further.
1504 */
1505static void do_async_mmap_readahead(struct vm_area_struct *vma,
1506 struct file_ra_state *ra,
1507 struct file *file,
1508 struct page *page,
1509 pgoff_t offset)
1510{
1511 struct address_space *mapping = file->f_mapping;
1512
1513 /* If we don't want any read-ahead, don't bother */
1514 if (VM_RandomReadHint(vma))
1515 return;
1516 if (ra->mmap_miss > 0)
1517 ra->mmap_miss--;
1518 if (PageReadahead(page))
1519 page_cache_async_readahead(mapping, ra, file,
1520 page, offset, ra->ra_pages);
1521}
1522
1460/** 1523/**
1461 * filemap_fault - read in file data for page fault handling 1524 * filemap_fault - read in file data for page fault handling
1462 * @vma: vma in which the fault was taken 1525 * @vma: vma in which the fault was taken
@@ -1476,78 +1539,44 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1476 struct address_space *mapping = file->f_mapping; 1539 struct address_space *mapping = file->f_mapping;
1477 struct file_ra_state *ra = &file->f_ra; 1540 struct file_ra_state *ra = &file->f_ra;
1478 struct inode *inode = mapping->host; 1541 struct inode *inode = mapping->host;
1542 pgoff_t offset = vmf->pgoff;
1479 struct page *page; 1543 struct page *page;
1480 pgoff_t size; 1544 pgoff_t size;
1481 int did_readaround = 0;
1482 int ret = 0; 1545 int ret = 0;
1483 1546
1484 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1547 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1485 if (vmf->pgoff >= size) 1548 if (offset >= size)
1486 return VM_FAULT_SIGBUS; 1549 return VM_FAULT_SIGBUS;
1487 1550
1488 /* If we don't want any read-ahead, don't bother */
1489 if (VM_RandomReadHint(vma))
1490 goto no_cached_page;
1491
1492 /* 1551 /*
1493 * Do we have something in the page cache already? 1552 * Do we have something in the page cache already?
1494 */ 1553 */
1495retry_find: 1554 page = find_get_page(mapping, offset);
1496 page = find_lock_page(mapping, vmf->pgoff); 1555 if (likely(page)) {
1497 /*
1498 * For sequential accesses, we use the generic readahead logic.
1499 */
1500 if (VM_SequentialReadHint(vma)) {
1501 if (!page) {
1502 page_cache_sync_readahead(mapping, ra, file,
1503 vmf->pgoff, 1);
1504 page = find_lock_page(mapping, vmf->pgoff);
1505 if (!page)
1506 goto no_cached_page;
1507 }
1508 if (PageReadahead(page)) {
1509 page_cache_async_readahead(mapping, ra, file, page,
1510 vmf->pgoff, 1);
1511 }
1512 }
1513
1514 if (!page) {
1515 unsigned long ra_pages;
1516
1517 ra->mmap_miss++;
1518
1519 /* 1556 /*
1520 * Do we miss much more than hit in this file? If so, 1557 * We found the page, so try async readahead before
1521 * stop bothering with read-ahead. It will only hurt. 1558 * waiting for the lock.
1522 */ 1559 */
1523 if (ra->mmap_miss > MMAP_LOTSAMISS) 1560 do_async_mmap_readahead(vma, ra, file, page, offset);
1524 goto no_cached_page; 1561 lock_page(page);
1525 1562
1526 /* 1563 /* Did it get truncated? */
1527 * To keep the pgmajfault counter straight, we need to 1564 if (unlikely(page->mapping != mapping)) {
1528 * check did_readaround, as this is an inner loop. 1565 unlock_page(page);
1529 */ 1566 put_page(page);
1530 if (!did_readaround) { 1567 goto no_cached_page;
1531 ret = VM_FAULT_MAJOR;
1532 count_vm_event(PGMAJFAULT);
1533 }
1534 did_readaround = 1;
1535 ra_pages = max_sane_readahead(file->f_ra.ra_pages);
1536 if (ra_pages) {
1537 pgoff_t start = 0;
1538
1539 if (vmf->pgoff > ra_pages / 2)
1540 start = vmf->pgoff - ra_pages / 2;
1541 do_page_cache_readahead(mapping, file, start, ra_pages);
1542 } 1568 }
1543 page = find_lock_page(mapping, vmf->pgoff); 1569 } else {
1570 /* No page in the page cache at all */
1571 do_sync_mmap_readahead(vma, ra, file, offset);
1572 count_vm_event(PGMAJFAULT);
1573 ret = VM_FAULT_MAJOR;
1574retry_find:
1575 page = find_lock_page(mapping, offset);
1544 if (!page) 1576 if (!page)
1545 goto no_cached_page; 1577 goto no_cached_page;
1546 } 1578 }
1547 1579
1548 if (!did_readaround)
1549 ra->mmap_miss--;
1550
1551 /* 1580 /*
1552 * We have a locked page in the page cache, now we need to check 1581 * We have a locked page in the page cache, now we need to check
1553 * that it's up-to-date. If not, it is going to be due to an error. 1582 * that it's up-to-date. If not, it is going to be due to an error.
@@ -1555,18 +1584,18 @@ retry_find:
1555 if (unlikely(!PageUptodate(page))) 1584 if (unlikely(!PageUptodate(page)))
1556 goto page_not_uptodate; 1585 goto page_not_uptodate;
1557 1586
1558 /* Must recheck i_size under page lock */ 1587 /*
1588 * Found the page and have a reference on it.
1589 * We must recheck i_size under page lock.
1590 */
1559 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1591 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1560 if (unlikely(vmf->pgoff >= size)) { 1592 if (unlikely(offset >= size)) {
1561 unlock_page(page); 1593 unlock_page(page);
1562 page_cache_release(page); 1594 page_cache_release(page);
1563 return VM_FAULT_SIGBUS; 1595 return VM_FAULT_SIGBUS;
1564 } 1596 }
1565 1597
1566 /* 1598 ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
1567 * Found the page and have a reference on it.
1568 */
1569 ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
1570 vmf->page = page; 1599 vmf->page = page;
1571 return ret | VM_FAULT_LOCKED; 1600 return ret | VM_FAULT_LOCKED;
1572 1601
@@ -1575,7 +1604,7 @@ no_cached_page:
1575 * We're only likely to ever get here if MADV_RANDOM is in 1604 * We're only likely to ever get here if MADV_RANDOM is in
1576 * effect. 1605 * effect.
1577 */ 1606 */
1578 error = page_cache_read(file, vmf->pgoff); 1607 error = page_cache_read(file, offset);
1579 1608
1580 /* 1609 /*
1581 * The page we want has now been added to the page cache. 1610 * The page we want has now been added to the page cache.
@@ -1595,12 +1624,6 @@ no_cached_page:
1595 return VM_FAULT_SIGBUS; 1624 return VM_FAULT_SIGBUS;
1596 1625
1597page_not_uptodate: 1626page_not_uptodate:
1598 /* IO error path */
1599 if (!did_readaround) {
1600 ret = VM_FAULT_MAJOR;
1601 count_vm_event(PGMAJFAULT);
1602 }
1603
1604 /* 1627 /*
1605 * Umm, take care of errors if the page isn't up-to-date. 1628 * Umm, take care of errors if the page isn't up-to-date.
1606 * Try to re-read it _once_. We do this synchronously, 1629 * Try to re-read it _once_. We do this synchronously,
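The fault path is reorganized around two helpers: do_sync_mmap_readahead() when no page is cached (count a miss and do read-around unless misses dominate) and do_async_mmap_readahead() when the page is present (decay the miss counter and extend readahead on PG_readahead). A stripped-down sketch of just the miss accounting; MMAP_LOTSAMISS mirrors the constant above, everything else is illustrative:

#define MMAP_LOTSAMISS 100

struct ra_sketch {
	int mmap_miss;
};

/* Returns 1 if read-around is still worth attempting for this file. */
static int want_readaround(struct ra_sketch *ra, int page_was_cached)
{
	if (page_was_cached) {
		if (ra->mmap_miss > 0)
			ra->mmap_miss--;	/* hits decay the counter */
		return 1;
	}
	if (ra->mmap_miss < 1000000)
		ra->mmap_miss++;		/* saturate, don't wrap */
	return ra->mmap_miss <= MMAP_LOTSAMISS;
}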
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e83ad2c9228c..a56e6f3ce979 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -578,41 +578,6 @@ static void free_huge_page(struct page *page)
578 hugetlb_put_quota(mapping, 1); 578 hugetlb_put_quota(mapping, 1);
579} 579}
580 580
581/*
582 * Increment or decrement surplus_huge_pages. Keep node-specific counters
583 * balanced by operating on them in a round-robin fashion.
584 * Returns 1 if an adjustment was made.
585 */
586static int adjust_pool_surplus(struct hstate *h, int delta)
587{
588 static int prev_nid;
589 int nid = prev_nid;
590 int ret = 0;
591
592 VM_BUG_ON(delta != -1 && delta != 1);
593 do {
594 nid = next_node(nid, node_online_map);
595 if (nid == MAX_NUMNODES)
596 nid = first_node(node_online_map);
597
598 /* To shrink on this node, there must be a surplus page */
599 if (delta < 0 && !h->surplus_huge_pages_node[nid])
600 continue;
601 /* Surplus cannot exceed the total number of pages */
602 if (delta > 0 && h->surplus_huge_pages_node[nid] >=
603 h->nr_huge_pages_node[nid])
604 continue;
605
606 h->surplus_huge_pages += delta;
607 h->surplus_huge_pages_node[nid] += delta;
608 ret = 1;
609 break;
610 } while (nid != prev_nid);
611
612 prev_nid = nid;
613 return ret;
614}
615
616static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) 581static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
617{ 582{
618 set_compound_page_dtor(page, free_huge_page); 583 set_compound_page_dtor(page, free_huge_page);
@@ -623,6 +588,34 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
623 put_page(page); /* free it into the hugepage allocator */ 588 put_page(page); /* free it into the hugepage allocator */
624} 589}
625 590
591static void prep_compound_gigantic_page(struct page *page, unsigned long order)
592{
593 int i;
594 int nr_pages = 1 << order;
595 struct page *p = page + 1;
596
597 /* we rely on prep_new_huge_page to set the destructor */
598 set_compound_order(page, order);
599 __SetPageHead(page);
600 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
601 __SetPageTail(p);
602 p->first_page = page;
603 }
604}
605
606int PageHuge(struct page *page)
607{
608 compound_page_dtor *dtor;
609
610 if (!PageCompound(page))
611 return 0;
612
613 page = compound_head(page);
614 dtor = get_compound_page_dtor(page);
615
616 return dtor == free_huge_page;
617}
618
626static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) 619static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
627{ 620{
628 struct page *page; 621 struct page *page;
@@ -630,7 +623,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
630 if (h->order >= MAX_ORDER) 623 if (h->order >= MAX_ORDER)
631 return NULL; 624 return NULL;
632 625
633 page = alloc_pages_node(nid, 626 page = alloc_pages_exact_node(nid,
634 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| 627 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
635 __GFP_REPEAT|__GFP_NOWARN, 628 __GFP_REPEAT|__GFP_NOWARN,
636 huge_page_order(h)); 629 huge_page_order(h));
@@ -649,7 +642,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
649 * Use a helper variable to find the next node and then 642 * Use a helper variable to find the next node and then
650 * copy it back to hugetlb_next_nid afterwards: 643 * copy it back to hugetlb_next_nid afterwards:
651 * otherwise there's a window in which a racer might 644 * otherwise there's a window in which a racer might
652 * pass invalid nid MAX_NUMNODES to alloc_pages_node. 645 * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
653 * But we don't need to use a spin_lock here: it really 646 * But we don't need to use a spin_lock here: it really
654 * doesn't matter if occasionally a racer chooses the 647 * doesn't matter if occasionally a racer chooses the
655 * same nid as we do. Move nid forward in the mask even 648 * same nid as we do. Move nid forward in the mask even
@@ -875,7 +868,7 @@ static void return_unused_surplus_pages(struct hstate *h,
875 * can no longer free unreserved surplus pages. This occurs when 868 * can no longer free unreserved surplus pages. This occurs when
876 * the nodes with surplus pages have no free pages. 869 * the nodes with surplus pages have no free pages.
877 */ 870 */
878 unsigned long remaining_iterations = num_online_nodes(); 871 unsigned long remaining_iterations = nr_online_nodes;
879 872
880 /* Uncommit the reservation */ 873 /* Uncommit the reservation */
881 h->resv_huge_pages -= unused_resv_pages; 874 h->resv_huge_pages -= unused_resv_pages;
@@ -904,7 +897,7 @@ static void return_unused_surplus_pages(struct hstate *h,
904 h->surplus_huge_pages--; 897 h->surplus_huge_pages--;
905 h->surplus_huge_pages_node[nid]--; 898 h->surplus_huge_pages_node[nid]--;
906 nr_pages--; 899 nr_pages--;
907 remaining_iterations = num_online_nodes(); 900 remaining_iterations = nr_online_nodes;
908 } 901 }
909 } 902 }
910} 903}
@@ -1140,6 +1133,41 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
1140} 1133}
1141#endif 1134#endif
1142 1135
1136/*
1137 * Increment or decrement surplus_huge_pages. Keep node-specific counters
1138 * balanced by operating on them in a round-robin fashion.
1139 * Returns 1 if an adjustment was made.
1140 */
1141static int adjust_pool_surplus(struct hstate *h, int delta)
1142{
1143 static int prev_nid;
1144 int nid = prev_nid;
1145 int ret = 0;
1146
1147 VM_BUG_ON(delta != -1 && delta != 1);
1148 do {
1149 nid = next_node(nid, node_online_map);
1150 if (nid == MAX_NUMNODES)
1151 nid = first_node(node_online_map);
1152
1153 /* To shrink on this node, there must be a surplus page */
1154 if (delta < 0 && !h->surplus_huge_pages_node[nid])
1155 continue;
1156 /* Surplus cannot exceed the total number of pages */
1157 if (delta > 0 && h->surplus_huge_pages_node[nid] >=
1158 h->nr_huge_pages_node[nid])
1159 continue;
1160
1161 h->surplus_huge_pages += delta;
1162 h->surplus_huge_pages_node[nid] += delta;
1163 ret = 1;
1164 break;
1165 } while (nid != prev_nid);
1166
1167 prev_nid = nid;
1168 return ret;
1169}
1170
1143#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 1171#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1144static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) 1172static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
1145{ 1173{
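The new PageHuge() identifies a hugetlb page without spending a page flag: every compound page records a destructor, and hugetlb pages are exactly those whose destructor is free_huge_page. The identify-by-destructor idea in a self-contained toy:

typedef void (*dtor_t)(void *);

static void huge_dtor(void *obj)
{
	(void)obj;	/* teardown would go here */
}

struct toy_page {
	dtor_t dtor;
};

static int toy_page_is_huge(const struct toy_page *p)
{
	/* the destructor pointer doubles as the type tag */
	return p->dtor == huge_dtor;
}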
diff --git a/mm/init-mm.c b/mm/init-mm.c
new file mode 100644
index 000000000000..57aba0da9668
--- /dev/null
+++ b/mm/init-mm.c
@@ -0,0 +1,20 @@
1#include <linux/mm_types.h>
2#include <linux/rbtree.h>
3#include <linux/rwsem.h>
4#include <linux/spinlock.h>
5#include <linux/list.h>
6#include <linux/cpumask.h>
7
8#include <asm/atomic.h>
9#include <asm/pgtable.h>
10
11struct mm_struct init_mm = {
12 .mm_rb = RB_ROOT,
13 .pgd = swapper_pg_dir,
14 .mm_users = ATOMIC_INIT(2),
15 .mm_count = ATOMIC_INIT(1),
16 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
17 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
18 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
19 .cpu_vm_mask = CPU_MASK_ALL,
20};
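init_mm moves into its own translation unit and is built entirely from designated initializers, including members (the rwsem, the spinlock, the list head) that name the object itself; that self-reference is legal because the addresses are link-time constants. In miniature:

struct counter {
	int value;
	int *self;	/* points back into the same object */
};

static struct counter boot_counter = {
	.value = 1,
	.self  = &boot_counter.value,	/* address constant, resolved at link time */
};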
diff --git a/mm/internal.h b/mm/internal.h
index 987bb03fbdd8..f290c4db528b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -16,9 +16,6 @@
16void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, 16void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
17 unsigned long floor, unsigned long ceiling); 17 unsigned long floor, unsigned long ceiling);
18 18
19extern void prep_compound_page(struct page *page, unsigned long order);
20extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
21
22static inline void set_page_count(struct page *page, int v) 19static inline void set_page_count(struct page *page, int v)
23{ 20{
24 atomic_set(&page->_count, v); 21 atomic_set(&page->_count, v);
@@ -51,6 +48,8 @@ extern void putback_lru_page(struct page *page);
51 */ 48 */
52extern unsigned long highest_memmap_pfn; 49extern unsigned long highest_memmap_pfn;
53extern void __free_pages_bootmem(struct page *page, unsigned int order); 50extern void __free_pages_bootmem(struct page *page, unsigned int order);
51extern void prep_compound_page(struct page *page, unsigned long order);
52
54 53
55/* 54/*
56 * function for dealing with page's order in buddy system. 55 * function for dealing with page's order in buddy system.
@@ -74,7 +73,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
74} 73}
75#endif 74#endif
76 75
77#ifdef CONFIG_UNEVICTABLE_LRU
78/* 76/*
79 * unevictable_migrate_page() called only from migrate_page_copy() to 77 * unevictable_migrate_page() called only from migrate_page_copy() to
80 * migrate unevictable flag to new page. 78 * migrate unevictable flag to new page.
@@ -86,11 +84,6 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
86 if (TestClearPageUnevictable(old)) 84 if (TestClearPageUnevictable(old))
87 SetPageUnevictable(new); 85 SetPageUnevictable(new);
88} 86}
89#else
90static inline void unevictable_migrate_page(struct page *new, struct page *old)
91{
92}
93#endif
94 87
95#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 88#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
96/* 89/*
@@ -150,23 +143,6 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
150 } 143 }
151} 144}
152 145
153/*
154 * free_page_mlock() -- clean up attempts to free and mlocked() page.
155 * Page should not be on lru, so no need to fix that up.
156 * free_pages_check() will verify...
157 */
158static inline void free_page_mlock(struct page *page)
159{
160 if (unlikely(TestClearPageMlocked(page))) {
161 unsigned long flags;
162
163 local_irq_save(flags);
164 __dec_zone_page_state(page, NR_MLOCK);
165 __count_vm_event(UNEVICTABLE_MLOCKFREED);
166 local_irq_restore(flags);
167 }
168}
169
170#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ 146#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
171static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p) 147static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
172{ 148{
@@ -175,7 +151,6 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
175static inline void clear_page_mlock(struct page *page) { } 151static inline void clear_page_mlock(struct page *page) { }
176static inline void mlock_vma_page(struct page *page) { } 152static inline void mlock_vma_page(struct page *page) { }
177static inline void mlock_migrate_page(struct page *new, struct page *old) { } 153static inline void mlock_migrate_page(struct page *new, struct page *old) { }
178static inline void free_page_mlock(struct page *page) { }
179 154
180#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ 155#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
181 156
@@ -284,4 +259,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
284 unsigned long start, int len, int flags, 259 unsigned long start, int len, int flags,
285 struct page **pages, struct vm_area_struct **vmas); 260 struct page **pages, struct vm_area_struct **vmas);
286 261
262#define ZONE_RECLAIM_NOSCAN -2
263#define ZONE_RECLAIM_FULL -1
264#define ZONE_RECLAIM_SOME 0
265#define ZONE_RECLAIM_SUCCESS 1
287#endif 266#endif
diff --git a/mm/madvise.c b/mm/madvise.c
index b9ce574827c8..76eb4193acdd 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -123,8 +123,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
123 end = vma->vm_end; 123 end = vma->vm_end;
124 end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 124 end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
125 125
126 force_page_cache_readahead(file->f_mapping, 126 force_page_cache_readahead(file->f_mapping, file, start, end - start);
127 file, start, max_sane_readahead(end - start));
128 return 0; 127 return 0;
129} 128}
130 129
@@ -239,12 +238,30 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
239 break; 238 break;
240 239
241 default: 240 default:
242 error = -EINVAL; 241 BUG();
243 break; 242 break;
244 } 243 }
245 return error; 244 return error;
246} 245}
247 246
247static int
248madvise_behavior_valid(int behavior)
249{
250 switch (behavior) {
251 case MADV_DOFORK:
252 case MADV_DONTFORK:
253 case MADV_NORMAL:
254 case MADV_SEQUENTIAL:
255 case MADV_RANDOM:
256 case MADV_REMOVE:
257 case MADV_WILLNEED:
258 case MADV_DONTNEED:
259 return 1;
260
261 default:
262 return 0;
263 }
264}
248/* 265/*
249 * The madvise(2) system call. 266 * The madvise(2) system call.
250 * 267 *
@@ -290,6 +307,9 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
290 int write; 307 int write;
291 size_t len; 308 size_t len;
292 309
310 if (!madvise_behavior_valid(behavior))
311 return error;
312
293 write = madvise_need_mmap_write(behavior); 313 write = madvise_need_mmap_write(behavior);
294 if (write) 314 if (write)
295 down_write(&current->mm->mmap_sem); 315 down_write(&current->mm->mmap_sem);
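madvise() now rejects unknown behaviors up front in madvise_behavior_valid(), so the per-vma dispatch switch can treat an unexpected value as a kernel bug (BUG()) rather than a user error. The validate-at-the-boundary pattern, with illustrative request codes:

enum { REQ_NORMAL, REQ_RANDOM, REQ_SEQUENTIAL };

static int request_valid(int behavior)
{
	switch (behavior) {
	case REQ_NORMAL:
	case REQ_RANDOM:
	case REQ_SEQUENTIAL:
		return 1;
	default:
		return 0;	/* caller bails out with -EINVAL before any work */
	}
}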
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 78eb8552818b..70db6e0a5eec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -570,6 +570,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
570 return 0; 570 return 0;
571} 571}
572 572
573int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
574{
575 unsigned long active;
576 unsigned long inactive;
577
578 inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
579 active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
580
581 return (active > inactive);
582}
583
573unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, 584unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
574 struct zone *zone, 585 struct zone *zone,
575 enum lru_list lru) 586 enum lru_list lru)
diff --git a/mm/memory.c b/mm/memory.c
index 4126dd16778c..d5d1653d60a6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1360,6 +1360,56 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1360 return i; 1360 return i;
1361} 1361}
1362 1362
1363/**
1364 * get_user_pages() - pin user pages in memory
1365 * @tsk: task_struct of target task
1366 * @mm: mm_struct of target mm
1367 * @start: starting user address
1368 * @len: number of pages from start to pin
1369 * @write: whether pages will be written to by the caller
1370 * @force: whether to force write access even if user mapping is
1371 * readonly. This will result in the page being COWed even
1372 * in MAP_SHARED mappings. You do not want this.
1373 * @pages: array that receives pointers to the pages pinned.
1374 * Should be at least nr_pages long. Or NULL, if caller
1375 * only intends to ensure the pages are faulted in.
1376 * @vmas: array of pointers to vmas corresponding to each page.
1377 * Or NULL if the caller does not require them.
1378 *
1379 * Returns number of pages pinned. This may be fewer than the number
1380 * requested. If len is 0 or negative, returns 0. If no pages
1381 * were pinned, returns -errno. Each page returned must be released
1382 * with a put_page() call when it is finished with. vmas will only
1383 * remain valid while mmap_sem is held.
1384 *
1385 * Must be called with mmap_sem held for read or write.
1386 *
1387 * get_user_pages walks a process's page tables and takes a reference to
1388 * each struct page that each user address corresponds to at a given
1389 * instant. That is, it takes the page that would be accessed if a user
1390 * thread accesses the given user virtual address at that instant.
1391 *
1392 * This does not guarantee that the page exists in the user mappings when
1393 * get_user_pages returns, and there may even be a completely different
1394 * page there in some cases (eg. if mmapped pagecache has been invalidated
1395 * and subsequently re faulted). However it does guarantee that the page
1396 * won't be freed completely. And mostly callers simply care that the page
1397 * contains data that was valid *at some point in time*. Typically, an IO
1398 * or similar operation cannot guarantee anything stronger anyway because
1399 * locks can't be held over the syscall boundary.
1400 *
1401 * If write=0, the page must not be written to. If the page is written to,
1402 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
1403 * after the page is finished with, and before put_page is called.
1404 *
1405 * get_user_pages is typically used for fewer-copy IO operations, to get a
1406 * handle on the memory by some means other than accesses via the user virtual
1407 * addresses. The pages may be submitted for DMA to devices or accessed via
1408 * their kernel linear mapping (via the kmap APIs). Care should be taken to
1409 * use the correct cache flushing APIs.
1410 *
1411 * See also get_user_pages_fast, for performance critical applications.
1412 */
1363int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1413int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1364 unsigned long start, int len, int write, int force, 1414 unsigned long start, int len, int write, int force,
1365 struct page **pages, struct vm_area_struct **vmas) 1415 struct page **pages, struct vm_area_struct **vmas)
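
The kernel-doc above fixes the locking and dirtying contract. A hedged sketch of the canonical call pattern for a caller that writes to the pinned pages; pin_and_touch() is illustrative, not part of the patch:

#include <linux/mm.h>
#include <linux/slab.h>

static int pin_and_touch(struct task_struct *tsk, struct mm_struct *mm,
			 unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int i, got;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	got = get_user_pages(tsk, mm, uaddr, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);

	for (i = 0; i < got; i++) {
		/* ... touch page contents via kmap()/kunmap() here ... */
		set_page_dirty_lock(pages[i]);	/* required: write == 1 */
		put_page(pages[i]);		/* drop the reference GUP took */
	}

	kfree(pages);
	return got < 0 ? got : 0;
}
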
@@ -3053,22 +3103,13 @@ int in_gate_area_no_task(unsigned long addr)
3053 3103
3054#endif /* __HAVE_ARCH_GATE_AREA */ 3104#endif /* __HAVE_ARCH_GATE_AREA */
3055 3105
3056#ifdef CONFIG_HAVE_IOREMAP_PROT 3106static int follow_pte(struct mm_struct *mm, unsigned long address,
3057int follow_phys(struct vm_area_struct *vma, 3107 pte_t **ptepp, spinlock_t **ptlp)
3058 unsigned long address, unsigned int flags,
3059 unsigned long *prot, resource_size_t *phys)
3060{ 3108{
3061 pgd_t *pgd; 3109 pgd_t *pgd;
3062 pud_t *pud; 3110 pud_t *pud;
3063 pmd_t *pmd; 3111 pmd_t *pmd;
3064 pte_t *ptep, pte; 3112 pte_t *ptep;
3065 spinlock_t *ptl;
3066 resource_size_t phys_addr = 0;
3067 struct mm_struct *mm = vma->vm_mm;
3068 int ret = -EINVAL;
3069
3070 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3071 goto out;
3072 3113
3073 pgd = pgd_offset(mm, address); 3114 pgd = pgd_offset(mm, address);
3074 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 3115 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
@@ -3086,22 +3127,71 @@ int follow_phys(struct vm_area_struct *vma,
3086 if (pmd_huge(*pmd)) 3127 if (pmd_huge(*pmd))
3087 goto out; 3128 goto out;
3088 3129
3089 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 3130 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3090 if (!ptep) 3131 if (!ptep)
3091 goto out; 3132 goto out;
3133 if (!pte_present(*ptep))
3134 goto unlock;
3135 *ptepp = ptep;
3136 return 0;
3137unlock:
3138 pte_unmap_unlock(ptep, *ptlp);
3139out:
3140 return -EINVAL;
3141}
3092 3142
3143/**
3144 * follow_pfn - look up PFN at a user virtual address
3145 * @vma: memory mapping
3146 * @address: user virtual address
3147 * @pfn: location to store found PFN
3148 *
3149 * Only IO mappings and raw PFN mappings are allowed.
3150 *
3151 * Returns zero and the pfn at @pfn on success, -ve otherwise.
3152 */
3153int follow_pfn(struct vm_area_struct *vma, unsigned long address,
3154 unsigned long *pfn)
3155{
3156 int ret = -EINVAL;
3157 spinlock_t *ptl;
3158 pte_t *ptep;
3159
3160 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3161 return ret;
3162
3163 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
3164 if (ret)
3165 return ret;
3166 *pfn = pte_pfn(*ptep);
3167 pte_unmap_unlock(ptep, ptl);
3168 return 0;
3169}
3170EXPORT_SYMBOL(follow_pfn);
3171
3172#ifdef CONFIG_HAVE_IOREMAP_PROT
3173int follow_phys(struct vm_area_struct *vma,
3174 unsigned long address, unsigned int flags,
3175 unsigned long *prot, resource_size_t *phys)
3176{
3177 int ret = -EINVAL;
3178 pte_t *ptep, pte;
3179 spinlock_t *ptl;
3180
3181 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3182 goto out;
3183
3184 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
3185 goto out;
3093 pte = *ptep; 3186 pte = *ptep;
3094 if (!pte_present(pte)) 3187
3095 goto unlock;
3096 if ((flags & FOLL_WRITE) && !pte_write(pte)) 3188 if ((flags & FOLL_WRITE) && !pte_write(pte))
3097 goto unlock; 3189 goto unlock;
3098 phys_addr = pte_pfn(pte);
3099 phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
3100 3190
3101 *prot = pgprot_val(pte_pgprot(pte)); 3191 *prot = pgprot_val(pte_pgprot(pte));
3102 *phys = phys_addr; 3192 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
3103 ret = 0;
3104 3193
3194 ret = 0;
3105unlock: 3195unlock:
3106 pte_unmap_unlock(ptep, ptl); 3196 pte_unmap_unlock(ptep, ptl);
3107out: 3197out:
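
With the walk factored into follow_pte(), follow_pfn() gives drivers a supported way to resolve the frame behind a VM_IO/VM_PFNMAP mapping. A hedged usage sketch; lookup_io_pfn() is illustrative:

#include <linux/mm.h>

static int lookup_io_pfn(struct mm_struct *mm, unsigned long addr,
			 unsigned long *pfn)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		/* -EINVAL unless the vma is VM_IO or VM_PFNMAP */
		ret = follow_pfn(vma, addr, pfn);
	up_read(&mm->mmap_sem);

	return ret;
}
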
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c083cf5fd6df..e4412a676c88 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -422,7 +422,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
422 zone->present_pages += onlined_pages; 422 zone->present_pages += onlined_pages;
423 zone->zone_pgdat->node_present_pages += onlined_pages; 423 zone->zone_pgdat->node_present_pages += onlined_pages;
424 424
425 setup_per_zone_pages_min(); 425 setup_per_zone_wmarks();
426 calculate_zone_inactive_ratio(zone);
426 if (onlined_pages) { 427 if (onlined_pages) {
427 kswapd_run(zone_to_nid(zone)); 428 kswapd_run(zone_to_nid(zone));
428 node_set_state(zone_to_nid(zone), N_HIGH_MEMORY); 429 node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
@@ -832,6 +833,9 @@ repeat:
832 totalram_pages -= offlined_pages; 833 totalram_pages -= offlined_pages;
833 num_physpages -= offlined_pages; 834 num_physpages -= offlined_pages;
834 835
836 setup_per_zone_wmarks();
837 calculate_zone_inactive_ratio(zone);
838
835 vm_total_pages = nr_free_pagecache_pages(); 839 vm_total_pages = nr_free_pagecache_pages();
836 writeback_set_ratelimit(); 840 writeback_set_ratelimit();
837 841
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3eb4a6fdc043..e08e2c4da63a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -182,13 +182,54 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
182 return 0; 182 return 0;
183} 183}
184 184
185/* Create a new policy */ 185/*
186 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
187 * any, for the new policy. mpol_new() has already validated the nodes
188 * parameter with respect to the policy mode and flags. But, we need to
189 * handle an empty nodemask with MPOL_PREFERRED here.
190 *
191 * Must be called holding task's alloc_lock to protect task's mems_allowed
192 * and mempolicy. May also be called holding the mmap_semaphore for write.
193 */
194static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
195{
196 nodemask_t cpuset_context_nmask;
197 int ret;
198
199 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
200 if (pol == NULL)
201 return 0;
202
203 VM_BUG_ON(!nodes);
204 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
205 nodes = NULL; /* explicit local allocation */
206 else {
207 if (pol->flags & MPOL_F_RELATIVE_NODES)
208 mpol_relative_nodemask(&cpuset_context_nmask, nodes,
209 &cpuset_current_mems_allowed);
210 else
211 nodes_and(cpuset_context_nmask, *nodes,
212 cpuset_current_mems_allowed);
213 if (mpol_store_user_nodemask(pol))
214 pol->w.user_nodemask = *nodes;
215 else
216 pol->w.cpuset_mems_allowed =
217 cpuset_current_mems_allowed;
218 }
219
220 ret = mpol_ops[pol->mode].create(pol,
221 nodes ? &cpuset_context_nmask : NULL);
222 return ret;
223}
224
225/*
226 * This function just creates a new policy, does some checks and simple
227 * initialization. You must invoke mpol_set_nodemask() to set nodes.
228 */
186static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 229static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
187 nodemask_t *nodes) 230 nodemask_t *nodes)
188{ 231{
189 struct mempolicy *policy; 232 struct mempolicy *policy;
190 nodemask_t cpuset_context_nmask;
191 int ret;
192 233
193 pr_debug("setting mode %d flags %d nodes[0] %lx\n", 234 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
194 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1); 235 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
@@ -210,7 +251,6 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
210 if (((flags & MPOL_F_STATIC_NODES) || 251 if (((flags & MPOL_F_STATIC_NODES) ||
211 (flags & MPOL_F_RELATIVE_NODES))) 252 (flags & MPOL_F_RELATIVE_NODES)))
212 return ERR_PTR(-EINVAL); 253 return ERR_PTR(-EINVAL);
213 nodes = NULL; /* flag local alloc */
214 } 254 }
215 } else if (nodes_empty(*nodes)) 255 } else if (nodes_empty(*nodes))
216 return ERR_PTR(-EINVAL); 256 return ERR_PTR(-EINVAL);
@@ -221,30 +261,6 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
221 policy->mode = mode; 261 policy->mode = mode;
222 policy->flags = flags; 262 policy->flags = flags;
223 263
224 if (nodes) {
225 /*
226 * cpuset related setup doesn't apply to local allocation
227 */
228 cpuset_update_task_memory_state();
229 if (flags & MPOL_F_RELATIVE_NODES)
230 mpol_relative_nodemask(&cpuset_context_nmask, nodes,
231 &cpuset_current_mems_allowed);
232 else
233 nodes_and(cpuset_context_nmask, *nodes,
234 cpuset_current_mems_allowed);
235 if (mpol_store_user_nodemask(policy))
236 policy->w.user_nodemask = *nodes;
237 else
238 policy->w.cpuset_mems_allowed =
239 cpuset_mems_allowed(current);
240 }
241
242 ret = mpol_ops[mode].create(policy,
243 nodes ? &cpuset_context_nmask : NULL);
244 if (ret < 0) {
245 kmem_cache_free(policy_cache, policy);
246 return ERR_PTR(ret);
247 }
248 return policy; 264 return policy;
249} 265}
250 266
@@ -324,6 +340,8 @@ static void mpol_rebind_policy(struct mempolicy *pol,
324/* 340/*
325 * Wrapper for mpol_rebind_policy() that just requires task 341 * Wrapper for mpol_rebind_policy() that just requires task
326 * pointer, and updates task mempolicy. 342 * pointer, and updates task mempolicy.
343 *
344 * Called with task's alloc_lock held.
327 */ 345 */
328 346
329void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 347void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
@@ -600,8 +618,9 @@ static void mpol_set_task_struct_flag(void)
600static long do_set_mempolicy(unsigned short mode, unsigned short flags, 618static long do_set_mempolicy(unsigned short mode, unsigned short flags,
601 nodemask_t *nodes) 619 nodemask_t *nodes)
602{ 620{
603 struct mempolicy *new; 621 struct mempolicy *new, *old;
604 struct mm_struct *mm = current->mm; 622 struct mm_struct *mm = current->mm;
623 int ret;
605 624
606 new = mpol_new(mode, flags, nodes); 625 new = mpol_new(mode, flags, nodes);
607 if (IS_ERR(new)) 626 if (IS_ERR(new))
@@ -615,20 +634,33 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
615 */ 634 */
616 if (mm) 635 if (mm)
617 down_write(&mm->mmap_sem); 636 down_write(&mm->mmap_sem);
618 mpol_put(current->mempolicy); 637 task_lock(current);
638 ret = mpol_set_nodemask(new, nodes);
639 if (ret) {
640 task_unlock(current);
641 if (mm)
642 up_write(&mm->mmap_sem);
643 mpol_put(new);
644 return ret;
645 }
646 old = current->mempolicy;
619 current->mempolicy = new; 647 current->mempolicy = new;
620 mpol_set_task_struct_flag(); 648 mpol_set_task_struct_flag();
621 if (new && new->mode == MPOL_INTERLEAVE && 649 if (new && new->mode == MPOL_INTERLEAVE &&
622 nodes_weight(new->v.nodes)) 650 nodes_weight(new->v.nodes))
623 current->il_next = first_node(new->v.nodes); 651 current->il_next = first_node(new->v.nodes);
652 task_unlock(current);
624 if (mm) 653 if (mm)
625 up_write(&mm->mmap_sem); 654 up_write(&mm->mmap_sem);
626 655
656 mpol_put(old);
627 return 0; 657 return 0;
628} 658}
629 659
630/* 660/*
631 * Return nodemask for policy for get_mempolicy() query 661 * Return nodemask for policy for get_mempolicy() query
662 *
663 * Called with task's alloc_lock held
632 */ 664 */
633static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 665static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
634{ 666{
@@ -674,7 +706,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
674 struct vm_area_struct *vma = NULL; 706 struct vm_area_struct *vma = NULL;
675 struct mempolicy *pol = current->mempolicy; 707 struct mempolicy *pol = current->mempolicy;
676 708
677 cpuset_update_task_memory_state();
678 if (flags & 709 if (flags &
679 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 710 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
680 return -EINVAL; 711 return -EINVAL;
@@ -683,7 +714,9 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
683 if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 714 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
684 return -EINVAL; 715 return -EINVAL;
685 *policy = 0; /* just so it's initialized */ 716 *policy = 0; /* just so it's initialized */
717 task_lock(current);
686 *nmask = cpuset_current_mems_allowed; 718 *nmask = cpuset_current_mems_allowed;
719 task_unlock(current);
687 return 0; 720 return 0;
688 } 721 }
689 722
@@ -738,8 +771,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
738 } 771 }
739 772
740 err = 0; 773 err = 0;
741 if (nmask) 774 if (nmask) {
775 task_lock(current);
742 get_policy_nodemask(pol, nmask); 776 get_policy_nodemask(pol, nmask);
777 task_unlock(current);
778 }
743 779
744 out: 780 out:
745 mpol_cond_put(pol); 781 mpol_cond_put(pol);
@@ -767,7 +803,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
767 803
768static struct page *new_node_page(struct page *page, unsigned long node, int **x) 804static struct page *new_node_page(struct page *page, unsigned long node, int **x)
769{ 805{
770 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0); 806 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
771} 807}
772 808
773/* 809/*
@@ -979,6 +1015,14 @@ static long do_mbind(unsigned long start, unsigned long len,
979 return err; 1015 return err;
980 } 1016 }
981 down_write(&mm->mmap_sem); 1017 down_write(&mm->mmap_sem);
1018 task_lock(current);
1019 err = mpol_set_nodemask(new, nmask);
1020 task_unlock(current);
1021 if (err) {
1022 up_write(&mm->mmap_sem);
1023 mpol_put(new);
1024 return err;
1025 }
982 vma = check_range(mm, start, end, nmask, 1026 vma = check_range(mm, start, end, nmask,
983 flags | MPOL_MF_INVERT, &pagelist); 1027 flags | MPOL_MF_INVERT, &pagelist);
984 1028
@@ -1545,8 +1589,6 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1545 struct mempolicy *pol = get_vma_policy(current, vma, addr); 1589 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1546 struct zonelist *zl; 1590 struct zonelist *zl;
1547 1591
1548 cpuset_update_task_memory_state();
1549
1550 if (unlikely(pol->mode == MPOL_INTERLEAVE)) { 1592 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1551 unsigned nid; 1593 unsigned nid;
1552 1594
@@ -1593,8 +1635,6 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1593{ 1635{
1594 struct mempolicy *pol = current->mempolicy; 1636 struct mempolicy *pol = current->mempolicy;
1595 1637
1596 if ((gfp & __GFP_WAIT) && !in_interrupt())
1597 cpuset_update_task_memory_state();
1598 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE)) 1638 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1599 pol = &default_policy; 1639 pol = &default_policy;
1600 1640
@@ -1854,6 +1894,8 @@ restart:
1854 */ 1894 */
1855void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 1895void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1856{ 1896{
1897 int ret;
1898
1857 sp->root = RB_ROOT; /* empty tree == default mempolicy */ 1899 sp->root = RB_ROOT; /* empty tree == default mempolicy */
1858 spin_lock_init(&sp->lock); 1900 spin_lock_init(&sp->lock);
1859 1901
@@ -1863,9 +1905,19 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1863 1905
1864 /* contextualize the tmpfs mount point mempolicy */ 1906 /* contextualize the tmpfs mount point mempolicy */
1865 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 1907 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
1866 mpol_put(mpol); /* drop our ref on sb mpol */ 1908 if (IS_ERR(new)) {
1867 if (IS_ERR(new)) 1909 mpol_put(mpol); /* drop our ref on sb mpol */
1868 return; /* no valid nodemask intersection */ 1910 return; /* no valid nodemask intersection */
1911 }
1912
1913 task_lock(current);
1914 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask);
1915 task_unlock(current);
1916 mpol_put(mpol); /* drop our ref on sb mpol */
1917 if (ret) {
1918 mpol_put(new);
1919 return;
1920 }
1869 1921
1870 /* Create pseudo-vma that contains just the policy */ 1922 /* Create pseudo-vma that contains just the policy */
1871 memset(&pvma, 0, sizeof(struct vm_area_struct)); 1923 memset(&pvma, 0, sizeof(struct vm_area_struct));
@@ -2086,8 +2138,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2086 new = mpol_new(mode, mode_flags, &nodes); 2138 new = mpol_new(mode, mode_flags, &nodes);
2087 if (IS_ERR(new)) 2139 if (IS_ERR(new))
2088 err = 1; 2140 err = 1;
2089 else if (no_context) 2141 else {
2090 new->w.user_nodemask = nodes; /* save for contextualization */ 2142 int ret;
2143
2144 task_lock(current);
2145 ret = mpol_set_nodemask(new, &nodes);
2146 task_unlock(current);
2147 if (ret)
2148 err = 1;
2149 else if (no_context) {
2150 /* save for contextualization */
2151 new->w.user_nodemask = nodes;
2152 }
2153 }
2091 2154
2092out: 2155out:
2093 /* Restore string for error message */ 2156 /* Restore string for error message */
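
After this split, every mpol_new() caller in the file follows the same two-step shape, visible in the hunks above. A condensed sketch of that pattern, with error paths abbreviated:

/* Condensed sketch of the two-step pattern (not literal kernel code). */
struct mempolicy *new;
int ret;

new = mpol_new(mode, flags, nodes);	/* validate mode/flags, allocate */
if (IS_ERR(new))
	return PTR_ERR(new);

task_lock(current);			/* protects mems_allowed/mempolicy */
ret = mpol_set_nodemask(new, nodes);	/* contextualize the nodemask */
task_unlock(current);
if (ret) {
	mpol_put(new);
	return ret;
}
/* ... install or apply 'new' ... */
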
diff --git a/mm/migrate.c b/mm/migrate.c
index 068655d8f883..939888f9ddab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -802,7 +802,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
802 802
803 *result = &pm->status; 803 *result = &pm->status;
804 804
805 return alloc_pages_node(pm->node, 805 return alloc_pages_exact_node(pm->node,
806 GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); 806 GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
807} 807}
808 808
@@ -820,7 +820,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
820 struct page_to_node *pp; 820 struct page_to_node *pp;
821 LIST_HEAD(pagelist); 821 LIST_HEAD(pagelist);
822 822
823 migrate_prep();
824 down_read(&mm->mmap_sem); 823 down_read(&mm->mmap_sem);
825 824
826 /* 825 /*
@@ -907,6 +906,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
907 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL); 906 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
908 if (!pm) 907 if (!pm)
909 goto out; 908 goto out;
909
910 migrate_prep();
911
910 /* 912 /*
911 * Store a chunk of page_to_node array in a page, 913 * Store a chunk of page_to_node array in a page,
912 * but keep the last one as a marker 914 * but keep the last one as a marker
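
alloc_pages_exact_node(), introduced elsewhere in this series, drops the nid < 0 fallback that alloc_pages_node() performs; that is safe here because the caller has already validated pm->node. A hedged sketch of the assumed distinction (shapes follow gfp.h of this kernel generation, not quoted from the patch):

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
					    unsigned int order)
{
	if (nid < 0)
		nid = numa_node_id();	/* silent fallback to local node */
	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						  unsigned int order)
{
	/* caller guarantees a valid node; skip the fallback branch */
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
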
diff --git a/mm/mlock.c b/mm/mlock.c
index ac130433c7d3..45eb650b9654 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -31,7 +31,6 @@ int can_do_mlock(void)
31} 31}
32EXPORT_SYMBOL(can_do_mlock); 32EXPORT_SYMBOL(can_do_mlock);
33 33
34#ifdef CONFIG_UNEVICTABLE_LRU
35/* 34/*
36 * Mlocked pages are marked with PageMlocked() flag for efficient testing 35 * Mlocked pages are marked with PageMlocked() flag for efficient testing
37 * in vmscan and, possibly, the fault path; and to support semi-accurate 36 * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval)
261 return retval; 260 return retval;
262} 261}
263 262
264#else /* CONFIG_UNEVICTABLE_LRU */
265
266/*
267 * Just make pages present if VM_LOCKED. No-op if unlocking.
268 */
269static long __mlock_vma_pages_range(struct vm_area_struct *vma,
270 unsigned long start, unsigned long end,
271 int mlock)
272{
273 if (mlock && (vma->vm_flags & VM_LOCKED))
274 return make_pages_present(start, end);
275 return 0;
276}
277
278static inline int __mlock_posix_error_return(long retval)
279{
280 return 0;
281}
282
283#endif /* CONFIG_UNEVICTABLE_LRU */
284
285/** 263/**
286 * mlock_vma_pages_range() - mlock pages in specified vma range. 264 * mlock_vma_pages_range() - mlock pages in specified vma range.
287 * @vma - the vma containing the specified address range 265
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a7b2460e922b..175a67a78a99 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -58,6 +58,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
58 unsigned long points, cpu_time, run_time; 58 unsigned long points, cpu_time, run_time;
59 struct mm_struct *mm; 59 struct mm_struct *mm;
60 struct task_struct *child; 60 struct task_struct *child;
61 int oom_adj;
61 62
62 task_lock(p); 63 task_lock(p);
63 mm = p->mm; 64 mm = p->mm;
@@ -65,6 +66,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
65 task_unlock(p); 66 task_unlock(p);
66 return 0; 67 return 0;
67 } 68 }
69 oom_adj = mm->oom_adj;
70 if (oom_adj == OOM_DISABLE) {
71 task_unlock(p);
72 return 0;
73 }
68 74
69 /* 75 /*
70 * The memory size of the process is the basis for the badness. 76 * The memory size of the process is the basis for the badness.
@@ -148,15 +154,15 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
148 points /= 8; 154 points /= 8;
149 155
150 /* 156 /*
151 * Adjust the score by oomkilladj. 157 * Adjust the score by oom_adj.
152 */ 158 */
153 if (p->oomkilladj) { 159 if (oom_adj) {
154 if (p->oomkilladj > 0) { 160 if (oom_adj > 0) {
155 if (!points) 161 if (!points)
156 points = 1; 162 points = 1;
157 points <<= p->oomkilladj; 163 points <<= oom_adj;
158 } else 164 } else
159 points >>= -(p->oomkilladj); 165 points >>= -(oom_adj);
160 } 166 }
161 167
162#ifdef DEBUG 168#ifdef DEBUG
@@ -251,11 +257,8 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
251 *ppoints = ULONG_MAX; 257 *ppoints = ULONG_MAX;
252 } 258 }
253 259
254 if (p->oomkilladj == OOM_DISABLE)
255 continue;
256
257 points = badness(p, uptime.tv_sec); 260 points = badness(p, uptime.tv_sec);
258 if (points > *ppoints || !chosen) { 261 if (points > *ppoints) {
259 chosen = p; 262 chosen = p;
260 *ppoints = points; 263 *ppoints = points;
261 } 264 }
@@ -304,8 +307,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
304 } 307 }
305 printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n", 308 printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
306 p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm, 309 p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
307 get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj, 310 get_mm_rss(mm), (int)task_cpu(p), mm->oom_adj, p->comm);
308 p->comm);
309 task_unlock(p); 311 task_unlock(p);
310 } while_each_thread(g, p); 312 } while_each_thread(g, p);
311} 313}
@@ -323,11 +325,8 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
323 return; 325 return;
324 } 326 }
325 327
326 if (!p->mm) { 328 if (!p->mm)
327 WARN_ON(1);
328 printk(KERN_WARNING "tried to kill an mm-less task!\n");
329 return; 329 return;
330 }
331 330
332 if (verbose) 331 if (verbose)
333 printk(KERN_ERR "Killed process %d (%s)\n", 332 printk(KERN_ERR "Killed process %d (%s)\n",
@@ -349,28 +348,13 @@ static int oom_kill_task(struct task_struct *p)
349 struct mm_struct *mm; 348 struct mm_struct *mm;
350 struct task_struct *g, *q; 349 struct task_struct *g, *q;
351 350
351 task_lock(p);
352 mm = p->mm; 352 mm = p->mm;
353 353 if (!mm || mm->oom_adj == OOM_DISABLE) {
354 /* WARNING: mm may not be dereferenced since we did not obtain its 354 task_unlock(p);
355 * value from get_task_mm(p). This is OK since all we need to do is
356 * compare mm to q->mm below.
357 *
358 * Furthermore, even if mm contains a non-NULL value, p->mm may
359 * change to NULL at any time since we do not hold task_lock(p).
360 * However, this is of no concern to us.
361 */
362
363 if (mm == NULL)
364 return 1; 355 return 1;
365 356 }
366 /* 357 task_unlock(p);
367 * Don't kill the process if any threads are set to OOM_DISABLE
368 */
369 do_each_thread(g, q) {
370 if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
371 return 1;
372 } while_each_thread(g, q);
373
374 __oom_kill_task(p, 1); 358 __oom_kill_task(p, 1);
375 359
376 /* 360 /*
@@ -393,10 +377,11 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
393 struct task_struct *c; 377 struct task_struct *c;
394 378
395 if (printk_ratelimit()) { 379 if (printk_ratelimit()) {
396 printk(KERN_WARNING "%s invoked oom-killer: "
397 "gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
398 current->comm, gfp_mask, order, current->oomkilladj);
399 task_lock(current); 380 task_lock(current);
381 printk(KERN_WARNING "%s invoked oom-killer: "
382 "gfp_mask=0x%x, order=%d, oom_adj=%d\n",
383 current->comm, gfp_mask, order,
384 current->mm ? current->mm->oom_adj : OOM_DISABLE);
400 cpuset_print_task_mems_allowed(current); 385 cpuset_print_task_mems_allowed(current);
401 task_unlock(current); 386 task_unlock(current);
402 dump_stack(); 387 dump_stack();
@@ -409,8 +394,9 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
409 /* 394 /*
410 * If the task is already exiting, don't alarm the sysadmin or kill 395 * If the task is already exiting, don't alarm the sysadmin or kill
411 * its children or threads, just set TIF_MEMDIE so it can die quickly 396 * its children or threads, just set TIF_MEMDIE so it can die quickly
397 * if its mm is still attached.
412 */ 398 */
413 if (p->flags & PF_EXITING) { 399 if (p->mm && (p->flags & PF_EXITING)) {
414 __oom_kill_task(p, 0); 400 __oom_kill_task(p, 0);
415 return 0; 401 return 0;
416 } 402 }
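
The score adjustment is a plain bit shift, so each positive oom_adj step doubles the badness and each negative step halves it. Illustrative arithmetic only; the values are made up:

unsigned long points = 1000;	/* badness before adjustment */
int oom_adj = 2;

if (oom_adj > 0) {
	if (!points)
		points = 1;	/* never shift a zero score */
	points <<= oom_adj;	/* 1000 -> 4000: more likely to be killed */
} else if (oom_adj < 0)
	points >>= -oom_adj;	/* oom_adj = -2 would give 1000 -> 250 */
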
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index bb553c3e955d..7b0dcea4935b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -265,18 +265,19 @@ static void bdi_writeout_fraction(struct backing_dev_info *bdi,
265 * This avoids exceeding the total dirty_limit when the floating averages 265 * This avoids exceeding the total dirty_limit when the floating averages
266 * fluctuate too quickly. 266 * fluctuate too quickly.
267 */ 267 */
268static void 268static void clip_bdi_dirty_limit(struct backing_dev_info *bdi,
269clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty) 269 unsigned long dirty, unsigned long *pbdi_dirty)
270{ 270{
271 long avail_dirty; 271 unsigned long avail_dirty;
272 272
273 avail_dirty = dirty - 273 avail_dirty = global_page_state(NR_FILE_DIRTY) +
274 (global_page_state(NR_FILE_DIRTY) +
275 global_page_state(NR_WRITEBACK) + 274 global_page_state(NR_WRITEBACK) +
276 global_page_state(NR_UNSTABLE_NFS) + 275 global_page_state(NR_UNSTABLE_NFS) +
277 global_page_state(NR_WRITEBACK_TEMP)); 276 global_page_state(NR_WRITEBACK_TEMP);
278 277
279 if (avail_dirty < 0) 278 if (avail_dirty < dirty)
279 avail_dirty = dirty - avail_dirty;
280 else
280 avail_dirty = 0; 281 avail_dirty = 0;
281 282
282 avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) + 283 avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
@@ -299,10 +300,10 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
299 * 300 *
300 * dirty -= (dirty/8) * p_{t} 301 * dirty -= (dirty/8) * p_{t}
301 */ 302 */
302static void task_dirty_limit(struct task_struct *tsk, long *pdirty) 303static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
303{ 304{
304 long numerator, denominator; 305 long numerator, denominator;
305 long dirty = *pdirty; 306 unsigned long dirty = *pdirty;
306 u64 inv = dirty >> 3; 307 u64 inv = dirty >> 3;
307 308
308 task_dirties_fraction(tsk, &numerator, &denominator); 309 task_dirties_fraction(tsk, &numerator, &denominator);
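
With avail_dirty now unsigned, the old "dirty minus the global totals" could wrap instead of going negative; the rewrite sums first and compares before subtracting. A standalone illustration of the hazard:

/* Illustration only: why the subtraction had to be reordered. */
unsigned long dirty = 100;	/* the global dirty limit */
unsigned long in_flight = 150;	/* dirty + writeback + unstable + temp */
unsigned long avail;

/* The old 'dirty - in_flight' would wrap to a huge unsigned value. */
if (in_flight < dirty)
	avail = dirty - in_flight;	/* headroom remains */
else
	avail = 0;			/* already at or over the limit */
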
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0727896a88ac..a5f3c278c573 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -162,17 +162,25 @@ static unsigned long __meminitdata dma_reserve;
162 162
163#if MAX_NUMNODES > 1 163#if MAX_NUMNODES > 1
164int nr_node_ids __read_mostly = MAX_NUMNODES; 164int nr_node_ids __read_mostly = MAX_NUMNODES;
165int nr_online_nodes __read_mostly = 1;
165EXPORT_SYMBOL(nr_node_ids); 166EXPORT_SYMBOL(nr_node_ids);
167EXPORT_SYMBOL(nr_online_nodes);
166#endif 168#endif
167 169
168int page_group_by_mobility_disabled __read_mostly; 170int page_group_by_mobility_disabled __read_mostly;
169 171
170static void set_pageblock_migratetype(struct page *page, int migratetype) 172static void set_pageblock_migratetype(struct page *page, int migratetype)
171{ 173{
174
175 if (unlikely(page_group_by_mobility_disabled))
176 migratetype = MIGRATE_UNMOVABLE;
177
172 set_pageblock_flags_group(page, (unsigned long)migratetype, 178 set_pageblock_flags_group(page, (unsigned long)migratetype,
173 PB_migrate, PB_migrate_end); 179 PB_migrate, PB_migrate_end);
174} 180}
175 181
182bool oom_killer_disabled __read_mostly;
183
176#ifdef CONFIG_DEBUG_VM 184#ifdef CONFIG_DEBUG_VM
177static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 185static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
178{ 186{
@@ -295,23 +303,6 @@ void prep_compound_page(struct page *page, unsigned long order)
295 } 303 }
296} 304}
297 305
298#ifdef CONFIG_HUGETLBFS
299void prep_compound_gigantic_page(struct page *page, unsigned long order)
300{
301 int i;
302 int nr_pages = 1 << order;
303 struct page *p = page + 1;
304
305 set_compound_page_dtor(page, free_compound_page);
306 set_compound_order(page, order);
307 __SetPageHead(page);
308 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
309 __SetPageTail(p);
310 p->first_page = page;
311 }
312}
313#endif
314
315static int destroy_compound_page(struct page *page, unsigned long order) 306static int destroy_compound_page(struct page *page, unsigned long order)
316{ 307{
317 int i; 308 int i;
@@ -418,7 +409,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
418 return 0; 409 return 0;
419 410
420 if (PageBuddy(buddy) && page_order(buddy) == order) { 411 if (PageBuddy(buddy) && page_order(buddy) == order) {
421 BUG_ON(page_count(buddy) != 0); 412 VM_BUG_ON(page_count(buddy) != 0);
422 return 1; 413 return 1;
423 } 414 }
424 return 0; 415 return 0;
@@ -449,22 +440,22 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
449 */ 440 */
450 441
451static inline void __free_one_page(struct page *page, 442static inline void __free_one_page(struct page *page,
452 struct zone *zone, unsigned int order) 443 struct zone *zone, unsigned int order,
444 int migratetype)
453{ 445{
454 unsigned long page_idx; 446 unsigned long page_idx;
455 int order_size = 1 << order;
456 int migratetype = get_pageblock_migratetype(page);
457 447
458 if (unlikely(PageCompound(page))) 448 if (unlikely(PageCompound(page)))
459 if (unlikely(destroy_compound_page(page, order))) 449 if (unlikely(destroy_compound_page(page, order)))
460 return; 450 return;
461 451
452 VM_BUG_ON(migratetype == -1);
453
462 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 454 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
463 455
464 VM_BUG_ON(page_idx & (order_size - 1)); 456 VM_BUG_ON(page_idx & ((1 << order) - 1));
465 VM_BUG_ON(bad_range(zone, page)); 457 VM_BUG_ON(bad_range(zone, page));
466 458
467 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
468 while (order < MAX_ORDER-1) { 459 while (order < MAX_ORDER-1) {
469 unsigned long combined_idx; 460 unsigned long combined_idx;
470 struct page *buddy; 461 struct page *buddy;
@@ -488,12 +479,27 @@ static inline void __free_one_page(struct page *page,
488 zone->free_area[order].nr_free++; 479 zone->free_area[order].nr_free++;
489} 480}
490 481
482#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
483/*
484 * free_page_mlock() -- clean up attempts to free an mlocked() page.
485 * Page should not be on lru, so no need to fix that up.
486 * free_pages_check() will verify...
487 */
488static inline void free_page_mlock(struct page *page)
489{
490 __ClearPageMlocked(page);
491 __dec_zone_page_state(page, NR_MLOCK);
492 __count_vm_event(UNEVICTABLE_MLOCKFREED);
493}
494#else
495static void free_page_mlock(struct page *page) { }
496#endif
497
491static inline int free_pages_check(struct page *page) 498static inline int free_pages_check(struct page *page)
492{ 499{
493 free_page_mlock(page);
494 if (unlikely(page_mapcount(page) | 500 if (unlikely(page_mapcount(page) |
495 (page->mapping != NULL) | 501 (page->mapping != NULL) |
496 (page_count(page) != 0) | 502 (atomic_read(&page->_count) != 0) |
497 (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) { 503 (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
498 bad_page(page); 504 bad_page(page);
499 return 1; 505 return 1;
@@ -520,6 +526,8 @@ static void free_pages_bulk(struct zone *zone, int count,
520 spin_lock(&zone->lock); 526 spin_lock(&zone->lock);
521 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 527 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
522 zone->pages_scanned = 0; 528 zone->pages_scanned = 0;
529
530 __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
523 while (count--) { 531 while (count--) {
524 struct page *page; 532 struct page *page;
525 533
@@ -527,17 +535,20 @@ static void free_pages_bulk(struct zone *zone, int count,
527 page = list_entry(list->prev, struct page, lru); 535 page = list_entry(list->prev, struct page, lru);
528 /* have to delete it as __free_one_page list manipulates */ 536 /* have to delete it as __free_one_page list manipulates */
529 list_del(&page->lru); 537 list_del(&page->lru);
530 __free_one_page(page, zone, order); 538 __free_one_page(page, zone, order, page_private(page));
531 } 539 }
532 spin_unlock(&zone->lock); 540 spin_unlock(&zone->lock);
533} 541}
534 542
535static void free_one_page(struct zone *zone, struct page *page, int order) 543static void free_one_page(struct zone *zone, struct page *page, int order,
544 int migratetype)
536{ 545{
537 spin_lock(&zone->lock); 546 spin_lock(&zone->lock);
538 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); 547 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
539 zone->pages_scanned = 0; 548 zone->pages_scanned = 0;
540 __free_one_page(page, zone, order); 549
550 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
551 __free_one_page(page, zone, order, migratetype);
541 spin_unlock(&zone->lock); 552 spin_unlock(&zone->lock);
542} 553}
543 554
@@ -546,6 +557,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
546 unsigned long flags; 557 unsigned long flags;
547 int i; 558 int i;
548 int bad = 0; 559 int bad = 0;
560 int clearMlocked = PageMlocked(page);
549 561
550 kmemcheck_free_shadow(page, order); 562 kmemcheck_free_shadow(page, order);
551 563
@@ -563,8 +575,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
563 kernel_map_pages(page, 1 << order, 0); 575 kernel_map_pages(page, 1 << order, 0);
564 576
565 local_irq_save(flags); 577 local_irq_save(flags);
578 if (unlikely(clearMlocked))
579 free_page_mlock(page);
566 __count_vm_events(PGFREE, 1 << order); 580 __count_vm_events(PGFREE, 1 << order);
567 free_one_page(page_zone(page), page, order); 581 free_one_page(page_zone(page), page, order,
582 get_pageblock_migratetype(page));
568 local_irq_restore(flags); 583 local_irq_restore(flags);
569} 584}
570 585
@@ -635,7 +650,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
635{ 650{
636 if (unlikely(page_mapcount(page) | 651 if (unlikely(page_mapcount(page) |
637 (page->mapping != NULL) | 652 (page->mapping != NULL) |
638 (page_count(page) != 0) | 653 (atomic_read(&page->_count) != 0) |
639 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) { 654 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
640 bad_page(page); 655 bad_page(page);
641 return 1; 656 return 1;
@@ -660,7 +675,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
660 * Go through the free lists for the given migratetype and remove 675 * Go through the free lists for the given migratetype and remove
661 * the smallest available page from the freelists 676 * the smallest available page from the freelists
662 */ 677 */
663static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 678static inline
679struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
664 int migratetype) 680 int migratetype)
665{ 681{
666 unsigned int current_order; 682 unsigned int current_order;
@@ -678,7 +694,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
678 list_del(&page->lru); 694 list_del(&page->lru);
679 rmv_page_order(page); 695 rmv_page_order(page);
680 area->nr_free--; 696 area->nr_free--;
681 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
682 expand(zone, page, order, current_order, area, migratetype); 697 expand(zone, page, order, current_order, area, migratetype);
683 return page; 698 return page;
684 } 699 }
@@ -769,8 +784,8 @@ static int move_freepages_block(struct zone *zone, struct page *page,
769} 784}
770 785
771/* Remove an element from the buddy allocator from the fallback list */ 786/* Remove an element from the buddy allocator from the fallback list */
772static struct page *__rmqueue_fallback(struct zone *zone, int order, 787static inline struct page *
773 int start_migratetype) 788__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
774{ 789{
775 struct free_area * area; 790 struct free_area * area;
776 int current_order; 791 int current_order;
@@ -818,8 +833,6 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
818 /* Remove the page from the freelists */ 833 /* Remove the page from the freelists */
819 list_del(&page->lru); 834 list_del(&page->lru);
820 rmv_page_order(page); 835 rmv_page_order(page);
821 __mod_zone_page_state(zone, NR_FREE_PAGES,
822 -(1UL << order));
823 836
824 if (current_order == pageblock_order) 837 if (current_order == pageblock_order)
825 set_pageblock_migratetype(page, 838 set_pageblock_migratetype(page,
@@ -830,8 +843,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
830 } 843 }
831 } 844 }
832 845
833 /* Use MIGRATE_RESERVE rather than fail an allocation */ 846 return NULL;
834 return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
835} 847}
836 848
837/* 849/*
@@ -843,11 +855,23 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
843{ 855{
844 struct page *page; 856 struct page *page;
845 857
858retry_reserve:
846 page = __rmqueue_smallest(zone, order, migratetype); 859 page = __rmqueue_smallest(zone, order, migratetype);
847 860
848 if (unlikely(!page)) 861 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
849 page = __rmqueue_fallback(zone, order, migratetype); 862 page = __rmqueue_fallback(zone, order, migratetype);
850 863
864 /*
865 * Use MIGRATE_RESERVE rather than fail an allocation. goto
866 * is used because __rmqueue_smallest is an inline function
867 * and we want just one call site
868 */
869 if (!page) {
870 migratetype = MIGRATE_RESERVE;
871 goto retry_reserve;
872 }
873 }
874
851 return page; 875 return page;
852} 876}
853 877
@@ -881,6 +905,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
881 set_page_private(page, migratetype); 905 set_page_private(page, migratetype);
882 list = &page->lru; 906 list = &page->lru;
883 } 907 }
908 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
884 spin_unlock(&zone->lock); 909 spin_unlock(&zone->lock);
885 return i; 910 return i;
886} 911}
@@ -996,6 +1021,7 @@ static void free_hot_cold_page(struct page *page, int cold)
996 struct zone *zone = page_zone(page); 1021 struct zone *zone = page_zone(page);
997 struct per_cpu_pages *pcp; 1022 struct per_cpu_pages *pcp;
998 unsigned long flags; 1023 unsigned long flags;
1024 int clearMlocked = PageMlocked(page);
999 1025
1000 kmemcheck_free_shadow(page, 0); 1026 kmemcheck_free_shadow(page, 0);
1001 1027
@@ -1012,13 +1038,16 @@ static void free_hot_cold_page(struct page *page, int cold)
1012 kernel_map_pages(page, 1, 0); 1038 kernel_map_pages(page, 1, 0);
1013 1039
1014 pcp = &zone_pcp(zone, get_cpu())->pcp; 1040 pcp = &zone_pcp(zone, get_cpu())->pcp;
1041 set_page_private(page, get_pageblock_migratetype(page));
1015 local_irq_save(flags); 1042 local_irq_save(flags);
1043 if (unlikely(clearMlocked))
1044 free_page_mlock(page);
1016 __count_vm_event(PGFREE); 1045 __count_vm_event(PGFREE);
1046
1017 if (cold) 1047 if (cold)
1018 list_add_tail(&page->lru, &pcp->list); 1048 list_add_tail(&page->lru, &pcp->list);
1019 else 1049 else
1020 list_add(&page->lru, &pcp->list); 1050 list_add(&page->lru, &pcp->list);
1021 set_page_private(page, get_pageblock_migratetype(page));
1022 pcp->count++; 1051 pcp->count++;
1023 if (pcp->count >= pcp->high) { 1052 if (pcp->count >= pcp->high) {
1024 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 1053 free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
@@ -1071,14 +1100,15 @@ void split_page(struct page *page, unsigned int order)
1071 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1100 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1072 * or two. 1101 * or two.
1073 */ 1102 */
1074static struct page *buffered_rmqueue(struct zone *preferred_zone, 1103static inline
1075 struct zone *zone, int order, gfp_t gfp_flags) 1104struct page *buffered_rmqueue(struct zone *preferred_zone,
1105 struct zone *zone, int order, gfp_t gfp_flags,
1106 int migratetype)
1076{ 1107{
1077 unsigned long flags; 1108 unsigned long flags;
1078 struct page *page; 1109 struct page *page;
1079 int cold = !!(gfp_flags & __GFP_COLD); 1110 int cold = !!(gfp_flags & __GFP_COLD);
1080 int cpu; 1111 int cpu;
1081 int migratetype = allocflags_to_migratetype(gfp_flags);
1082 1112
1083again: 1113again:
1084 cpu = get_cpu(); 1114 cpu = get_cpu();
@@ -1115,8 +1145,22 @@ again:
1115 list_del(&page->lru); 1145 list_del(&page->lru);
1116 pcp->count--; 1146 pcp->count--;
1117 } else { 1147 } else {
1148 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1149 /*
1150 * __GFP_NOFAIL is not to be used in new code.
1151 *
1152 * All __GFP_NOFAIL callers should be fixed so that they
1153 * properly detect and handle allocation failures.
1154 *
1155 * We most definitely don't want callers attempting to
1156 * allocate greater than single-page units with
1157 * __GFP_NOFAIL.
1158 */
1159 WARN_ON_ONCE(order > 0);
1160 }
1118 spin_lock_irqsave(&zone->lock, flags); 1161 spin_lock_irqsave(&zone->lock, flags);
1119 page = __rmqueue(zone, order, migratetype); 1162 page = __rmqueue(zone, order, migratetype);
1163 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1120 spin_unlock(&zone->lock); 1164 spin_unlock(&zone->lock);
1121 if (!page) 1165 if (!page)
1122 goto failed; 1166 goto failed;
@@ -1138,10 +1182,15 @@ failed:
1138 return NULL; 1182 return NULL;
1139} 1183}
1140 1184
1141#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 1185/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1142#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 1186#define ALLOC_WMARK_MIN WMARK_MIN
1143#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 1187#define ALLOC_WMARK_LOW WMARK_LOW
1144#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 1188#define ALLOC_WMARK_HIGH WMARK_HIGH
1189#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
1190
1191/* Mask to get the watermark bits */
1192#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
1193
1145#define ALLOC_HARDER 0x10 /* try to alloc harder */ 1194#define ALLOC_HARDER 0x10 /* try to alloc harder */
1146#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 1195#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
1147#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 1196#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
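
The ALLOC_WMARK_* values now double as indices into the new zone->watermark[] array. A sketch of the assumed layout; WMARK_MIN=0, WMARK_LOW=1, WMARK_HIGH=2 are assumptions from the enum added elsewhere in this series:

#define ALLOC_WMARK_MIN		WMARK_MIN	/* 0 */
#define ALLOC_WMARK_LOW		WMARK_LOW	/* 1 */
#define ALLOC_WMARK_HIGH	WMARK_HIGH	/* 2 */
#define ALLOC_NO_WATERMARKS	0x04		/* first non-index bit */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)	/* 0x03 */

/* One array lookup replaces the old three-way branch: */
mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
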
@@ -1399,23 +1448,18 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1399 */ 1448 */
1400static struct page * 1449static struct page *
1401get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, 1450get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1402 struct zonelist *zonelist, int high_zoneidx, int alloc_flags) 1451 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1452 struct zone *preferred_zone, int migratetype)
1403{ 1453{
1404 struct zoneref *z; 1454 struct zoneref *z;
1405 struct page *page = NULL; 1455 struct page *page = NULL;
1406 int classzone_idx; 1456 int classzone_idx;
1407 struct zone *zone, *preferred_zone; 1457 struct zone *zone;
1408 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1458 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1409 int zlc_active = 0; /* set if using zonelist_cache */ 1459 int zlc_active = 0; /* set if using zonelist_cache */
1410 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1460 int did_zlc_setup = 0; /* just call zlc_setup() one time */
1411 1461
1412 (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
1413 &preferred_zone);
1414 if (!preferred_zone)
1415 return NULL;
1416
1417 classzone_idx = zone_idx(preferred_zone); 1462 classzone_idx = zone_idx(preferred_zone);
1418
1419zonelist_scan: 1463zonelist_scan:
1420 /* 1464 /*
1421 * Scan zonelist, looking for a zone with enough free. 1465 * Scan zonelist, looking for a zone with enough free.
@@ -1430,31 +1474,49 @@ zonelist_scan:
1430 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1474 !cpuset_zone_allowed_softwall(zone, gfp_mask))
1431 goto try_next_zone; 1475 goto try_next_zone;
1432 1476
1477 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1433 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1478 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1434 unsigned long mark; 1479 unsigned long mark;
1435 if (alloc_flags & ALLOC_WMARK_MIN) 1480 int ret;
1436 mark = zone->pages_min; 1481
1437 else if (alloc_flags & ALLOC_WMARK_LOW) 1482 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1438 mark = zone->pages_low; 1483 if (zone_watermark_ok(zone, order, mark,
1439 else 1484 classzone_idx, alloc_flags))
1440 mark = zone->pages_high; 1485 goto try_this_zone;
1441 if (!zone_watermark_ok(zone, order, mark, 1486
1442 classzone_idx, alloc_flags)) { 1487 if (zone_reclaim_mode == 0)
1443 if (!zone_reclaim_mode || 1488 goto this_zone_full;
1444 !zone_reclaim(zone, gfp_mask, order)) 1489
1490 ret = zone_reclaim(zone, gfp_mask, order);
1491 switch (ret) {
1492 case ZONE_RECLAIM_NOSCAN:
1493 /* did not scan */
1494 goto try_next_zone;
1495 case ZONE_RECLAIM_FULL:
1496 /* scanned but unreclaimable */
1497 goto this_zone_full;
1498 default:
1499 /* did we reclaim enough */
1500 if (!zone_watermark_ok(zone, order, mark,
1501 classzone_idx, alloc_flags))
1445 goto this_zone_full; 1502 goto this_zone_full;
1446 } 1503 }
1447 } 1504 }
1448 1505
1449 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask); 1506try_this_zone:
1507 page = buffered_rmqueue(preferred_zone, zone, order,
1508 gfp_mask, migratetype);
1450 if (page) 1509 if (page)
1451 break; 1510 break;
1452this_zone_full: 1511this_zone_full:
1453 if (NUMA_BUILD) 1512 if (NUMA_BUILD)
1454 zlc_mark_zone_full(zonelist, z); 1513 zlc_mark_zone_full(zonelist, z);
1455try_next_zone: 1514try_next_zone:
1456 if (NUMA_BUILD && !did_zlc_setup) { 1515 if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1457 /* we do zlc_setup after the first zone is tried */ 1516 /*
1517 * we do zlc_setup after the first zone is tried but only
1518 * if there are multiple nodes make it worthwhile
1519 */
1458 allowednodes = zlc_setup(zonelist, alloc_flags); 1520 allowednodes = zlc_setup(zonelist, alloc_flags);
1459 zlc_active = 1; 1521 zlc_active = 1;
1460 did_zlc_setup = 1; 1522 did_zlc_setup = 1;
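
The switch above depends on zone_reclaim() return codes defined on the vmscan side of this series; their assumed shape, not shown in this diff:

/* Assumed definitions from mm/vmscan.c in this series. */
#define ZONE_RECLAIM_NOSCAN	-2	/* did not scan at all */
#define ZONE_RECLAIM_FULL	-1	/* scanned but nothing reclaimable */
#define ZONE_RECLAIM_SOME	0	/* reclaimed some, maybe not enough */
#define ZONE_RECLAIM_SUCCESS	1	/* reclaimed enough */
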
@@ -1469,47 +1531,217 @@ try_next_zone:
1469 return page; 1531 return page;
1470} 1532}
1471 1533
1534static inline int
1535should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1536 unsigned long pages_reclaimed)
1537{
1538 /* Do not loop if specifically requested */
1539 if (gfp_mask & __GFP_NORETRY)
1540 return 0;
1541
1542 /*
1543 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1544 * means __GFP_NOFAIL, but that may not be true in other
1545 * implementations.
1546 */
1547 if (order <= PAGE_ALLOC_COSTLY_ORDER)
1548 return 1;
1549
1550 /*
1551 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1552 * specified, then we retry until we no longer reclaim any pages
1553 * (above), or we've reclaimed an order of pages at least as
1554 * large as the allocation's order. In both cases, if the
1555 * allocation still fails, we stop retrying.
1556 */
1557 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1558 return 1;
1559
1560 /*
1561 * Don't let big-order allocations loop unless the caller
1562 * explicitly requests that.
1563 */
1564 if (gfp_mask & __GFP_NOFAIL)
1565 return 1;
1566
1567 return 0;
1568}
1569
1570static inline struct page *
1571__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1572 struct zonelist *zonelist, enum zone_type high_zoneidx,
1573 nodemask_t *nodemask, struct zone *preferred_zone,
1574 int migratetype)
1575{
1576 struct page *page;
1577
1578 /* Acquire the OOM killer lock for the zones in zonelist */
1579 if (!try_set_zone_oom(zonelist, gfp_mask)) {
1580 schedule_timeout_uninterruptible(1);
1581 return NULL;
1582 }
1583
1584 /*
1585 * Go through the zonelist yet one more time, keep very high watermark
1586 * here, this is only to catch a parallel oom killing, we must fail if
1587 * we're still under heavy pressure.
1588 */
1589 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1590 order, zonelist, high_zoneidx,
1591 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1592 preferred_zone, migratetype);
1593 if (page)
1594 goto out;
1595
1596 /* The OOM killer will not help higher order allocs */
1597 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
1598 goto out;
1599
1600 /* Exhausted what can be done so it's blamo time */
1601 out_of_memory(zonelist, gfp_mask, order);
1602
1603out:
1604 clear_zonelist_oom(zonelist, gfp_mask);
1605 return page;
1606}
1607
1608/* The really slow allocator path where we enter direct reclaim */
1609static inline struct page *
1610__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1611 struct zonelist *zonelist, enum zone_type high_zoneidx,
1612 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1613 int migratetype, unsigned long *did_some_progress)
1614{
1615 struct page *page = NULL;
1616 struct reclaim_state reclaim_state;
1617 struct task_struct *p = current;
1618
1619 cond_resched();
1620
1621 /* We now go into synchronous reclaim */
1622 cpuset_memory_pressure_bump();
1623
1624 /*
1625 * The task's cpuset might have expanded its set of allowable nodes
1626 */
1627 p->flags |= PF_MEMALLOC;
1628 lockdep_set_current_reclaim_state(gfp_mask);
1629 reclaim_state.reclaimed_slab = 0;
1630 p->reclaim_state = &reclaim_state;
1631
1632 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1633
1634 p->reclaim_state = NULL;
1635 lockdep_clear_current_reclaim_state();
1636 p->flags &= ~PF_MEMALLOC;
1637
1638 cond_resched();
1639
1640 if (order != 0)
1641 drain_all_pages();
1642
1643 if (likely(*did_some_progress))
1644 page = get_page_from_freelist(gfp_mask, nodemask, order,
1645 zonelist, high_zoneidx,
1646 alloc_flags, preferred_zone,
1647 migratetype);
1648 return page;
1649}
1650
1472/* 1651/*
1473 * This is the 'heart' of the zoned buddy allocator. 1652 * This is called in the allocator slow-path if the allocation request is of
1653 * sufficient urgency to ignore watermarks and take other desperate measures
1474 */ 1654 */
1475struct page * 1655static inline struct page *
1476__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, 1656__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1477 struct zonelist *zonelist, nodemask_t *nodemask) 1657 struct zonelist *zonelist, enum zone_type high_zoneidx,
1658 nodemask_t *nodemask, struct zone *preferred_zone,
1659 int migratetype)
1660{
1661 struct page *page;
1662
1663 do {
1664 page = get_page_from_freelist(gfp_mask, nodemask, order,
1665 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1666 preferred_zone, migratetype);
1667
1668 if (!page && gfp_mask & __GFP_NOFAIL)
1669 congestion_wait(WRITE, HZ/50);
1670 } while (!page && (gfp_mask & __GFP_NOFAIL));
1671
1672 return page;
1673}
1674
1675static inline
1676void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1677 enum zone_type high_zoneidx)
1478{ 1678{
1479 const gfp_t wait = gfp_mask & __GFP_WAIT;
1480 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1481 struct zoneref *z; 1679 struct zoneref *z;
1482 struct zone *zone; 1680 struct zone *zone;
1483 struct page *page;
1484 struct reclaim_state reclaim_state;
1485 struct task_struct *p = current;
1486 int do_retry;
1487 int alloc_flags;
1488 unsigned long did_some_progress;
1489 unsigned long pages_reclaimed = 0;
1490 1681
1491 lockdep_trace_alloc(gfp_mask); 1682 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1683 wakeup_kswapd(zone, order);
1684}
1492 1685
1493 might_sleep_if(wait); 1686static inline int
1687gfp_to_alloc_flags(gfp_t gfp_mask)
1688{
1689 struct task_struct *p = current;
1690 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1691 const gfp_t wait = gfp_mask & __GFP_WAIT;
1494 1692
1495 if (should_fail_alloc_page(gfp_mask, order)) 1693 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1496 return NULL; 1694 BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1497 1695
1498restart: 1696 /*
1499 z = zonelist->_zonerefs; /* the list of zones suitable for gfp_mask */ 1697 * The caller may dip into page reserves a bit more if the caller
1698 * cannot run direct reclaim, or if the caller has realtime scheduling
1699 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1700 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1701 */
1702 alloc_flags |= (gfp_mask & __GFP_HIGH);
1500 1703
1501 if (unlikely(!z->zone)) { 1704 if (!wait) {
1705 alloc_flags |= ALLOC_HARDER;
1502 /* 1706 /*
1503 * Happens if we have an empty zonelist as a result of 1707 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1504 * GFP_THISNODE being used on a memoryless node 1708 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1505 */ 1709 */
1506 return NULL; 1710 alloc_flags &= ~ALLOC_CPUSET;
1711 } else if (unlikely(rt_task(p)))
1712 alloc_flags |= ALLOC_HARDER;
1713
1714 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1715 if (!in_interrupt() &&
1716 ((p->flags & PF_MEMALLOC) ||
1717 unlikely(test_thread_flag(TIF_MEMDIE))))
1718 alloc_flags |= ALLOC_NO_WATERMARKS;
1507 } 1719 }
1508 1720
1509 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 1721 return alloc_flags;
1510 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1722}
1511 if (page) 1723
1512 goto got_pg; 1724static inline struct page *
1725__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1726 struct zonelist *zonelist, enum zone_type high_zoneidx,
1727 nodemask_t *nodemask, struct zone *preferred_zone,
1728 int migratetype)
1729{
1730 const gfp_t wait = gfp_mask & __GFP_WAIT;
1731 struct page *page = NULL;
1732 int alloc_flags;
1733 unsigned long pages_reclaimed = 0;
1734 unsigned long did_some_progress;
1735 struct task_struct *p = current;
1736
1737 /*
1738 * In the slowpath, we sanity check order to avoid ever trying to
1739 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1740 * be using allocators in order of preference for an area that is
1741 * too large.
1742 */
1743 if (WARN_ON_ONCE(order >= MAX_ORDER))
1744 return NULL;
1513 1745
1514 /* 1746 /*
1515 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1747 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1522,154 +1754,83 @@ restart:
1522 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1754 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1523 goto nopage; 1755 goto nopage;
1524 1756
1525 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 1757 wake_all_kswapd(order, zonelist, high_zoneidx);
1526 wakeup_kswapd(zone, order);
1527 1758
1528 /* 1759 /*
1529 * OK, we're below the kswapd watermark and have kicked background 1760 * OK, we're below the kswapd watermark and have kicked background
1530 * reclaim. Now things get more complex, so set up alloc_flags according 1761 * reclaim. Now things get more complex, so set up alloc_flags according
1531 * to how we want to proceed. 1762 * to how we want to proceed.
1532 *
1533 * The caller may dip into page reserves a bit more if the caller
1534 * cannot run direct reclaim, or if the caller has realtime scheduling
1535 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1536 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1537 */ 1763 */
1538 alloc_flags = ALLOC_WMARK_MIN; 1764 alloc_flags = gfp_to_alloc_flags(gfp_mask);
1539 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1540 alloc_flags |= ALLOC_HARDER;
1541 if (gfp_mask & __GFP_HIGH)
1542 alloc_flags |= ALLOC_HIGH;
1543 if (wait)
1544 alloc_flags |= ALLOC_CPUSET;
1545 1765
1546 /* 1766restart:
1547 * Go through the zonelist again. Let __GFP_HIGH and allocations 1767 /* This is the last chance, in general, before the goto nopage. */
1548 * coming from realtime tasks go deeper into reserves.
1549 *
1550 * This is the last chance, in general, before the goto nopage.
1551 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1552 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1553 */
1554 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 1768 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1555 high_zoneidx, alloc_flags); 1769 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1770 preferred_zone, migratetype);
1556 if (page) 1771 if (page)
1557 goto got_pg; 1772 goto got_pg;
1558 1773
1559 /* This allocation should allow future memory freeing. */
1560
1561rebalance: 1774rebalance:
1562 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1775 /* Allocate without watermarks if the context allows */
1563 && !in_interrupt()) { 1776 if (alloc_flags & ALLOC_NO_WATERMARKS) {
1564 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1777 page = __alloc_pages_high_priority(gfp_mask, order,
1565nofail_alloc: 1778 zonelist, high_zoneidx, nodemask,
1566 /* go through the zonelist yet again, ignoring mins */ 1779 preferred_zone, migratetype);
1567 page = get_page_from_freelist(gfp_mask, nodemask, order, 1780 if (page)
1568 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS); 1781 goto got_pg;
1569 if (page)
1570 goto got_pg;
1571 if (gfp_mask & __GFP_NOFAIL) {
1572 congestion_wait(WRITE, HZ/50);
1573 goto nofail_alloc;
1574 }
1575 }
1576 goto nopage;
1577 } 1782 }
1578 1783
1579 /* Atomic allocations - we can't balance anything */ 1784 /* Atomic allocations - we can't balance anything */
1580 if (!wait) 1785 if (!wait)
1581 goto nopage; 1786 goto nopage;
1582 1787
1583 cond_resched(); 1788 /* Avoid recursion of direct reclaim */
1789 if (p->flags & PF_MEMALLOC)
1790 goto nopage;
1791
1792 /* Try direct reclaim and then allocating */
1793 page = __alloc_pages_direct_reclaim(gfp_mask, order,
1794 zonelist, high_zoneidx,
1795 nodemask,
1796 alloc_flags, preferred_zone,
1797 migratetype, &did_some_progress);
1798 if (page)
1799 goto got_pg;
1584 1800
1585 /* We now go into synchronous reclaim */
1586 cpuset_memory_pressure_bump();
1587 /* 1801 /*
1588 * The task's cpuset might have expanded its set of allowable nodes 1802 * If we failed to make any progress reclaiming, then we are
1803 * running out of options and have to consider going OOM
1589 */ 1804 */
1590 cpuset_update_task_memory_state(); 1805 if (!did_some_progress) {
1591 p->flags |= PF_MEMALLOC; 1806 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1592 1807 if (oom_killer_disabled)
1593 lockdep_set_current_reclaim_state(gfp_mask); 1808 goto nopage;
1594 reclaim_state.reclaimed_slab = 0; 1809 page = __alloc_pages_may_oom(gfp_mask, order,
1595 p->reclaim_state = &reclaim_state; 1810 zonelist, high_zoneidx,
1596 1811 nodemask, preferred_zone,
1597 did_some_progress = try_to_free_pages(zonelist, order, 1812 migratetype);
1598 gfp_mask, nodemask); 1813 if (page)
1599 1814 goto got_pg;
1600 p->reclaim_state = NULL;
1601 lockdep_clear_current_reclaim_state();
1602 p->flags &= ~PF_MEMALLOC;
1603
1604 cond_resched();
1605 1815
1606 if (order != 0) 1816 /*
1607 drain_all_pages(); 1817 * The OOM killer does not trigger for high-order
1818 * ~__GFP_NOFAIL allocations so if no progress is being
1819 * made, there are no other options and retrying is
1820 * unlikely to help.
1821 */
1822 if (order > PAGE_ALLOC_COSTLY_ORDER &&
1823 !(gfp_mask & __GFP_NOFAIL))
1824 goto nopage;
1608 1825
1609 if (likely(did_some_progress)) {
1610 page = get_page_from_freelist(gfp_mask, nodemask, order,
1611 zonelist, high_zoneidx, alloc_flags);
1612 if (page)
1613 goto got_pg;
1614 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1615 if (!try_set_zone_oom(zonelist, gfp_mask)) {
1616 schedule_timeout_uninterruptible(1);
1617 goto restart; 1826 goto restart;
1618 } 1827 }
1619
1620 /*
1621 * Go through the zonelist yet one more time, keep
1622 * very high watermark here, this is only to catch
1623 * a parallel oom killing, we must fail if we're still
1624 * under heavy pressure.
1625 */
1626 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1627 order, zonelist, high_zoneidx,
1628 ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1629 if (page) {
1630 clear_zonelist_oom(zonelist, gfp_mask);
1631 goto got_pg;
1632 }
1633
1634 /* The OOM killer will not help higher order allocs so fail */
1635 if (order > PAGE_ALLOC_COSTLY_ORDER) {
1636 clear_zonelist_oom(zonelist, gfp_mask);
1637 goto nopage;
1638 }
1639
1640 out_of_memory(zonelist, gfp_mask, order);
1641 clear_zonelist_oom(zonelist, gfp_mask);
1642 goto restart;
1643 } 1828 }
1644 1829
1645 /* 1830 /* Check if we should retry the allocation */
1646 * Don't let big-order allocations loop unless the caller explicitly
1647 * requests that. Wait for some write requests to complete then retry.
1648 *
1649 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1650 * means __GFP_NOFAIL, but that may not be true in other
1651 * implementations.
1652 *
1653 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1654 * specified, then we retry until we no longer reclaim any pages
1655 * (above), or we've reclaimed an order of pages at least as
1656 * large as the allocation's order. In both cases, if the
1657 * allocation still fails, we stop retrying.
1658 */
1659 pages_reclaimed += did_some_progress; 1831 pages_reclaimed += did_some_progress;
1660 do_retry = 0; 1832 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1661 if (!(gfp_mask & __GFP_NORETRY)) { 1833 /* Wait for some write requests to complete then retry */
1662 if (order <= PAGE_ALLOC_COSTLY_ORDER) {
1663 do_retry = 1;
1664 } else {
1665 if (gfp_mask & __GFP_REPEAT &&
1666 pages_reclaimed < (1 << order))
1667 do_retry = 1;
1668 }
1669 if (gfp_mask & __GFP_NOFAIL)
1670 do_retry = 1;
1671 }
1672 if (do_retry) {
1673 congestion_wait(WRITE, HZ/50); 1834 congestion_wait(WRITE, HZ/50);
1674 goto rebalance; 1835 goto rebalance;
1675 } 1836 }
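
Note: the retry decision moves behind should_alloc_retry(); reconstructed below as a userspace sketch from the removed do_retry logic above (flag values are stand-ins, and note that __GFP_NOFAIL only forces a retry when __GFP_NORETRY is absent, matching the old nesting):

    #include <stdio.h>
    #include <stdbool.h>

    #define __GFP_REPEAT   0x1u    /* stand-in bit values for the sketch */
    #define __GFP_NOFAIL   0x2u
    #define __GFP_NORETRY  0x4u
    #define PAGE_ALLOC_COSTLY_ORDER 3

    static bool should_alloc_retry_model(unsigned gfp_mask, unsigned order,
                                         unsigned long pages_reclaimed)
    {
        if (gfp_mask & __GFP_NORETRY)
            return false;
        if (gfp_mask & __GFP_NOFAIL)
            return true;                    /* caller cannot handle failure */
        if (order <= PAGE_ALLOC_COSTLY_ORDER)
            return true;                    /* small orders keep trying */
        /* costly orders: retry only while __GFP_REPEAT and reclaim has not
         * yet produced a full allocation's worth of pages */
        return (gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1UL << order);
    }

    int main(void)
    {
        /* order-4 __GFP_REPEAT allocation with 8 pages reclaimed so far */
        printf("%d\n", should_alloc_retry_model(__GFP_REPEAT, 4, 8));
        return 0;
    }
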
@@ -1687,8 +1848,53 @@ got_pg:
1687 if (kmemcheck_enabled) 1848 if (kmemcheck_enabled)
1688 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 1849 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1689 return page; 1850 return page;
1851
1852}
1853
1854/*
1855 * This is the 'heart' of the zoned buddy allocator.
1856 */
1857struct page *
1858__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1859 struct zonelist *zonelist, nodemask_t *nodemask)
1860{
1861 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1862 struct zone *preferred_zone;
1863 struct page *page;
1864 int migratetype = allocflags_to_migratetype(gfp_mask);
1865
1866 lockdep_trace_alloc(gfp_mask);
1867
1868 might_sleep_if(gfp_mask & __GFP_WAIT);
1869
1870 if (should_fail_alloc_page(gfp_mask, order))
1871 return NULL;
1872
1873 /*
1874 * Check the zones suitable for the gfp_mask contain at least one
1875 * valid zone. It's possible to have an empty zonelist as a result
1876 * of GFP_THISNODE and a memoryless node
1877 */
1878 if (unlikely(!zonelist->_zonerefs->zone))
1879 return NULL;
1880
1881 /* The preferred zone is used for statistics later */
1882 first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1883 if (!preferred_zone)
1884 return NULL;
1885
1886 /* First allocation attempt */
1887 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1888 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1889 preferred_zone, migratetype);
1890 if (unlikely(!page))
1891 page = __alloc_pages_slowpath(gfp_mask, order,
1892 zonelist, high_zoneidx, nodemask,
1893 preferred_zone, migratetype);
1894
1895 return page;
1690} 1896}
1691EXPORT_SYMBOL(__alloc_pages_internal); 1897EXPORT_SYMBOL(__alloc_pages_nodemask);
1692 1898
1693/* 1899/*
1694 * Common helper functions. 1900 * Common helper functions.
@@ -1817,7 +2023,7 @@ static unsigned int nr_free_zone_pages(int offset)
1817 2023
1818 for_each_zone_zonelist(zone, z, zonelist, offset) { 2024 for_each_zone_zonelist(zone, z, zonelist, offset) {
1819 unsigned long size = zone->present_pages; 2025 unsigned long size = zone->present_pages;
1820 unsigned long high = zone->pages_high; 2026 unsigned long high = high_wmark_pages(zone);
1821 if (size > high) 2027 if (size > high)
1822 sum += size - high; 2028 sum += size - high;
1823 } 2029 }
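
Note: the rename to high_wmark_pages() does not change the arithmetic; the function still sums, over the zonelist, whatever each zone holds beyond its high watermark. A tiny array-based model of that sum, for illustration only:

    #include <stdio.h>

    static unsigned long nr_free_zone_pages_model(const unsigned long *present,
                                                  const unsigned long *high,
                                                  int nr_zones)
    {
        unsigned long sum = 0;

        for (int i = 0; i < nr_zones; i++)
            if (present[i] > high[i])       /* count pages above the mark */
                sum += present[i] - high[i];
        return sum;
    }

    int main(void)
    {
        unsigned long present[] = { 4096, 262144 }, high[] = { 128, 4096 };

        printf("%lu\n", nr_free_zone_pages_model(present, high, 2));
        return 0;
    }
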
@@ -1909,19 +2115,14 @@ void show_free_areas(void)
1909 2115
1910 printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n" 2116 printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
1911 " inactive_file:%lu" 2117 " inactive_file:%lu"
1912//TODO: check/adjust line lengths
1913#ifdef CONFIG_UNEVICTABLE_LRU
1914 " unevictable:%lu" 2118 " unevictable:%lu"
1915#endif
1916 " dirty:%lu writeback:%lu unstable:%lu\n" 2119 " dirty:%lu writeback:%lu unstable:%lu\n"
1917 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", 2120 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
1918 global_page_state(NR_ACTIVE_ANON), 2121 global_page_state(NR_ACTIVE_ANON),
1919 global_page_state(NR_ACTIVE_FILE), 2122 global_page_state(NR_ACTIVE_FILE),
1920 global_page_state(NR_INACTIVE_ANON), 2123 global_page_state(NR_INACTIVE_ANON),
1921 global_page_state(NR_INACTIVE_FILE), 2124 global_page_state(NR_INACTIVE_FILE),
1922#ifdef CONFIG_UNEVICTABLE_LRU
1923 global_page_state(NR_UNEVICTABLE), 2125 global_page_state(NR_UNEVICTABLE),
1924#endif
1925 global_page_state(NR_FILE_DIRTY), 2126 global_page_state(NR_FILE_DIRTY),
1926 global_page_state(NR_WRITEBACK), 2127 global_page_state(NR_WRITEBACK),
1927 global_page_state(NR_UNSTABLE_NFS), 2128 global_page_state(NR_UNSTABLE_NFS),
@@ -1945,25 +2146,21 @@ void show_free_areas(void)
1945 " inactive_anon:%lukB" 2146 " inactive_anon:%lukB"
1946 " active_file:%lukB" 2147 " active_file:%lukB"
1947 " inactive_file:%lukB" 2148 " inactive_file:%lukB"
1948#ifdef CONFIG_UNEVICTABLE_LRU
1949 " unevictable:%lukB" 2149 " unevictable:%lukB"
1950#endif
1951 " present:%lukB" 2150 " present:%lukB"
1952 " pages_scanned:%lu" 2151 " pages_scanned:%lu"
1953 " all_unreclaimable? %s" 2152 " all_unreclaimable? %s"
1954 "\n", 2153 "\n",
1955 zone->name, 2154 zone->name,
1956 K(zone_page_state(zone, NR_FREE_PAGES)), 2155 K(zone_page_state(zone, NR_FREE_PAGES)),
1957 K(zone->pages_min), 2156 K(min_wmark_pages(zone)),
1958 K(zone->pages_low), 2157 K(low_wmark_pages(zone)),
1959 K(zone->pages_high), 2158 K(high_wmark_pages(zone)),
1960 K(zone_page_state(zone, NR_ACTIVE_ANON)), 2159 K(zone_page_state(zone, NR_ACTIVE_ANON)),
1961 K(zone_page_state(zone, NR_INACTIVE_ANON)), 2160 K(zone_page_state(zone, NR_INACTIVE_ANON)),
1962 K(zone_page_state(zone, NR_ACTIVE_FILE)), 2161 K(zone_page_state(zone, NR_ACTIVE_FILE)),
1963 K(zone_page_state(zone, NR_INACTIVE_FILE)), 2162 K(zone_page_state(zone, NR_INACTIVE_FILE)),
1964#ifdef CONFIG_UNEVICTABLE_LRU
1965 K(zone_page_state(zone, NR_UNEVICTABLE)), 2163 K(zone_page_state(zone, NR_UNEVICTABLE)),
1966#endif
1967 K(zone->present_pages), 2164 K(zone->present_pages),
1968 zone->pages_scanned, 2165 zone->pages_scanned,
1969 (zone_is_all_unreclaimable(zone) ? "yes" : "no") 2166 (zone_is_all_unreclaimable(zone) ? "yes" : "no")
@@ -2121,7 +2318,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
2121} 2318}
2122 2319
2123 2320
2124#define MAX_NODE_LOAD (num_online_nodes()) 2321#define MAX_NODE_LOAD (nr_online_nodes)
2125static int node_load[MAX_NUMNODES]; 2322static int node_load[MAX_NUMNODES];
2126 2323
2127/** 2324/**
@@ -2330,7 +2527,7 @@ static void build_zonelists(pg_data_t *pgdat)
2330 2527
2331 /* NUMA-aware ordering of nodes */ 2528 /* NUMA-aware ordering of nodes */
2332 local_node = pgdat->node_id; 2529 local_node = pgdat->node_id;
2333 load = num_online_nodes(); 2530 load = nr_online_nodes;
2334 prev_node = local_node; 2531 prev_node = local_node;
2335 nodes_clear(used_mask); 2532 nodes_clear(used_mask);
2336 2533
@@ -2481,7 +2678,7 @@ void build_all_zonelists(void)
2481 2678
2482 printk("Built %i zonelists in %s order, mobility grouping %s. " 2679 printk("Built %i zonelists in %s order, mobility grouping %s. "
2483 "Total pages: %ld\n", 2680 "Total pages: %ld\n",
2484 num_online_nodes(), 2681 nr_online_nodes,
2485 zonelist_order_name[current_zonelist_order], 2682 zonelist_order_name[current_zonelist_order],
2486 page_group_by_mobility_disabled ? "off" : "on", 2683 page_group_by_mobility_disabled ? "off" : "on",
2487 vm_total_pages); 2684 vm_total_pages);
@@ -2560,8 +2757,8 @@ static inline unsigned long wait_table_bits(unsigned long size)
2560 2757
2561/* 2758/*
2562 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 2759 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2563 * of blocks reserved is based on zone->pages_min. The memory within the 2760 * of blocks reserved is based on min_wmark_pages(zone). The memory within
2564 * reserve will tend to store contiguous free pages. Setting min_free_kbytes 2761 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2565 * higher will lead to a bigger reserve which will get freed as contiguous 2762 * higher will lead to a bigger reserve which will get freed as contiguous
2566 * blocks as reclaim kicks in 2763 * blocks as reclaim kicks in
2567 */ 2764 */
@@ -2574,7 +2771,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
2574 /* Get the start pfn, end pfn and the number of blocks to reserve */ 2771 /* Get the start pfn, end pfn and the number of blocks to reserve */
2575 start_pfn = zone->zone_start_pfn; 2772 start_pfn = zone->zone_start_pfn;
2576 end_pfn = start_pfn + zone->spanned_pages; 2773 end_pfn = start_pfn + zone->spanned_pages;
2577 reserve = roundup(zone->pages_min, pageblock_nr_pages) >> 2774 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2578 pageblock_order; 2775 pageblock_order;
2579 2776
2580 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 2777 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
@@ -3506,7 +3703,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3506 zone_pcp_init(zone); 3703 zone_pcp_init(zone);
3507 for_each_lru(l) { 3704 for_each_lru(l) {
3508 INIT_LIST_HEAD(&zone->lru[l].list); 3705 INIT_LIST_HEAD(&zone->lru[l].list);
3509 zone->lru[l].nr_scan = 0; 3706 zone->lru[l].nr_saved_scan = 0;
3510 } 3707 }
3511 zone->reclaim_stat.recent_rotated[0] = 0; 3708 zone->reclaim_stat.recent_rotated[0] = 0;
3512 zone->reclaim_stat.recent_rotated[1] = 0; 3709 zone->reclaim_stat.recent_rotated[1] = 0;
@@ -4043,6 +4240,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4043 early_node_map[i].start_pfn, 4240 early_node_map[i].start_pfn,
4044 early_node_map[i].end_pfn); 4241 early_node_map[i].end_pfn);
4045 4242
4243 /*
 4244 * find_zone_movable_pfns_for_nodes/early_calculate_totalpages
 4245 * initialize that node mask, so clear it first
4246 */
4247 nodes_clear(node_states[N_HIGH_MEMORY]);
4046 /* Initialise every node */ 4248 /* Initialise every node */
4047 mminit_verify_pageflags_layout(); 4249 mminit_verify_pageflags_layout();
4048 setup_nr_node_ids(); 4250 setup_nr_node_ids();
@@ -4177,8 +4379,8 @@ static void calculate_totalreserve_pages(void)
4177 max = zone->lowmem_reserve[j]; 4379 max = zone->lowmem_reserve[j];
4178 } 4380 }
4179 4381
4180 /* we treat pages_high as reserved pages. */ 4382 /* we treat the high watermark as reserved pages. */
4181 max += zone->pages_high; 4383 max += high_wmark_pages(zone);
4182 4384
4183 if (max > zone->present_pages) 4385 if (max > zone->present_pages)
4184 max = zone->present_pages; 4386 max = zone->present_pages;
@@ -4228,12 +4430,13 @@ static void setup_per_zone_lowmem_reserve(void)
4228} 4430}
4229 4431
4230/** 4432/**
4231 * setup_per_zone_pages_min - called when min_free_kbytes changes. 4433 * setup_per_zone_wmarks - called when min_free_kbytes changes
4434 * or when memory is hot-{added|removed}
4232 * 4435 *
4233 * Ensures that the pages_{min,low,high} values for each zone are set correctly 4436 * Ensures that the watermark[min,low,high] values for each zone are set
4234 * with respect to min_free_kbytes. 4437 * correctly with respect to min_free_kbytes.
4235 */ 4438 */
4236void setup_per_zone_pages_min(void) 4439void setup_per_zone_wmarks(void)
4237{ 4440{
4238 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 4441 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4239 unsigned long lowmem_pages = 0; 4442 unsigned long lowmem_pages = 0;
@@ -4258,7 +4461,7 @@ void setup_per_zone_pages_min(void)
4258 * need highmem pages, so cap pages_min to a small 4461 * need highmem pages, so cap pages_min to a small
4259 * value here. 4462 * value here.
4260 * 4463 *
4261 * The (pages_high-pages_low) and (pages_low-pages_min) 4464 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4262 * deltas controls asynch page reclaim, and so should 4465 * deltas controls asynch page reclaim, and so should
4263 * not be capped for highmem. 4466 * not be capped for highmem.
4264 */ 4467 */
@@ -4269,17 +4472,17 @@ void setup_per_zone_pages_min(void)
4269 min_pages = SWAP_CLUSTER_MAX; 4472 min_pages = SWAP_CLUSTER_MAX;
4270 if (min_pages > 128) 4473 if (min_pages > 128)
4271 min_pages = 128; 4474 min_pages = 128;
4272 zone->pages_min = min_pages; 4475 zone->watermark[WMARK_MIN] = min_pages;
4273 } else { 4476 } else {
4274 /* 4477 /*
4275 * If it's a lowmem zone, reserve a number of pages 4478 * If it's a lowmem zone, reserve a number of pages
4276 * proportionate to the zone's size. 4479 * proportionate to the zone's size.
4277 */ 4480 */
4278 zone->pages_min = tmp; 4481 zone->watermark[WMARK_MIN] = tmp;
4279 } 4482 }
4280 4483
4281 zone->pages_low = zone->pages_min + (tmp >> 2); 4484 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
4282 zone->pages_high = zone->pages_min + (tmp >> 1); 4485 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4283 setup_zone_migrate_reserve(zone); 4486 setup_zone_migrate_reserve(zone);
4284 spin_unlock_irqrestore(&zone->lock, flags); 4487 spin_unlock_irqrestore(&zone->lock, flags);
4285 } 4488 }
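
Note: a userspace model of the arithmetic in setup_per_zone_wmarks() as it appears in this hunk. Each zone gets a share of the global minimum proportional to its size, and the low/high marks sit 25% and 50% above it; the highmem capping branch and the locking are omitted, and the proportional-share division is assumed from the surrounding code:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assume 4K pages for the sketch */

    struct wmarks { unsigned long min, low, high; };

    static struct wmarks zone_wmarks_model(unsigned long min_free_kbytes,
                                           unsigned long zone_pages,
                                           unsigned long lowmem_pages)
    {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long tmp = pages_min * zone_pages / lowmem_pages;
        struct wmarks w;

        w.min  = tmp;                  /* zone->watermark[WMARK_MIN]  */
        w.low  = w.min + (tmp >> 2);   /* WMARK_LOW  = min + min/4    */
        w.high = w.min + (tmp >> 1);   /* WMARK_HIGH = min + min/2    */
        return w;
    }

    int main(void)
    {
        struct wmarks w = zone_wmarks_model(65536, 1UL << 18, 1UL << 18);

        printf("min=%lu low=%lu high=%lu pages\n", w.min, w.low, w.high);
        return 0;
    }
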
@@ -4289,8 +4492,6 @@ void setup_per_zone_pages_min(void)
4289} 4492}
4290 4493
4291/** 4494/**
4292 * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
4293 *
4294 * The inactive anon list should be small enough that the VM never has to 4495 * The inactive anon list should be small enough that the VM never has to
4295 * do too much work, but large enough that each inactive page has a chance 4496 * do too much work, but large enough that each inactive page has a chance
4296 * to be referenced again before it is swapped out. 4497 * to be referenced again before it is swapped out.
@@ -4311,21 +4512,26 @@ void setup_per_zone_pages_min(void)
4311 * 1TB 101 10GB 4512 * 1TB 101 10GB
4312 * 10TB 320 32GB 4513 * 10TB 320 32GB
4313 */ 4514 */
4314static void setup_per_zone_inactive_ratio(void) 4515void calculate_zone_inactive_ratio(struct zone *zone)
4315{ 4516{
4316 struct zone *zone; 4517 unsigned int gb, ratio;
4317
4318 for_each_zone(zone) {
4319 unsigned int gb, ratio;
4320 4518
4321 /* Zone size in gigabytes */ 4519 /* Zone size in gigabytes */
4322 gb = zone->present_pages >> (30 - PAGE_SHIFT); 4520 gb = zone->present_pages >> (30 - PAGE_SHIFT);
4521 if (gb)
4323 ratio = int_sqrt(10 * gb); 4522 ratio = int_sqrt(10 * gb);
4324 if (!ratio) 4523 else
4325 ratio = 1; 4524 ratio = 1;
4326 4525
4327 zone->inactive_ratio = ratio; 4526 zone->inactive_ratio = ratio;
4328 } 4527}
4528
4529static void __init setup_per_zone_inactive_ratio(void)
4530{
4531 struct zone *zone;
4532
4533 for_each_zone(zone)
4534 calculate_zone_inactive_ratio(zone);
4329} 4535}
4330 4536
4331/* 4537/*
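
Note: the refactor leaves the ratio formula intact: int_sqrt(10 * zone size in GB), floored at 1. A model that reproduces the table above (libm's sqrt stands in for the kernel's int_sqrt, which may differ by one in the last digit):

    #include <stdio.h>
    #include <math.h>   /* link with -lm */

    static unsigned int inactive_ratio_model(unsigned long gb)
    {
        unsigned int ratio;

        if (gb)
            ratio = (unsigned int)sqrt(10.0 * (double)gb);
        else
            ratio = 1;      /* the new gb==0 guard in this hunk */
        return ratio;
    }

    int main(void)
    {
        unsigned long sizes_gb[] = { 1, 10, 100, 1024, 10240 };

        for (int i = 0; i < 5; i++)   /* 1GB..10TB, as in the table above */
            printf("%6luGB -> %u\n", sizes_gb[i],
                   inactive_ratio_model(sizes_gb[i]));
        return 0;
    }
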
@@ -4352,7 +4558,7 @@ static void setup_per_zone_inactive_ratio(void)
4352 * 8192MB: 11584k 4558 * 8192MB: 11584k
4353 * 16384MB: 16384k 4559 * 16384MB: 16384k
4354 */ 4560 */
4355static int __init init_per_zone_pages_min(void) 4561static int __init init_per_zone_wmark_min(void)
4356{ 4562{
4357 unsigned long lowmem_kbytes; 4563 unsigned long lowmem_kbytes;
4358 4564
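
Note: the boot-time default this function computes is roughly sqrt(lowmem_kbytes * 16), then clamped by the checks visible below; the square-root form is recalled from the full function rather than shown in this hunk, so treat it as an assumption. A quick model against the table above:

    #include <stdio.h>
    #include <math.h>   /* link with -lm */

    static unsigned long min_free_kbytes_model(unsigned long lowmem_kbytes)
    {
        unsigned long v = (unsigned long)sqrt((double)lowmem_kbytes * 16.0);

        if (v < 128)            /* clamps as in the hunk below */
            v = 128;
        if (v > 65536)
            v = 65536;
        return v;
    }

    int main(void)
    {
        /* ~11584k and 16384k per the 8192MB/16384MB table entries above
         * (the kernel's int_sqrt may land one kB off the libm result) */
        printf("%lu %lu\n", min_free_kbytes_model(8192UL << 10),
               min_free_kbytes_model(16384UL << 10));
        return 0;
    }
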
@@ -4363,12 +4569,12 @@ static int __init init_per_zone_pages_min(void)
4363 min_free_kbytes = 128; 4569 min_free_kbytes = 128;
4364 if (min_free_kbytes > 65536) 4570 if (min_free_kbytes > 65536)
4365 min_free_kbytes = 65536; 4571 min_free_kbytes = 65536;
4366 setup_per_zone_pages_min(); 4572 setup_per_zone_wmarks();
4367 setup_per_zone_lowmem_reserve(); 4573 setup_per_zone_lowmem_reserve();
4368 setup_per_zone_inactive_ratio(); 4574 setup_per_zone_inactive_ratio();
4369 return 0; 4575 return 0;
4370} 4576}
4371module_init(init_per_zone_pages_min) 4577module_init(init_per_zone_wmark_min)
4372 4578
4373/* 4579/*
4374 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 4580 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
@@ -4380,7 +4586,7 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4380{ 4586{
4381 proc_dointvec(table, write, file, buffer, length, ppos); 4587 proc_dointvec(table, write, file, buffer, length, ppos);
4382 if (write) 4588 if (write)
4383 setup_per_zone_pages_min(); 4589 setup_per_zone_wmarks();
4384 return 0; 4590 return 0;
4385} 4591}
4386 4592
@@ -4424,7 +4630,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4424 * whenever sysctl_lowmem_reserve_ratio changes. 4630 * whenever sysctl_lowmem_reserve_ratio changes.
4425 * 4631 *
4426 * The reserve ratio obviously has absolutely no relation with the 4632 * The reserve ratio obviously has absolutely no relation with the
 4427 * pages_min watermarks. The lowmem reserve ratio only makes sense 4633 * minimum watermarks. The lowmem reserve ratio only makes sense
 4428 * as a function of the boot-time zone sizes. 4634 * as a function of the boot-time zone sizes.
4429 */ 4635 */
4430int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 4636int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
@@ -4531,23 +4737,13 @@ void *__init alloc_large_system_hash(const char *tablename,
4531 else if (hashdist) 4737 else if (hashdist)
4532 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 4738 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4533 else { 4739 else {
4534 unsigned long order = get_order(size);
4535 table = (void*) __get_free_pages(GFP_ATOMIC, order);
4536 /* 4740 /*
4537 * If bucketsize is not a power-of-two, we may free 4741 * If bucketsize is not a power-of-two, we may free
4538 * some pages at the end of hash table. 4742 * some pages at the end of hash table which
4743 * alloc_pages_exact() automatically does
4539 */ 4744 */
4540 if (table) { 4745 if (get_order(size) < MAX_ORDER)
4541 unsigned long alloc_end = (unsigned long)table + 4746 table = alloc_pages_exact(size, GFP_ATOMIC);
4542 (PAGE_SIZE << order);
4543 unsigned long used = (unsigned long)table +
4544 PAGE_ALIGN(size);
4545 split_page(virt_to_page(table), order);
4546 while (used < alloc_end) {
4547 free_page(used);
4548 used += PAGE_SIZE;
4549 }
4550 }
4551 } 4747 }
4552 } while (!table && size > PAGE_SIZE && --log2qty); 4748 } while (!table && size > PAGE_SIZE && --log2qty);
4553 4749
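
Note: alloc_pages_exact() now performs the split-and-trim that the removed block open-coded: allocate the covering power-of-two order, split it into order-0 pages, and free everything past PAGE_ALIGN(size). A userspace model of the bookkeeping:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned get_order_model(unsigned long size)
    {
        unsigned order = 0;

        while ((PAGE_SIZE << order) < size)   /* smallest covering order */
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long size = 5 * PAGE_SIZE;                /* non-power-of-two */
        unsigned order = get_order_model(size);
        unsigned long allocated = PAGE_SIZE << order;
        unsigned long used = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

        printf("order %u: allocated %lu, kept %lu, freed %lu bytes of tail\n",
               order, allocated, used, allocated - used);
        return 0;
    }
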
diff --git a/mm/page_io.c b/mm/page_io.c
index 3023c475e041..c6f3e5071de3 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -120,7 +120,7 @@ out:
120 return ret; 120 return ret;
121} 121}
122 122
123int swap_readpage(struct file *file, struct page *page) 123int swap_readpage(struct page *page)
124{ 124{
125 struct bio *bio; 125 struct bio *bio;
126 int ret = 0; 126 int ret = 0;
diff --git a/mm/readahead.c b/mm/readahead.c
index 133b6d525513..aa1aa2345235 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -133,15 +133,12 @@ out:
133} 133}
134 134
135/* 135/*
136 * do_page_cache_readahead actually reads a chunk of disk. It allocates all 136 * __do_page_cache_readahead() actually reads a chunk of disk. It allocates all
137 * the pages first, then submits them all for I/O. This avoids the very bad 137 * the pages first, then submits them all for I/O. This avoids the very bad
138 * behaviour which would occur if page allocations are causing VM writeback. 138 * behaviour which would occur if page allocations are causing VM writeback.
139 * We really don't want to intermingle reads and writes like that. 139 * We really don't want to intermingle reads and writes like that.
140 * 140 *
141 * Returns the number of pages requested, or the maximum amount of I/O allowed. 141 * Returns the number of pages requested, or the maximum amount of I/O allowed.
142 *
143 * do_page_cache_readahead() returns -1 if it encountered request queue
144 * congestion.
145 */ 142 */
146static int 143static int
147__do_page_cache_readahead(struct address_space *mapping, struct file *filp, 144__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -210,6 +207,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
210 if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages)) 207 if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
211 return -EINVAL; 208 return -EINVAL;
212 209
210 nr_to_read = max_sane_readahead(nr_to_read);
213 while (nr_to_read) { 211 while (nr_to_read) {
214 int err; 212 int err;
215 213
@@ -231,22 +229,6 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
231} 229}
232 230
233/* 231/*
234 * This version skips the IO if the queue is read-congested, and will tell the
235 * block layer to abandon the readahead if request allocation would block.
236 *
237 * force_page_cache_readahead() will ignore queue congestion and will block on
238 * request queues.
239 */
240int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
241 pgoff_t offset, unsigned long nr_to_read)
242{
243 if (bdi_read_congested(mapping->backing_dev_info))
244 return -1;
245
246 return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
247}
248
249/*
250 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a 232 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
251 * sensible upper limit. 233 * sensible upper limit.
252 */ 234 */
@@ -259,7 +241,7 @@ unsigned long max_sane_readahead(unsigned long nr)
259/* 241/*
260 * Submit IO for the read-ahead request in file_ra_state. 242 * Submit IO for the read-ahead request in file_ra_state.
261 */ 243 */
262static unsigned long ra_submit(struct file_ra_state *ra, 244unsigned long ra_submit(struct file_ra_state *ra,
263 struct address_space *mapping, struct file *filp) 245 struct address_space *mapping, struct file *filp)
264{ 246{
265 int actual; 247 int actual;
@@ -348,6 +330,59 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
348 */ 330 */
349 331
350/* 332/*
 333 * Count the contiguously cached pages from @offset-1 back to @offset-@max;
 334 * this count is a conservative estimate of
 335 * - the length of the sequential read sequence, or
 336 * - the thrashing threshold in memory-tight systems
337 */
338static pgoff_t count_history_pages(struct address_space *mapping,
339 struct file_ra_state *ra,
340 pgoff_t offset, unsigned long max)
341{
342 pgoff_t head;
343
344 rcu_read_lock();
345 head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
346 rcu_read_unlock();
347
348 return offset - 1 - head;
349}
350
351/*
 352 * page cache context-based readahead
353 */
354static int try_context_readahead(struct address_space *mapping,
355 struct file_ra_state *ra,
356 pgoff_t offset,
357 unsigned long req_size,
358 unsigned long max)
359{
360 pgoff_t size;
361
362 size = count_history_pages(mapping, ra, offset, max);
363
364 /*
365 * no history pages:
366 * it could be a random read
367 */
368 if (!size)
369 return 0;
370
371 /*
 372 * starts from the beginning of the file:
 373 * a strong indication of a long-running stream (or a whole-file read)
374 */
375 if (size >= offset)
376 size *= 2;
377
378 ra->start = offset;
379 ra->size = get_init_ra_size(size + req_size, max);
380 ra->async_size = ra->size;
381
382 return 1;
383}
384
385/*
351 * A minimal readahead algorithm for trivial sequential/random reads. 386 * A minimal readahead algorithm for trivial sequential/random reads.
352 */ 387 */
353static unsigned long 388static unsigned long
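
Note: count_history_pages() walks backwards from offset-1 through the page cache until it finds a hole (radix_tree_prev_hole), so it returns the length of the cached run immediately preceding the read. A toy array-backed model of that estimate and of the doubling rule in try_context_readahead():

    #include <stdio.h>

    /* cached[i] != 0 means page i is present in the page cache */
    static long count_history_model(const int *cached, long offset, long max)
    {
        long count = 0;

        while (count < max && offset - 1 - count >= 0 &&
               cached[offset - 1 - count])
            count++;
        return count;   /* conservative length of the preceding run */
    }

    int main(void)
    {
        int cached[8] = { 1, 1, 1, 1, 0, 0, 0, 0 };  /* pages 0-3 cached */
        long offset = 4;
        long size = count_history_model(cached, offset, 8);

        if (size >= offset)  /* run reaches file start: likely a stream */
            size *= 2;
        printf("history=%ld\n", size);
        return 0;
    }
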
@@ -356,34 +391,26 @@ ondemand_readahead(struct address_space *mapping,
356 bool hit_readahead_marker, pgoff_t offset, 391 bool hit_readahead_marker, pgoff_t offset,
357 unsigned long req_size) 392 unsigned long req_size)
358{ 393{
359 int max = ra->ra_pages; /* max readahead pages */ 394 unsigned long max = max_sane_readahead(ra->ra_pages);
360 pgoff_t prev_offset; 395
361 int sequential; 396 /*
397 * start of file
398 */
399 if (!offset)
400 goto initial_readahead;
362 401
363 /* 402 /*
364 * It's the expected callback offset, assume sequential access. 403 * It's the expected callback offset, assume sequential access.
365 * Ramp up sizes, and push forward the readahead window. 404 * Ramp up sizes, and push forward the readahead window.
366 */ 405 */
367 if (offset && (offset == (ra->start + ra->size - ra->async_size) || 406 if ((offset == (ra->start + ra->size - ra->async_size) ||
368 offset == (ra->start + ra->size))) { 407 offset == (ra->start + ra->size))) {
369 ra->start += ra->size; 408 ra->start += ra->size;
370 ra->size = get_next_ra_size(ra, max); 409 ra->size = get_next_ra_size(ra, max);
371 ra->async_size = ra->size; 410 ra->async_size = ra->size;
372 goto readit; 411 goto readit;
373 } 412 }
374 413
375 prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
376 sequential = offset - prev_offset <= 1UL || req_size > max;
377
378 /*
379 * Standalone, small read.
380 * Read as is, and do not pollute the readahead state.
381 */
382 if (!hit_readahead_marker && !sequential) {
383 return __do_page_cache_readahead(mapping, filp,
384 offset, req_size, 0);
385 }
386
387 /* 414 /*
388 * Hit a marked page without valid readahead state. 415 * Hit a marked page without valid readahead state.
389 * E.g. interleaved reads. 416 * E.g. interleaved reads.
@@ -394,7 +421,7 @@ ondemand_readahead(struct address_space *mapping,
394 pgoff_t start; 421 pgoff_t start;
395 422
396 rcu_read_lock(); 423 rcu_read_lock();
397 start = radix_tree_next_hole(&mapping->page_tree, offset,max+1); 424 start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
398 rcu_read_unlock(); 425 rcu_read_unlock();
399 426
400 if (!start || start - offset > max) 427 if (!start || start - offset > max)
@@ -402,23 +429,53 @@ ondemand_readahead(struct address_space *mapping,
402 429
403 ra->start = start; 430 ra->start = start;
404 ra->size = start - offset; /* old async_size */ 431 ra->size = start - offset; /* old async_size */
432 ra->size += req_size;
405 ra->size = get_next_ra_size(ra, max); 433 ra->size = get_next_ra_size(ra, max);
406 ra->async_size = ra->size; 434 ra->async_size = ra->size;
407 goto readit; 435 goto readit;
408 } 436 }
409 437
410 /* 438 /*
411 * It may be one of 439 * oversize read
412 * - first read on start of file 440 */
413 * - sequential cache miss 441 if (req_size > max)
414 * - oversize random read 442 goto initial_readahead;
415 * Start readahead for it. 443
444 /*
445 * sequential cache miss
446 */
447 if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
448 goto initial_readahead;
449
450 /*
 451 * Query the page cache and look for the traces (cached history pages)
452 * that a sequential stream would leave behind.
453 */
454 if (try_context_readahead(mapping, ra, offset, req_size, max))
455 goto readit;
456
457 /*
458 * standalone, small random read
459 * Read as is, and do not pollute the readahead state.
416 */ 460 */
461 return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
462
463initial_readahead:
417 ra->start = offset; 464 ra->start = offset;
418 ra->size = get_init_ra_size(req_size, max); 465 ra->size = get_init_ra_size(req_size, max);
419 ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size; 466 ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
420 467
421readit: 468readit:
469 /*
470 * Will this read hit the readahead marker made by itself?
471 * If so, trigger the readahead marker hit now, and merge
 472 * the resulting next readahead window into the current one.
473 */
474 if (offset == ra->start && ra->size == ra->async_size) {
475 ra->async_size = get_next_ra_size(ra, max);
476 ra->size += ra->async_size;
477 }
478
422 return ra_submit(ra, mapping, filp); 479 return ra_submit(ra, mapping, filp);
423} 480}
424 481
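
Note: the rework turns ondemand_readahead() into an ordered classifier. The sketch below mirrors the order of the checks in this hunk; the names are illustrative, and the real function then sizes the window and calls ra_submit():

    #include <stdio.h>

    enum ra_path { RA_INITIAL, RA_RAMP, RA_MARKER, RA_CONTEXT, RA_RANDOM };

    static enum ra_path classify_model(unsigned long offset,
                                       unsigned long req_size,
                                       unsigned long max,
                                       unsigned long ra_start,
                                       unsigned long ra_size,
                                       unsigned long ra_async,
                                       int hit_marker,
                                       unsigned long prev_offset,
                                       int context_hit)
    {
        if (!offset)
            return RA_INITIAL;                        /* start of file */
        if (offset == ra_start + ra_size - ra_async ||
            offset == ra_start + ra_size)
            return RA_RAMP;                           /* expected offset */
        if (hit_marker)
            return RA_MARKER;                         /* interleaved reads */
        if (req_size > max)
            return RA_INITIAL;                        /* oversize read */
        if (offset - prev_offset <= 1UL)
            return RA_INITIAL;                        /* sequential miss */
        if (context_hit)
            return RA_CONTEXT;                        /* cached history */
        return RA_RANDOM;                             /* read as-is */
    }

    int main(void)
    {
        printf("%d\n", classify_model(0, 8, 32, 0, 0, 0, 0, 0, 0));
        return 0;
    }
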
diff --git a/mm/rmap.c b/mm/rmap.c
index 23122af32611..c9ccc1a72dc3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -333,7 +333,9 @@ static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
333 * repeatedly from either page_referenced_anon or page_referenced_file. 333 * repeatedly from either page_referenced_anon or page_referenced_file.
334 */ 334 */
335static int page_referenced_one(struct page *page, 335static int page_referenced_one(struct page *page,
336 struct vm_area_struct *vma, unsigned int *mapcount) 336 struct vm_area_struct *vma,
337 unsigned int *mapcount,
338 unsigned long *vm_flags)
337{ 339{
338 struct mm_struct *mm = vma->vm_mm; 340 struct mm_struct *mm = vma->vm_mm;
339 unsigned long address; 341 unsigned long address;
@@ -381,11 +383,14 @@ out_unmap:
381 (*mapcount)--; 383 (*mapcount)--;
382 pte_unmap_unlock(pte, ptl); 384 pte_unmap_unlock(pte, ptl);
383out: 385out:
386 if (referenced)
387 *vm_flags |= vma->vm_flags;
384 return referenced; 388 return referenced;
385} 389}
386 390
387static int page_referenced_anon(struct page *page, 391static int page_referenced_anon(struct page *page,
388 struct mem_cgroup *mem_cont) 392 struct mem_cgroup *mem_cont,
393 unsigned long *vm_flags)
389{ 394{
390 unsigned int mapcount; 395 unsigned int mapcount;
391 struct anon_vma *anon_vma; 396 struct anon_vma *anon_vma;
@@ -405,7 +410,8 @@ static int page_referenced_anon(struct page *page,
405 */ 410 */
406 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 411 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
407 continue; 412 continue;
408 referenced += page_referenced_one(page, vma, &mapcount); 413 referenced += page_referenced_one(page, vma,
414 &mapcount, vm_flags);
409 if (!mapcount) 415 if (!mapcount)
410 break; 416 break;
411 } 417 }
@@ -418,6 +424,7 @@ static int page_referenced_anon(struct page *page,
418 * page_referenced_file - referenced check for object-based rmap 424 * page_referenced_file - referenced check for object-based rmap
419 * @page: the page we're checking references on. 425 * @page: the page we're checking references on.
420 * @mem_cont: target memory controller 426 * @mem_cont: target memory controller
 427 * @vm_flags: collect the vma->vm_flags of mappings that actually referenced the page
421 * 428 *
422 * For an object-based mapped page, find all the places it is mapped and 429 * For an object-based mapped page, find all the places it is mapped and
423 * check/clear the referenced flag. This is done by following the page->mapping 430 * check/clear the referenced flag. This is done by following the page->mapping
@@ -427,7 +434,8 @@ static int page_referenced_anon(struct page *page,
427 * This function is only called from page_referenced for object-based pages. 434 * This function is only called from page_referenced for object-based pages.
428 */ 435 */
429static int page_referenced_file(struct page *page, 436static int page_referenced_file(struct page *page,
430 struct mem_cgroup *mem_cont) 437 struct mem_cgroup *mem_cont,
438 unsigned long *vm_flags)
431{ 439{
432 unsigned int mapcount; 440 unsigned int mapcount;
433 struct address_space *mapping = page->mapping; 441 struct address_space *mapping = page->mapping;
@@ -467,7 +475,8 @@ static int page_referenced_file(struct page *page,
467 */ 475 */
468 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 476 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
469 continue; 477 continue;
470 referenced += page_referenced_one(page, vma, &mapcount); 478 referenced += page_referenced_one(page, vma,
479 &mapcount, vm_flags);
471 if (!mapcount) 480 if (!mapcount)
472 break; 481 break;
473 } 482 }
@@ -481,29 +490,35 @@ static int page_referenced_file(struct page *page,
481 * @page: the page to test 490 * @page: the page to test
482 * @is_locked: caller holds lock on the page 491 * @is_locked: caller holds lock on the page
483 * @mem_cont: target memory controller 492 * @mem_cont: target memory controller
 493 * @vm_flags: collect the vma->vm_flags of mappings that actually referenced the page
484 * 494 *
485 * Quick test_and_clear_referenced for all mappings to a page, 495 * Quick test_and_clear_referenced for all mappings to a page,
486 * returns the number of ptes which referenced the page. 496 * returns the number of ptes which referenced the page.
487 */ 497 */
488int page_referenced(struct page *page, int is_locked, 498int page_referenced(struct page *page,
489 struct mem_cgroup *mem_cont) 499 int is_locked,
500 struct mem_cgroup *mem_cont,
501 unsigned long *vm_flags)
490{ 502{
491 int referenced = 0; 503 int referenced = 0;
492 504
493 if (TestClearPageReferenced(page)) 505 if (TestClearPageReferenced(page))
494 referenced++; 506 referenced++;
495 507
508 *vm_flags = 0;
496 if (page_mapped(page) && page->mapping) { 509 if (page_mapped(page) && page->mapping) {
497 if (PageAnon(page)) 510 if (PageAnon(page))
498 referenced += page_referenced_anon(page, mem_cont); 511 referenced += page_referenced_anon(page, mem_cont,
512 vm_flags);
499 else if (is_locked) 513 else if (is_locked)
500 referenced += page_referenced_file(page, mem_cont); 514 referenced += page_referenced_file(page, mem_cont,
515 vm_flags);
501 else if (!trylock_page(page)) 516 else if (!trylock_page(page))
502 referenced++; 517 referenced++;
503 else { 518 else {
504 if (page->mapping) 519 if (page->mapping)
505 referenced += 520 referenced += page_referenced_file(page,
506 page_referenced_file(page, mem_cont); 521 mem_cont, vm_flags);
507 unlock_page(page); 522 unlock_page(page);
508 } 523 }
509 } 524 }
@@ -1202,7 +1217,6 @@ int try_to_unmap(struct page *page, int migration)
1202 return ret; 1217 return ret;
1203} 1218}
1204 1219
1205#ifdef CONFIG_UNEVICTABLE_LRU
1206/** 1220/**
1207 * try_to_munlock - try to munlock a page 1221 * try_to_munlock - try to munlock a page
1208 * @page: the page to be munlocked 1222 * @page: the page to be munlocked
@@ -1226,4 +1240,4 @@ int try_to_munlock(struct page *page)
1226 else 1240 else
1227 return try_to_unmap_file(page, 1, 0); 1241 return try_to_unmap_file(page, 1, 0);
1228} 1242}
1229#endif 1243
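
Note: the new vm_flags out-parameter is filled only from vmas whose mapping was actually referenced (see the "if (referenced)" hunk in page_referenced_one() above), so a caller can tell what kind of mapping kept the page alive. A userspace model of that accumulation, with stand-in flag bits:

    #include <stdio.h>

    #define VM_EXEC   0x4UL    /* stand-in flag values for the sketch */
    #define VM_LOCKED 0x8UL

    struct vma_model { unsigned long vm_flags; int referenced; };

    static int page_referenced_model(const struct vma_model *vmas, int n,
                                     unsigned long *vm_flags)
    {
        int referenced = 0;

        *vm_flags = 0;
        for (int i = 0; i < n; i++) {
            if (vmas[i].referenced) {
                referenced++;
                *vm_flags |= vmas[i].vm_flags;  /* only referencing vmas */
            }
        }
        return referenced;
    }

    int main(void)
    {
        struct vma_model v[] = { { VM_EXEC, 1 }, { VM_LOCKED, 0 } };
        unsigned long flags;
        int ref = page_referenced_model(v, 2, &flags);

        printf("referenced=%d flags=0x%lx\n", ref, flags); /* VM_LOCKED unset */
        return 0;
    }
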
diff --git a/mm/shmem.c b/mm/shmem.c
index 0132fbd45a23..e89d7ec18eda 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1097,7 +1097,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1097 shmem_swp_unmap(entry); 1097 shmem_swp_unmap(entry);
1098unlock: 1098unlock:
1099 spin_unlock(&info->lock); 1099 spin_unlock(&info->lock);
1100 swap_free(swap); 1100 swapcache_free(swap, NULL);
1101redirty: 1101redirty:
1102 set_page_dirty(page); 1102 set_page_dirty(page);
1103 if (wbc->for_reclaim) 1103 if (wbc->for_reclaim)
@@ -2612,7 +2612,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
2612 * @size: size to be set for the file 2612 * @size: size to be set for the file
2613 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 2613 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2614 */ 2614 */
2615struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) 2615struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2616{ 2616{
2617 int error; 2617 int error;
2618 struct file *file; 2618 struct file *file;
diff --git a/mm/slab.c b/mm/slab.c
index af3376d0a833..f257d4dd474d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -818,7 +818,6 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
818 */ 818 */
819 819
820static int use_alien_caches __read_mostly = 1; 820static int use_alien_caches __read_mostly = 1;
821static int numa_platform __read_mostly = 1;
822static int __init noaliencache_setup(char *s) 821static int __init noaliencache_setup(char *s)
823{ 822{
824 use_alien_caches = 0; 823 use_alien_caches = 0;
@@ -1377,10 +1376,8 @@ void __init kmem_cache_init(void)
1377 int order; 1376 int order;
1378 int node; 1377 int node;
1379 1378
1380 if (num_possible_nodes() == 1) { 1379 if (num_possible_nodes() == 1)
1381 use_alien_caches = 0; 1380 use_alien_caches = 0;
1382 numa_platform = 0;
1383 }
1384 1381
1385 for (i = 0; i < NUM_INIT_LISTS; i++) { 1382 for (i = 0; i < NUM_INIT_LISTS; i++) {
1386 kmem_list3_init(&initkmem_list3[i]); 1383 kmem_list3_init(&initkmem_list3[i]);
@@ -1627,7 +1624,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1627 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1624 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1628 flags |= __GFP_RECLAIMABLE; 1625 flags |= __GFP_RECLAIMABLE;
1629 1626
1630 page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); 1627 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1631 if (!page) 1628 if (!page)
1632 return NULL; 1629 return NULL;
1633 1630
@@ -3193,7 +3190,7 @@ retry:
3193 if (local_flags & __GFP_WAIT) 3190 if (local_flags & __GFP_WAIT)
3194 local_irq_enable(); 3191 local_irq_enable();
3195 kmem_flagcheck(cache, flags); 3192 kmem_flagcheck(cache, flags);
3196 obj = kmem_getpages(cache, local_flags, -1); 3193 obj = kmem_getpages(cache, local_flags, numa_node_id());
3197 if (local_flags & __GFP_WAIT) 3194 if (local_flags & __GFP_WAIT)
3198 local_irq_disable(); 3195 local_irq_disable();
3199 if (obj) { 3196 if (obj) {
@@ -3530,7 +3527,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3530 * variable to skip the call, which is mostly likely to be present in 3527 * variable to skip the call, which is mostly likely to be present in
3531 * the cache. 3528 * the cache.
3532 */ 3529 */
3533 if (numa_platform && cache_free_alien(cachep, objp)) 3530 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3534 return; 3531 return;
3535 3532
3536 if (likely(ac->avail < ac->limit)) { 3533 if (likely(ac->avail < ac->limit)) {
diff --git a/mm/slob.c b/mm/slob.c
index 12f261499925..64f6db1943bf 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -46,7 +46,7 @@
46 * NUMA support in SLOB is fairly simplistic, pushing most of the real 46 * NUMA support in SLOB is fairly simplistic, pushing most of the real
47 * logic down to the page allocator, and simply doing the node accounting 47 * logic down to the page allocator, and simply doing the node accounting
48 * on the upper levels. In the event that a node id is explicitly 48 * on the upper levels. In the event that a node id is explicitly
49 * provided, alloc_pages_node() with the specified node id is used 49 * provided, alloc_pages_exact_node() with the specified node id is used
50 * instead. The common case (or when the node id isn't explicitly provided) 50 * instead. The common case (or when the node id isn't explicitly provided)
51 * will default to the current node, as per numa_node_id(). 51 * will default to the current node, as per numa_node_id().
52 * 52 *
@@ -244,7 +244,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
244 244
245#ifdef CONFIG_NUMA 245#ifdef CONFIG_NUMA
246 if (node != -1) 246 if (node != -1)
247 page = alloc_pages_node(node, gfp, order); 247 page = alloc_pages_exact_node(node, gfp, order);
248 else 248 else
249#endif 249#endif
250 page = alloc_pages(gfp, order); 250 page = alloc_pages(gfp, order);
diff --git a/mm/slub.c b/mm/slub.c
index 15960a09abb1..2701419b0adc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3765,7 +3765,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
3765 to_cpumask(l->cpus)); 3765 to_cpumask(l->cpus));
3766 } 3766 }
3767 3767
3768 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && 3768 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
3769 len < PAGE_SIZE - 60) { 3769 len < PAGE_SIZE - 60) {
3770 len += sprintf(buf + len, " nodes="); 3770 len += sprintf(buf + len, " nodes=");
3771 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3771 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 1416e7e9e02d..42cd38eba79f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -124,7 +124,6 @@ void __delete_from_swap_cache(struct page *page)
124/** 124/**
125 * add_to_swap - allocate swap space for a page 125 * add_to_swap - allocate swap space for a page
126 * @page: page we want to move to swap 126 * @page: page we want to move to swap
127 * @gfp_mask: memory allocation flags
128 * 127 *
129 * Allocate swap space for the page and add the page to the 128 * Allocate swap space for the page and add the page to the
130 * swap cache. Caller needs to hold the page lock. 129 * swap cache. Caller needs to hold the page lock.
@@ -162,11 +161,11 @@ int add_to_swap(struct page *page)
162 return 1; 161 return 1;
163 case -EEXIST: 162 case -EEXIST:
164 /* Raced with "speculative" read_swap_cache_async */ 163 /* Raced with "speculative" read_swap_cache_async */
165 swap_free(entry); 164 swapcache_free(entry, NULL);
166 continue; 165 continue;
167 default: 166 default:
168 /* -ENOMEM radix-tree allocation failure */ 167 /* -ENOMEM radix-tree allocation failure */
169 swap_free(entry); 168 swapcache_free(entry, NULL);
170 return 0; 169 return 0;
171 } 170 }
172 } 171 }
@@ -188,8 +187,7 @@ void delete_from_swap_cache(struct page *page)
188 __delete_from_swap_cache(page); 187 __delete_from_swap_cache(page);
189 spin_unlock_irq(&swapper_space.tree_lock); 188 spin_unlock_irq(&swapper_space.tree_lock);
190 189
191 mem_cgroup_uncharge_swapcache(page, entry); 190 swapcache_free(entry, page);
192 swap_free(entry);
193 page_cache_release(page); 191 page_cache_release(page);
194} 192}
195 193
@@ -293,7 +291,10 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
293 /* 291 /*
294 * Swap entry may have been freed since our caller observed it. 292 * Swap entry may have been freed since our caller observed it.
295 */ 293 */
296 if (!swap_duplicate(entry)) 294 err = swapcache_prepare(entry);
295 if (err == -EEXIST) /* seems racy */
296 continue;
297 if (err) /* swp entry is obsolete ? */
297 break; 298 break;
298 299
299 /* 300 /*
@@ -312,12 +313,12 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
312 * Initiate read into locked page and return. 313 * Initiate read into locked page and return.
313 */ 314 */
314 lru_cache_add_anon(new_page); 315 lru_cache_add_anon(new_page);
315 swap_readpage(NULL, new_page); 316 swap_readpage(new_page);
316 return new_page; 317 return new_page;
317 } 318 }
318 ClearPageSwapBacked(new_page); 319 ClearPageSwapBacked(new_page);
319 __clear_page_locked(new_page); 320 __clear_page_locked(new_page);
320 swap_free(entry); 321 swapcache_free(entry, NULL);
321 } while (err != -ENOMEM); 322 } while (err != -ENOMEM);
322 323
323 if (new_page) 324 if (new_page)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 312fafe0ab6e..28faa01cf578 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -53,6 +53,59 @@ static struct swap_info_struct swap_info[MAX_SWAPFILES];
53 53
54static DEFINE_MUTEX(swapon_mutex); 54static DEFINE_MUTEX(swapon_mutex);
55 55
56/* For reference count accounting in swap_map */
 57/* enum for swap_map[] handling; internal use only */
58enum {
59 SWAP_MAP = 0, /* ops for reference from swap users */
60 SWAP_CACHE, /* ops for reference from swap cache */
61};
62
63static inline int swap_count(unsigned short ent)
64{
65 return ent & SWAP_COUNT_MASK;
66}
67
68static inline bool swap_has_cache(unsigned short ent)
69{
70 return !!(ent & SWAP_HAS_CACHE);
71}
72
73static inline unsigned short encode_swapmap(int count, bool has_cache)
74{
75 unsigned short ret = count;
76
77 if (has_cache)
78 return SWAP_HAS_CACHE | ret;
79 return ret;
80}
81
 82/* returns 1 if the swap entry was freed */
83static int
84__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
85{
86 int type = si - swap_info;
87 swp_entry_t entry = swp_entry(type, offset);
88 struct page *page;
89 int ret = 0;
90
91 page = find_get_page(&swapper_space, entry.val);
92 if (!page)
93 return 0;
94 /*
 95 * This function is called from scan_swap_map(), which is reached from
 96 * vmscan.c while it is reclaiming pages, so a page lock may already be
 97 * held here; we have to use trylock to avoid deadlock. This is a
 98 * special case: in normal operation use try_to_free_swap() with an
 99 * explicit lock_page().
100 */
101 if (trylock_page(page)) {
102 ret = try_to_free_swap(page);
103 unlock_page(page);
104 }
105 page_cache_release(page);
106 return ret;
107}
108
56/* 109/*
57 * We need this because the bdev->unplug_fn can sleep and we cannot 110 * We need this because the bdev->unplug_fn can sleep and we cannot
58 * hold swap_lock while calling the unplug_fn. And swap_lock 111 * hold swap_lock while calling the unplug_fn. And swap_lock
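
Note: the swap_map entry now packs two kinds of references into one unsigned short: a user count in the low bits and a swap-cache flag bit. The SWAP_HAS_CACHE/SWAP_COUNT_MASK values below are assumptions consistent with the 0x7ffe remark later in this diff, not taken from this hunk. A round-trip model:

    #include <stdio.h>
    #include <stdbool.h>

    #define SWAP_HAS_CACHE  0x8000u   /* assumed top-bit layout */
    #define SWAP_COUNT_MASK 0x7fffu

    static unsigned short encode_swapmap_model(int count, bool has_cache)
    {
        return (unsigned short)(count | (has_cache ? SWAP_HAS_CACHE : 0));
    }

    static int swap_count_model(unsigned short ent)
    {
        return ent & SWAP_COUNT_MASK;
    }

    static bool swap_has_cache_model(unsigned short ent)
    {
        return ent & SWAP_HAS_CACHE;
    }

    int main(void)
    {
        /* usual swap-out via vmscan: cache reference only, no users yet */
        unsigned short e = encode_swapmap_model(0, true);

        printf("count=%d cache=%d raw=0x%04x\n",
               swap_count_model(e), swap_has_cache_model(e), e);
        /* the slot is truly free only once both parts decode to zero */
        return 0;
    }
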
@@ -167,7 +220,8 @@ static int wait_for_discard(void *word)
167#define SWAPFILE_CLUSTER 256 220#define SWAPFILE_CLUSTER 256
168#define LATENCY_LIMIT 256 221#define LATENCY_LIMIT 256
169 222
170static inline unsigned long scan_swap_map(struct swap_info_struct *si) 223static inline unsigned long scan_swap_map(struct swap_info_struct *si,
224 int cache)
171{ 225{
172 unsigned long offset; 226 unsigned long offset;
173 unsigned long scan_base; 227 unsigned long scan_base;
@@ -273,6 +327,19 @@ checks:
273 goto no_page; 327 goto no_page;
274 if (offset > si->highest_bit) 328 if (offset > si->highest_bit)
275 scan_base = offset = si->lowest_bit; 329 scan_base = offset = si->lowest_bit;
330
331 /* reuse swap entry of cache-only swap if not busy. */
332 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
333 int swap_was_freed;
334 spin_unlock(&swap_lock);
335 swap_was_freed = __try_to_reclaim_swap(si, offset);
336 spin_lock(&swap_lock);
337 /* entry was freed successfully, try to use this again */
338 if (swap_was_freed)
339 goto checks;
340 goto scan; /* check next one */
341 }
342
276 if (si->swap_map[offset]) 343 if (si->swap_map[offset])
277 goto scan; 344 goto scan;
278 345
@@ -285,7 +352,10 @@ checks:
285 si->lowest_bit = si->max; 352 si->lowest_bit = si->max;
286 si->highest_bit = 0; 353 si->highest_bit = 0;
287 } 354 }
288 si->swap_map[offset] = 1; 355 if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */
356 si->swap_map[offset] = encode_swapmap(0, true);
357 else /* at suspend */
358 si->swap_map[offset] = encode_swapmap(1, false);
289 si->cluster_next = offset + 1; 359 si->cluster_next = offset + 1;
290 si->flags -= SWP_SCANNING; 360 si->flags -= SWP_SCANNING;
291 361
@@ -351,6 +421,10 @@ scan:
351 spin_lock(&swap_lock); 421 spin_lock(&swap_lock);
352 goto checks; 422 goto checks;
353 } 423 }
424 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
425 spin_lock(&swap_lock);
426 goto checks;
427 }
354 if (unlikely(--latency_ration < 0)) { 428 if (unlikely(--latency_ration < 0)) {
355 cond_resched(); 429 cond_resched();
356 latency_ration = LATENCY_LIMIT; 430 latency_ration = LATENCY_LIMIT;
@@ -362,6 +436,10 @@ scan:
362 spin_lock(&swap_lock); 436 spin_lock(&swap_lock);
363 goto checks; 437 goto checks;
364 } 438 }
439 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
440 spin_lock(&swap_lock);
441 goto checks;
442 }
365 if (unlikely(--latency_ration < 0)) { 443 if (unlikely(--latency_ration < 0)) {
366 cond_resched(); 444 cond_resched();
367 latency_ration = LATENCY_LIMIT; 445 latency_ration = LATENCY_LIMIT;
@@ -401,7 +479,8 @@ swp_entry_t get_swap_page(void)
401 continue; 479 continue;
402 480
403 swap_list.next = next; 481 swap_list.next = next;
 404 offset = scan_swap_map(si); 482 /* This is called when allocating a swap entry for the swap cache */
483 offset = scan_swap_map(si, SWAP_CACHE);
405 if (offset) { 484 if (offset) {
406 spin_unlock(&swap_lock); 485 spin_unlock(&swap_lock);
407 return swp_entry(type, offset); 486 return swp_entry(type, offset);
@@ -415,6 +494,7 @@ noswap:
415 return (swp_entry_t) {0}; 494 return (swp_entry_t) {0};
416} 495}
417 496
 497/* The only caller of this function is now the suspend routine */
418swp_entry_t get_swap_page_of_type(int type) 498swp_entry_t get_swap_page_of_type(int type)
419{ 499{
420 struct swap_info_struct *si; 500 struct swap_info_struct *si;
@@ -424,7 +504,8 @@ swp_entry_t get_swap_page_of_type(int type)
424 si = swap_info + type; 504 si = swap_info + type;
425 if (si->flags & SWP_WRITEOK) { 505 if (si->flags & SWP_WRITEOK) {
426 nr_swap_pages--; 506 nr_swap_pages--;
 427 offset = scan_swap_map(si); 507 /* This is called when allocating a swap entry, not a cache slot */
508 offset = scan_swap_map(si, SWAP_MAP);
428 if (offset) { 509 if (offset) {
429 spin_unlock(&swap_lock); 510 spin_unlock(&swap_lock);
430 return swp_entry(type, offset); 511 return swp_entry(type, offset);
@@ -471,25 +552,38 @@ out:
471 return NULL; 552 return NULL;
472} 553}
473 554
474static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent) 555static int swap_entry_free(struct swap_info_struct *p,
556 swp_entry_t ent, int cache)
475{ 557{
476 unsigned long offset = swp_offset(ent); 558 unsigned long offset = swp_offset(ent);
477 int count = p->swap_map[offset]; 559 int count = swap_count(p->swap_map[offset]);
478 560 bool has_cache;
479 if (count < SWAP_MAP_MAX) { 561
480 count--; 562 has_cache = swap_has_cache(p->swap_map[offset]);
481 p->swap_map[offset] = count; 563
482 if (!count) { 564 if (cache == SWAP_MAP) { /* dropping usage count of swap */
483 if (offset < p->lowest_bit) 565 if (count < SWAP_MAP_MAX) {
484 p->lowest_bit = offset; 566 count--;
485 if (offset > p->highest_bit) 567 p->swap_map[offset] = encode_swapmap(count, has_cache);
486 p->highest_bit = offset;
487 if (p->prio > swap_info[swap_list.next].prio)
488 swap_list.next = p - swap_info;
489 nr_swap_pages++;
490 p->inuse_pages--;
491 mem_cgroup_uncharge_swap(ent);
492 } 568 }
569 } else { /* dropping swap cache flag */
570 VM_BUG_ON(!has_cache);
571 p->swap_map[offset] = encode_swapmap(count, false);
572
573 }
574 /* return code. */
575 count = p->swap_map[offset];
576 /* free if no reference */
577 if (!count) {
578 if (offset < p->lowest_bit)
579 p->lowest_bit = offset;
580 if (offset > p->highest_bit)
581 p->highest_bit = offset;
582 if (p->prio > swap_info[swap_list.next].prio)
583 swap_list.next = p - swap_info;
584 nr_swap_pages++;
585 p->inuse_pages--;
586 mem_cgroup_uncharge_swap(ent);
493 } 587 }
494 return count; 588 return count;
495} 589}
@@ -504,9 +598,26 @@ void swap_free(swp_entry_t entry)
504 598
505 p = swap_info_get(entry); 599 p = swap_info_get(entry);
506 if (p) { 600 if (p) {
507 swap_entry_free(p, entry); 601 swap_entry_free(p, entry, SWAP_MAP);
602 spin_unlock(&swap_lock);
603 }
604}
605
606/*
607 * Called after dropping swapcache to decrease refcnt to swap entries.
608 */
609void swapcache_free(swp_entry_t entry, struct page *page)
610{
611 struct swap_info_struct *p;
612
613 if (page)
614 mem_cgroup_uncharge_swapcache(page, entry);
615 p = swap_info_get(entry);
616 if (p) {
617 swap_entry_free(p, entry, SWAP_CACHE);
508 spin_unlock(&swap_lock); 618 spin_unlock(&swap_lock);
509 } 619 }
620 return;
510} 621}
511 622
512/* 623/*
@@ -521,8 +632,7 @@ static inline int page_swapcount(struct page *page)
521 entry.val = page_private(page); 632 entry.val = page_private(page);
522 p = swap_info_get(entry); 633 p = swap_info_get(entry);
523 if (p) { 634 if (p) {
524 /* Subtract the 1 for the swap cache itself */ 635 count = swap_count(p->swap_map[swp_offset(entry)]);
525 count = p->swap_map[swp_offset(entry)] - 1;
526 spin_unlock(&swap_lock); 636 spin_unlock(&swap_lock);
527 } 637 }
528 return count; 638 return count;
@@ -584,7 +694,7 @@ int free_swap_and_cache(swp_entry_t entry)
584 694
585 p = swap_info_get(entry); 695 p = swap_info_get(entry);
586 if (p) { 696 if (p) {
587 if (swap_entry_free(p, entry) == 1) { 697 if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) {
588 page = find_get_page(&swapper_space, entry.val); 698 page = find_get_page(&swapper_space, entry.val);
589 if (page && !trylock_page(page)) { 699 if (page && !trylock_page(page)) {
590 page_cache_release(page); 700 page_cache_release(page);
@@ -891,7 +1001,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
891 i = 1; 1001 i = 1;
892 } 1002 }
893 count = si->swap_map[i]; 1003 count = si->swap_map[i];
894 if (count && count != SWAP_MAP_BAD) 1004 if (count && swap_count(count) != SWAP_MAP_BAD)
895 break; 1005 break;
896 } 1006 }
897 return i; 1007 return i;
@@ -995,13 +1105,13 @@ static int try_to_unuse(unsigned int type)
995 */ 1105 */
996 shmem = 0; 1106 shmem = 0;
997 swcount = *swap_map; 1107 swcount = *swap_map;
998 if (swcount > 1) { 1108 if (swap_count(swcount)) {
999 if (start_mm == &init_mm) 1109 if (start_mm == &init_mm)
1000 shmem = shmem_unuse(entry, page); 1110 shmem = shmem_unuse(entry, page);
1001 else 1111 else
1002 retval = unuse_mm(start_mm, entry, page); 1112 retval = unuse_mm(start_mm, entry, page);
1003 } 1113 }
1004 if (*swap_map > 1) { 1114 if (swap_count(*swap_map)) {
1005 int set_start_mm = (*swap_map >= swcount); 1115 int set_start_mm = (*swap_map >= swcount);
1006 struct list_head *p = &start_mm->mmlist; 1116 struct list_head *p = &start_mm->mmlist;
1007 struct mm_struct *new_start_mm = start_mm; 1117 struct mm_struct *new_start_mm = start_mm;
@@ -1011,7 +1121,7 @@ static int try_to_unuse(unsigned int type)
1011 atomic_inc(&new_start_mm->mm_users); 1121 atomic_inc(&new_start_mm->mm_users);
1012 atomic_inc(&prev_mm->mm_users); 1122 atomic_inc(&prev_mm->mm_users);
1013 spin_lock(&mmlist_lock); 1123 spin_lock(&mmlist_lock);
1014 while (*swap_map > 1 && !retval && !shmem && 1124 while (swap_count(*swap_map) && !retval && !shmem &&
1015 (p = p->next) != &start_mm->mmlist) { 1125 (p = p->next) != &start_mm->mmlist) {
1016 mm = list_entry(p, struct mm_struct, mmlist); 1126 mm = list_entry(p, struct mm_struct, mmlist);
1017 if (!atomic_inc_not_zero(&mm->mm_users)) 1127 if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1023,14 +1133,16 @@ static int try_to_unuse(unsigned int type)
1023 cond_resched(); 1133 cond_resched();
1024 1134
1025 swcount = *swap_map; 1135 swcount = *swap_map;
1026 if (swcount <= 1) 1136 if (!swap_count(swcount)) /* any usage ? */
1027 ; 1137 ;
1028 else if (mm == &init_mm) { 1138 else if (mm == &init_mm) {
1029 set_start_mm = 1; 1139 set_start_mm = 1;
1030 shmem = shmem_unuse(entry, page); 1140 shmem = shmem_unuse(entry, page);
1031 } else 1141 } else
1032 retval = unuse_mm(mm, entry, page); 1142 retval = unuse_mm(mm, entry, page);
1033 if (set_start_mm && *swap_map < swcount) { 1143
1144 if (set_start_mm &&
1145 swap_count(*swap_map) < swcount) {
1034 mmput(new_start_mm); 1146 mmput(new_start_mm);
1035 atomic_inc(&mm->mm_users); 1147 atomic_inc(&mm->mm_users);
1036 new_start_mm = mm; 1148 new_start_mm = mm;
@@ -1057,21 +1169,25 @@ static int try_to_unuse(unsigned int type)
1057 } 1169 }
1058 1170
1059 /* 1171 /*
1060 * How could swap count reach 0x7fff when the maximum 1172 * How could swap count reach 0x7ffe?
1061 * pid is 0x7fff, and there's no way to repeat a swap 1173 * There's no way to repeat a swap page within an mm
1062 * page within an mm (except in shmem, where it's the 1174 * (except in shmem, where it's the shared object which takes
1063 * shared object which takes the reference count)? 1175 * the reference count)?
1064 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4. 1176 * We believe SWAP_MAP_MAX cannot occur (if it did,
1065 * 1177 * an unsigned short would be too small).
1066 * If that's wrong, then we should worry more about 1178 * If that's wrong, then we should worry more about
1067 * exit_mmap() and do_munmap() cases described above: 1179 * exit_mmap() and do_munmap() cases described above:
1068 * we might be resetting SWAP_MAP_MAX too early here. 1180 * we might be resetting SWAP_MAP_MAX too early here.
1069 * We know "Undead"s can happen, they're okay, so don't 1181 * We know "Undead"s can happen, they're okay, so don't
1070 * report them; but do report if we reset SWAP_MAP_MAX. 1182 * report them; but do report if we reset SWAP_MAP_MAX.
1071 */ 1183 */
1072 if (*swap_map == SWAP_MAP_MAX) { 1184 /* We might release the lock_page() in unuse_mm(). */
1185 if (!PageSwapCache(page) || page_private(page) != entry.val)
1186 goto retry;
1187
1188 if (swap_count(*swap_map) == SWAP_MAP_MAX) {
1073 spin_lock(&swap_lock); 1189 spin_lock(&swap_lock);
1074 *swap_map = 1; 1190 *swap_map = encode_swapmap(0, true);
1075 spin_unlock(&swap_lock); 1191 spin_unlock(&swap_lock);
1076 reset_overflow = 1; 1192 reset_overflow = 1;
1077 } 1193 }
@@ -1089,7 +1205,8 @@ static int try_to_unuse(unsigned int type)
1089 * pages would be incorrect if swap supported "shared 1205 * pages would be incorrect if swap supported "shared
1090 * private" pages, but they are handled by tmpfs files. 1206 * private" pages, but they are handled by tmpfs files.
1091 */ 1207 */
1092 if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) { 1208 if (swap_count(*swap_map) &&
1209 PageDirty(page) && PageSwapCache(page)) {
1093 struct writeback_control wbc = { 1210 struct writeback_control wbc = {
1094 .sync_mode = WB_SYNC_NONE, 1211 .sync_mode = WB_SYNC_NONE,
1095 }; 1212 };
@@ -1116,6 +1233,7 @@ static int try_to_unuse(unsigned int type)
1116 * mark page dirty so shrink_page_list will preserve it. 1233 * mark page dirty so shrink_page_list will preserve it.
1117 */ 1234 */
1118 SetPageDirty(page); 1235 SetPageDirty(page);
1236retry:
1119 unlock_page(page); 1237 unlock_page(page);
1120 page_cache_release(page); 1238 page_cache_release(page);
1121 1239
@@ -1942,15 +2060,23 @@ void si_swapinfo(struct sysinfo *val)
1942 * 2060 *
1943 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as 2061 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
1944 * "permanent", but will be reclaimed by the next swapoff. 2062 * "permanent", but will be reclaimed by the next swapoff.
2063 * Return values:
2064 * - success -> 0
2065 * - swp_entry is invalid -> -EINVAL
2066 * - swp_entry is a migration entry -> -EINVAL
2067 * - a swap-cache reference is requested but there is already one -> -EEXIST
2068 * - a swap-cache reference is requested but the entry is not used -> -ENOENT
1945 */ 2069 */
1946int swap_duplicate(swp_entry_t entry) 2070static int __swap_duplicate(swp_entry_t entry, bool cache)
1947{ 2071{
1948 struct swap_info_struct * p; 2072 struct swap_info_struct * p;
1949 unsigned long offset, type; 2073 unsigned long offset, type;
1950 int result = 0; 2074 int result = -EINVAL;
2075 int count;
2076 bool has_cache;
1951 2077
1952 if (is_migration_entry(entry)) 2078 if (is_migration_entry(entry))
1953 return 1; 2079 return -EINVAL;
1954 2080
1955 type = swp_type(entry); 2081 type = swp_type(entry);
1956 if (type >= nr_swapfiles) 2082 if (type >= nr_swapfiles)
@@ -1959,17 +2085,40 @@ int swap_duplicate(swp_entry_t entry)
1959 offset = swp_offset(entry); 2085 offset = swp_offset(entry);
1960 2086
1961 spin_lock(&swap_lock); 2087 spin_lock(&swap_lock);
1962 if (offset < p->max && p->swap_map[offset]) { 2088
1963 if (p->swap_map[offset] < SWAP_MAP_MAX - 1) { 2089 if (unlikely(offset >= p->max))
1964 p->swap_map[offset]++; 2090 goto unlock_out;
1965 result = 1; 2091
1966 } else if (p->swap_map[offset] <= SWAP_MAP_MAX) { 2092 count = swap_count(p->swap_map[offset]);
2093 has_cache = swap_has_cache(p->swap_map[offset]);
2094
2095 if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */
2096
2097 /* set SWAP_HAS_CACHE if there is no cache and entry is used */
2098 if (!has_cache && count) {
2099 p->swap_map[offset] = encode_swapmap(count, true);
2100 result = 0;
2101 } else if (has_cache) /* someone added cache */
2102 result = -EEXIST;
2103 else if (!count) /* no users */
2104 result = -ENOENT;
2105
2106 } else if (count || has_cache) {
2107 if (count < SWAP_MAP_MAX - 1) {
2108 p->swap_map[offset] = encode_swapmap(count + 1,
2109 has_cache);
2110 result = 0;
2111 } else if (count <= SWAP_MAP_MAX) {
1967 if (swap_overflow++ < 5) 2112 if (swap_overflow++ < 5)
1968 printk(KERN_WARNING "swap_dup: swap entry overflow\n"); 2113 printk(KERN_WARNING
1969 p->swap_map[offset] = SWAP_MAP_MAX; 2114 "swap_dup: swap entry overflow\n");
1970 result = 1; 2115 p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX,
2116 has_cache);
2117 result = 0;
1971 } 2118 }
1972 } 2119 } else
2120 result = -ENOENT; /* unused swap entry */
2121unlock_out:
1973 spin_unlock(&swap_lock); 2122 spin_unlock(&swap_lock);
1974out: 2123out:
1975 return result; 2124 return result;
@@ -1978,6 +2127,27 @@ bad_file:
1978 printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); 2127 printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
1979 goto out; 2128 goto out;
1980} 2129}
2130/*
2131 * Increase the reference count of a swap entry by 1.
2132 */
2133void swap_duplicate(swp_entry_t entry)
2134{
2135 __swap_duplicate(entry, SWAP_MAP);
2136}
2137
2138/*
2139 * @entry: swap entry for which we allocate swap cache.
2140 *
2141 * Called when allocating swap cache for an existing swap entry.
2142 * This can return error codes. Returns 0 on success.
2143 * -EEXIST means there is already a swap cache.
2144 * Note: return code is different from swap_duplicate().
2145 */
2146int swapcache_prepare(swp_entry_t entry)
2147{
2148 return __swap_duplicate(entry, SWAP_CACHE);
2149}
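
A sketch of the intended swapcache_prepare() caller, loosely modeled on swapin readahead; everything around the call is illustrative:

	static struct page *swapin_sketch(swp_entry_t entry)
	{
		int err = swapcache_prepare(entry);

		if (err == -EEXIST)	/* raced: someone else owns the cache slot */
			return NULL;	/* a real caller would wait or retry */
		if (err == -ENOENT)	/* the entry was freed while unlocked */
			return NULL;

		/* ...alloc_page(), add_to_swap_cache(), start the read... */
		return NULL;
	}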
2150
1981 2151
1982struct swap_info_struct * 2152struct swap_info_struct *
1983get_swap_info_struct(unsigned type) 2153get_swap_info_struct(unsigned type)
@@ -2016,7 +2186,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
2016 /* Don't read in free or bad pages */ 2186 /* Don't read in free or bad pages */
2017 if (!si->swap_map[toff]) 2187 if (!si->swap_map[toff])
2018 break; 2188 break;
2019 if (si->swap_map[toff] == SWAP_MAP_BAD) 2189 if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2020 break; 2190 break;
2021 } 2191 }
2022 /* Count contiguous allocated slots below our target */ 2192 /* Count contiguous allocated slots below our target */
@@ -2024,7 +2194,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
2024 /* Don't read in free or bad pages */ 2194 /* Don't read in free or bad pages */
2025 if (!si->swap_map[toff]) 2195 if (!si->swap_map[toff])
2026 break; 2196 break;
2027 if (si->swap_map[toff] == SWAP_MAP_BAD) 2197 if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2028 break; 2198 break;
2029 } 2199 }
2030 spin_unlock(&swap_lock); 2200 spin_unlock(&swap_lock);
diff --git a/mm/truncate.c b/mm/truncate.c
index 12e1579f9165..ccc3ecf7cb98 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -267,8 +267,21 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
267} 267}
268EXPORT_SYMBOL(truncate_inode_pages); 268EXPORT_SYMBOL(truncate_inode_pages);
269 269
270unsigned long __invalidate_mapping_pages(struct address_space *mapping, 270/**
271 pgoff_t start, pgoff_t end, bool be_atomic) 271 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
272 * @mapping: the address_space which holds the pages to invalidate
273 * @start: the offset 'from' which to invalidate
274 * @end: the offset 'to' which to invalidate (inclusive)
275 *
 276 * This function only removes the unlocked pages; if you want to
277 * remove all the pages of one inode, you must call truncate_inode_pages.
278 *
279 * invalidate_mapping_pages() will not block on IO activity. It will not
280 * invalidate pages which are dirty, locked, under writeback or mapped into
281 * pagetables.
282 */
283unsigned long invalidate_mapping_pages(struct address_space *mapping,
284 pgoff_t start, pgoff_t end)
272{ 285{
273 struct pagevec pvec; 286 struct pagevec pvec;
274 pgoff_t next = start; 287 pgoff_t next = start;
@@ -309,30 +322,10 @@ unlock:
309 break; 322 break;
310 } 323 }
311 pagevec_release(&pvec); 324 pagevec_release(&pvec);
312 if (likely(!be_atomic)) 325 cond_resched();
313 cond_resched();
314 } 326 }
315 return ret; 327 return ret;
316} 328}
317
318/**
319 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
320 * @mapping: the address_space which holds the pages to invalidate
321 * @start: the offset 'from' which to invalidate
322 * @end: the offset 'to' which to invalidate (inclusive)
323 *
324 * This function only removes the unlocked pages, if you want to
325 * remove all the pages of one inode, you must call truncate_inode_pages.
326 *
327 * invalidate_mapping_pages() will not block on IO activity. It will not
328 * invalidate pages which are dirty, locked, under writeback or mapped into
329 * pagetables.
330 */
331unsigned long invalidate_mapping_pages(struct address_space *mapping,
332 pgoff_t start, pgoff_t end)
333{
334 return __invalidate_mapping_pages(mapping, start, end, false);
335}
336EXPORT_SYMBOL(invalidate_mapping_pages); 329EXPORT_SYMBOL(invalidate_mapping_pages);
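
With the be_atomic variant gone, callers use the exported function directly. A minimal usage sketch covering a whole mapping, in the style of the drop_caches code:

	static void drop_clean_cache_sketch(struct inode *inode)
	{
		/* (pgoff_t)-1 as the inclusive end offset spans the whole file */
		invalidate_mapping_pages(inode->i_mapping, 0, (pgoff_t)-1);
	}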
337 330
338/* 331/*
diff --git a/mm/util.c b/mm/util.c
index abc65aa7cdfc..d5d2213728c5 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -233,13 +233,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
233 * @pages: array that receives pointers to the pages pinned. 233 * @pages: array that receives pointers to the pages pinned.
234 * Should be at least nr_pages long. 234 * Should be at least nr_pages long.
235 * 235 *
236 * Attempt to pin user pages in memory without taking mm->mmap_sem.
237 * If not successful, it will fall back to taking the lock and
238 * calling get_user_pages().
239 *
240 * Returns number of pages pinned. This may be fewer than the number 236 * Returns number of pages pinned. This may be fewer than the number
241 * requested. If nr_pages is 0 or negative, returns 0. If no pages 237 * requested. If nr_pages is 0 or negative, returns 0. If no pages
242 * were pinned, returns -errno. 238 * were pinned, returns -errno.
239 *
240 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 241 * operating on current and current->mm, with force=0 and vma=NULL. However,
242 * unlike get_user_pages, it must be called without mmap_sem held.
243 *
244 * get_user_pages_fast may take mmap_sem and page table locks, so no
245 * assumptions can be made about lack of locking. get_user_pages_fast is to be
246 * implemented in a way that is advantageous (vs get_user_pages()) when the
 247 * user memory area is already faulted in and present in ptes. However, if the
 248 * pages have to be faulted in, it may turn out to be slightly slower, so
249 * callers need to carefully consider what to use. On many architectures,
250 * get_user_pages_fast simply falls back to get_user_pages.
243 */ 251 */
244int __attribute__((weak)) get_user_pages_fast(unsigned long start, 252int __attribute__((weak)) get_user_pages_fast(unsigned long start,
245 int nr_pages, int write, struct page **pages) 253 int nr_pages, int write, struct page **pages)
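
A usage sketch matching the contract documented above; the function and its parameters are illustrative:

	static int pin_user_buffer_sketch(unsigned long uaddr, int nr,
					  struct page **pages)
	{
		int pinned;

		/* per the comment above: mmap_sem must NOT be held here */
		pinned = get_user_pages_fast(uaddr, nr, 1 /* write */, pages);
		if (pinned < 0)
			return pinned;	/* nothing pinned: -errno */

		/* may be fewer than nr; the caller must cope with a short pin */
		return pinned;
	}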
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 95c08a8cc2ba..4139aa52b941 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -470,8 +470,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
470 swp_entry_t swap = { .val = page_private(page) }; 470 swp_entry_t swap = { .val = page_private(page) };
471 __delete_from_swap_cache(page); 471 __delete_from_swap_cache(page);
472 spin_unlock_irq(&mapping->tree_lock); 472 spin_unlock_irq(&mapping->tree_lock);
473 mem_cgroup_uncharge_swapcache(page, swap); 473 swapcache_free(swap, page);
474 swap_free(swap);
475 } else { 474 } else {
476 __remove_from_page_cache(page); 475 __remove_from_page_cache(page);
477 spin_unlock_irq(&mapping->tree_lock); 476 spin_unlock_irq(&mapping->tree_lock);
@@ -514,7 +513,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
514 * 513 *
515 * lru_lock must not be held, interrupts must be enabled. 514 * lru_lock must not be held, interrupts must be enabled.
516 */ 515 */
517#ifdef CONFIG_UNEVICTABLE_LRU
518void putback_lru_page(struct page *page) 516void putback_lru_page(struct page *page)
519{ 517{
520 int lru; 518 int lru;
@@ -568,20 +566,6 @@ redo:
568 put_page(page); /* drop ref from isolate */ 566 put_page(page); /* drop ref from isolate */
569} 567}
570 568
571#else /* CONFIG_UNEVICTABLE_LRU */
572
573void putback_lru_page(struct page *page)
574{
575 int lru;
576 VM_BUG_ON(PageLRU(page));
577
578 lru = !!TestClearPageActive(page) + page_is_file_cache(page);
579 lru_cache_add_lru(page, lru);
580 put_page(page);
581}
582#endif /* CONFIG_UNEVICTABLE_LRU */
583
584
585/* 569/*
586 * shrink_page_list() returns the number of reclaimed pages 570 * shrink_page_list() returns the number of reclaimed pages
587 */ 571 */
@@ -593,6 +577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
593 struct pagevec freed_pvec; 577 struct pagevec freed_pvec;
594 int pgactivate = 0; 578 int pgactivate = 0;
595 unsigned long nr_reclaimed = 0; 579 unsigned long nr_reclaimed = 0;
580 unsigned long vm_flags;
596 581
597 cond_resched(); 582 cond_resched();
598 583
@@ -643,7 +628,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
643 goto keep_locked; 628 goto keep_locked;
644 } 629 }
645 630
646 referenced = page_referenced(page, 1, sc->mem_cgroup); 631 referenced = page_referenced(page, 1,
632 sc->mem_cgroup, &vm_flags);
647 /* In active use or really unfreeable? Activate it. */ 633 /* In active use or really unfreeable? Activate it. */
648 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && 634 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
649 referenced && page_mapping_inuse(page)) 635 referenced && page_mapping_inuse(page))
@@ -943,18 +929,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
943 /* Check that we have not crossed a zone boundary. */ 929 /* Check that we have not crossed a zone boundary. */
944 if (unlikely(page_zone_id(cursor_page) != zone_id)) 930 if (unlikely(page_zone_id(cursor_page) != zone_id))
945 continue; 931 continue;
946 switch (__isolate_lru_page(cursor_page, mode, file)) { 932 if (__isolate_lru_page(cursor_page, mode, file) == 0) {
947 case 0:
948 list_move(&cursor_page->lru, dst); 933 list_move(&cursor_page->lru, dst);
949 nr_taken++; 934 nr_taken++;
950 scan++; 935 scan++;
951 break;
952
953 case -EBUSY:
954 /* else it is being freed elsewhere */
955 list_move(&cursor_page->lru, src);
956 default:
957 break; /* ! on LRU or wrong list */
958 } 936 }
959 } 937 }
960 } 938 }
@@ -1061,6 +1039,19 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1061 unsigned long nr_scanned = 0; 1039 unsigned long nr_scanned = 0;
1062 unsigned long nr_reclaimed = 0; 1040 unsigned long nr_reclaimed = 0;
1063 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1041 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1042 int lumpy_reclaim = 0;
1043
1044 /*
1045 * If we need a large contiguous chunk of memory, or have
1046 * trouble getting a small set of contiguous pages, we
1047 * will reclaim both active and inactive pages.
1048 *
1049 * We use the same threshold as pageout congestion_wait below.
1050 */
1051 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1052 lumpy_reclaim = 1;
1053 else if (sc->order && priority < DEF_PRIORITY - 2)
1054 lumpy_reclaim = 1;
1064 1055
1065 pagevec_init(&pvec, 1); 1056 pagevec_init(&pvec, 1);
1066 1057
@@ -1073,19 +1064,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1073 unsigned long nr_freed; 1064 unsigned long nr_freed;
1074 unsigned long nr_active; 1065 unsigned long nr_active;
1075 unsigned int count[NR_LRU_LISTS] = { 0, }; 1066 unsigned int count[NR_LRU_LISTS] = { 0, };
1076 int mode = ISOLATE_INACTIVE; 1067 int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
1077
1078 /*
1079 * If we need a large contiguous chunk of memory, or have
1080 * trouble getting a small set of contiguous pages, we
1081 * will reclaim both active and inactive pages.
1082 *
1083 * We use the same threshold as pageout congestion_wait below.
1084 */
1085 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1086 mode = ISOLATE_BOTH;
1087 else if (sc->order && priority < DEF_PRIORITY - 2)
1088 mode = ISOLATE_BOTH;
1089 1068
1090 nr_taken = sc->isolate_pages(sc->swap_cluster_max, 1069 nr_taken = sc->isolate_pages(sc->swap_cluster_max,
1091 &page_list, &nr_scan, sc->order, mode, 1070 &page_list, &nr_scan, sc->order, mode,
@@ -1122,7 +1101,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1122 * but that should be acceptable to the caller 1101 * but that should be acceptable to the caller
1123 */ 1102 */
1124 if (nr_freed < nr_taken && !current_is_kswapd() && 1103 if (nr_freed < nr_taken && !current_is_kswapd() &&
1125 sc->order > PAGE_ALLOC_COSTLY_ORDER) { 1104 lumpy_reclaim) {
1126 congestion_wait(WRITE, HZ/10); 1105 congestion_wait(WRITE, HZ/10);
1127 1106
1128 /* 1107 /*
@@ -1217,18 +1196,54 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
1217 * But we had to alter page->flags anyway. 1196 * But we had to alter page->flags anyway.
1218 */ 1197 */
1219 1198
1199static void move_active_pages_to_lru(struct zone *zone,
1200 struct list_head *list,
1201 enum lru_list lru)
1202{
1203 unsigned long pgmoved = 0;
1204 struct pagevec pvec;
1205 struct page *page;
1206
1207 pagevec_init(&pvec, 1);
1208
1209 while (!list_empty(list)) {
1210 page = lru_to_page(list);
1211 prefetchw_prev_lru_page(page, list, flags);
1212
1213 VM_BUG_ON(PageLRU(page));
1214 SetPageLRU(page);
1215
1216 VM_BUG_ON(!PageActive(page));
1217 if (!is_active_lru(lru))
1218 ClearPageActive(page); /* we are de-activating */
1219
1220 list_move(&page->lru, &zone->lru[lru].list);
1221 mem_cgroup_add_lru_list(page, lru);
1222 pgmoved++;
1223
1224 if (!pagevec_add(&pvec, page) || list_empty(list)) {
1225 spin_unlock_irq(&zone->lru_lock);
1226 if (buffer_heads_over_limit)
1227 pagevec_strip(&pvec);
1228 __pagevec_release(&pvec);
1229 spin_lock_irq(&zone->lru_lock);
1230 }
1231 }
1232 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1233 if (!is_active_lru(lru))
1234 __count_vm_events(PGDEACTIVATE, pgmoved);
1235}
1220 1236
1221static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 1237static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1222 struct scan_control *sc, int priority, int file) 1238 struct scan_control *sc, int priority, int file)
1223{ 1239{
1224 unsigned long pgmoved; 1240 unsigned long pgmoved;
1225 int pgdeactivate = 0;
1226 unsigned long pgscanned; 1241 unsigned long pgscanned;
1242 unsigned long vm_flags;
1227 LIST_HEAD(l_hold); /* The pages which were snipped off */ 1243 LIST_HEAD(l_hold); /* The pages which were snipped off */
1244 LIST_HEAD(l_active);
1228 LIST_HEAD(l_inactive); 1245 LIST_HEAD(l_inactive);
1229 struct page *page; 1246 struct page *page;
1230 struct pagevec pvec;
1231 enum lru_list lru;
1232 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1247 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1233 1248
1234 lru_add_drain(); 1249 lru_add_drain();
@@ -1245,13 +1260,14 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1245 } 1260 }
1246 reclaim_stat->recent_scanned[!!file] += pgmoved; 1261 reclaim_stat->recent_scanned[!!file] += pgmoved;
1247 1262
1263 __count_zone_vm_events(PGREFILL, zone, pgscanned);
1248 if (file) 1264 if (file)
1249 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved); 1265 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
1250 else 1266 else
1251 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved); 1267 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
1252 spin_unlock_irq(&zone->lru_lock); 1268 spin_unlock_irq(&zone->lru_lock);
1253 1269
1254 pgmoved = 0; 1270 pgmoved = 0; /* count referenced pages from in-use mappings */
1255 while (!list_empty(&l_hold)) { 1271 while (!list_empty(&l_hold)) {
1256 cond_resched(); 1272 cond_resched();
1257 page = lru_to_page(&l_hold); 1273 page = lru_to_page(&l_hold);
@@ -1264,58 +1280,44 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1264 1280
1265 /* page_referenced clears PageReferenced */ 1281 /* page_referenced clears PageReferenced */
1266 if (page_mapping_inuse(page) && 1282 if (page_mapping_inuse(page) &&
1267 page_referenced(page, 0, sc->mem_cgroup)) 1283 page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
1268 pgmoved++; 1284 pgmoved++;
1285 /*
1286 * Identify referenced, file-backed active pages and
1287 * give them one more trip around the active list, so
1288 * that executable code gets a better chance to stay in
1289 * memory under moderate memory pressure. Anon pages
1290 * are not likely to be evicted by use-once streaming
1291 * IO, plus a JVM can create lots of anon VM_EXEC pages,
1292 * so we ignore them here.
1293 */
1294 if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
1295 list_add(&page->lru, &l_active);
1296 continue;
1297 }
1298 }
1269 1299
1270 list_add(&page->lru, &l_inactive); 1300 list_add(&page->lru, &l_inactive);
1271 } 1301 }
1272 1302
1273 /* 1303 /*
1274 * Move the pages to the [file or anon] inactive list. 1304 * Move pages back to the lru list.
1275 */ 1305 */
1276 pagevec_init(&pvec, 1);
1277 lru = LRU_BASE + file * LRU_FILE;
1278
1279 spin_lock_irq(&zone->lru_lock); 1306 spin_lock_irq(&zone->lru_lock);
1280 /* 1307 /*
1281 * Count referenced pages from currently used mappings as 1308 * Count referenced pages from currently used mappings as rotated,
1282 * rotated, even though they are moved to the inactive list. 1309 * even though only some of them are actually re-activated. This
1283 * This helps balance scan pressure between file and anonymous 1310 * helps balance scan pressure between file and anonymous pages in
1284 * pages in get_scan_ratio. 1311 * get_scan_ratio.
1285 */ 1312 */
1286 reclaim_stat->recent_rotated[!!file] += pgmoved; 1313 reclaim_stat->recent_rotated[!!file] += pgmoved;
1287 1314
1288 pgmoved = 0; 1315 move_active_pages_to_lru(zone, &l_active,
1289 while (!list_empty(&l_inactive)) { 1316 LRU_ACTIVE + file * LRU_FILE);
1290 page = lru_to_page(&l_inactive); 1317 move_active_pages_to_lru(zone, &l_inactive,
1291 prefetchw_prev_lru_page(page, &l_inactive, flags); 1318 LRU_BASE + file * LRU_FILE);
1292 VM_BUG_ON(PageLRU(page));
1293 SetPageLRU(page);
1294 VM_BUG_ON(!PageActive(page));
1295 ClearPageActive(page);
1296 1319
1297 list_move(&page->lru, &zone->lru[lru].list);
1298 mem_cgroup_add_lru_list(page, lru);
1299 pgmoved++;
1300 if (!pagevec_add(&pvec, page)) {
1301 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1302 spin_unlock_irq(&zone->lru_lock);
1303 pgdeactivate += pgmoved;
1304 pgmoved = 0;
1305 if (buffer_heads_over_limit)
1306 pagevec_strip(&pvec);
1307 __pagevec_release(&pvec);
1308 spin_lock_irq(&zone->lru_lock);
1309 }
1310 }
1311 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1312 pgdeactivate += pgmoved;
1313 __count_zone_vm_events(PGREFILL, zone, pgscanned);
1314 __count_vm_events(PGDEACTIVATE, pgdeactivate);
1315 spin_unlock_irq(&zone->lru_lock); 1320 spin_unlock_irq(&zone->lru_lock);
1316 if (buffer_heads_over_limit)
1317 pagevec_strip(&pvec);
1318 pagevec_release(&pvec);
1319} 1321}
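
The reworked loop depends on page_referenced() gaining a vm_flags out-parameter. The contract assumed here, distilled into a self-contained predicate (sketch only):

	static int keep_exec_page_active(struct page *page, struct scan_control *sc)
	{
		unsigned long vm_flags;

		/*
		 * Assumed contract: on return, vm_flags holds the union of
		 * the vm_flags of every VMA that referenced the page.
		 */
		if (!page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
			return 0;

		/* executable file pages earn another trip around the active list */
		return (vm_flags & VM_EXEC) && !PageAnon(page);
	}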
1320 1322
1321static int inactive_anon_is_low_global(struct zone *zone) 1323static int inactive_anon_is_low_global(struct zone *zone)
@@ -1350,12 +1352,48 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1350 return low; 1352 return low;
1351} 1353}
1352 1354
1355static int inactive_file_is_low_global(struct zone *zone)
1356{
1357 unsigned long active, inactive;
1358
1359 active = zone_page_state(zone, NR_ACTIVE_FILE);
1360 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1361
1362 return (active > inactive);
1363}
1364
1365/**
1366 * inactive_file_is_low - check if file pages need to be deactivated
1367 * @zone: zone to check
1368 * @sc: scan control of this context
1369 *
1370 * When the system is doing streaming IO, memory pressure here
1371 * ensures that active file pages get deactivated, until more
1372 * than half of the file pages are on the inactive list.
1373 *
1374 * Once we get to that situation, protect the system's working
1375 * set from being evicted by disabling active file page aging.
1376 *
1377 * This uses a different ratio than the anonymous pages, because
1378 * the page cache uses a use-once replacement algorithm.
1379 */
1380static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1381{
1382 int low;
1383
1384 if (scanning_global_lru(sc))
1385 low = inactive_file_is_low_global(zone);
1386 else
1387 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1388 return low;
1389}
1390
1353static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1391static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1354 struct zone *zone, struct scan_control *sc, int priority) 1392 struct zone *zone, struct scan_control *sc, int priority)
1355{ 1393{
1356 int file = is_file_lru(lru); 1394 int file = is_file_lru(lru);
1357 1395
1358 if (lru == LRU_ACTIVE_FILE) { 1396 if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
1359 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1397 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1360 return 0; 1398 return 0;
1361 } 1399 }
@@ -1384,13 +1422,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1384 unsigned long ap, fp; 1422 unsigned long ap, fp;
1385 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1423 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1386 1424
1387 /* If we have no swap space, do not bother scanning anon pages. */
1388 if (!sc->may_swap || (nr_swap_pages <= 0)) {
1389 percent[0] = 0;
1390 percent[1] = 100;
1391 return;
1392 }
1393
1394 anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) + 1425 anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
1395 zone_nr_pages(zone, sc, LRU_INACTIVE_ANON); 1426 zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
1396 file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) + 1427 file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
@@ -1400,7 +1431,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1400 free = zone_page_state(zone, NR_FREE_PAGES); 1431 free = zone_page_state(zone, NR_FREE_PAGES);
1401 /* If we have very few page cache pages, 1432 /* If we have very few page cache pages,
1402 force-scan anon pages. */ 1433 force-scan anon pages. */
1403 if (unlikely(file + free <= zone->pages_high)) { 1434 if (unlikely(file + free <= high_wmark_pages(zone))) {
1404 percent[0] = 100; 1435 percent[0] = 100;
1405 percent[1] = 0; 1436 percent[1] = 0;
1406 return; 1437 return;
@@ -1455,6 +1486,26 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1455 percent[1] = 100 - percent[0]; 1486 percent[1] = 100 - percent[0];
1456} 1487}
1457 1488
1489/*
1490 * Smallish @nr_to_scan values are deposited in @nr_saved_scan,
1491 * until we have collected @swap_cluster_max pages to scan.
1492 */
1493static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1494 unsigned long *nr_saved_scan,
1495 unsigned long swap_cluster_max)
1496{
1497 unsigned long nr;
1498
1499 *nr_saved_scan += nr_to_scan;
1500 nr = *nr_saved_scan;
1501
1502 if (nr >= swap_cluster_max)
1503 *nr_saved_scan = 0;
1504 else
1505 nr = 0;
1506
1507 return nr;
1508}
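
A worked example of the batching, with illustrative numbers:

	/* swap_cluster_max == 32, 10 pages requested per call */
	unsigned long saved = 0;

	nr_scan_try_batch(10, &saved, 32);	/* returns 0,  saved == 10 */
	nr_scan_try_batch(10, &saved, 32);	/* returns 0,  saved == 20 */
	nr_scan_try_batch(10, &saved, 32);	/* returns 0,  saved == 30 */
	nr_scan_try_batch(10, &saved, 32);	/* returns 40, saved reset to 0 */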
1458 1509
1459/* 1510/*
1460 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 1511 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
@@ -1468,26 +1519,30 @@ static void shrink_zone(int priority, struct zone *zone,
1468 enum lru_list l; 1519 enum lru_list l;
1469 unsigned long nr_reclaimed = sc->nr_reclaimed; 1520 unsigned long nr_reclaimed = sc->nr_reclaimed;
1470 unsigned long swap_cluster_max = sc->swap_cluster_max; 1521 unsigned long swap_cluster_max = sc->swap_cluster_max;
1522 int noswap = 0;
1471 1523
1472 get_scan_ratio(zone, sc, percent); 1524 /* If we have no swap space, do not bother scanning anon pages. */
1525 if (!sc->may_swap || (nr_swap_pages <= 0)) {
1526 noswap = 1;
1527 percent[0] = 0;
1528 percent[1] = 100;
1529 } else
1530 get_scan_ratio(zone, sc, percent);
1473 1531
1474 for_each_evictable_lru(l) { 1532 for_each_evictable_lru(l) {
1475 int file = is_file_lru(l); 1533 int file = is_file_lru(l);
1476 unsigned long scan; 1534 unsigned long scan;
1477 1535
1478 scan = zone_nr_pages(zone, sc, l); 1536 scan = zone_nr_pages(zone, sc, l);
1479 if (priority) { 1537 if (priority || noswap) {
1480 scan >>= priority; 1538 scan >>= priority;
1481 scan = (scan * percent[file]) / 100; 1539 scan = (scan * percent[file]) / 100;
1482 } 1540 }
1483 if (scanning_global_lru(sc)) { 1541 if (scanning_global_lru(sc))
1484 zone->lru[l].nr_scan += scan; 1542 nr[l] = nr_scan_try_batch(scan,
1485 nr[l] = zone->lru[l].nr_scan; 1543 &zone->lru[l].nr_saved_scan,
1486 if (nr[l] >= swap_cluster_max) 1544 swap_cluster_max);
1487 zone->lru[l].nr_scan = 0; 1545 else
1488 else
1489 nr[l] = 0;
1490 } else
1491 nr[l] = scan; 1546 nr[l] = scan;
1492 } 1547 }
1493 1548
@@ -1521,7 +1576,7 @@ static void shrink_zone(int priority, struct zone *zone,
1521 * Even if we did not try to evict anon pages at all, we want to 1576 * Even if we did not try to evict anon pages at all, we want to
1522 * rebalance the anon lru active/inactive ratio. 1577 * rebalance the anon lru active/inactive ratio.
1523 */ 1578 */
1524 if (inactive_anon_is_low(zone, sc)) 1579 if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
1525 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 1580 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1526 1581
1527 throttle_vm_writeout(sc->gfp_mask); 1582 throttle_vm_writeout(sc->gfp_mask);
@@ -1532,11 +1587,13 @@ static void shrink_zone(int priority, struct zone *zone,
1532 * try to reclaim pages from zones which will satisfy the caller's allocation 1587 * try to reclaim pages from zones which will satisfy the caller's allocation
1533 * request. 1588 * request.
1534 * 1589 *
1535 * We reclaim from a zone even if that zone is over pages_high. Because: 1590 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1591 * Because:
1536 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 1592 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1537 * allocation or 1593 * allocation or
1538 * b) The zones may be over pages_high but they must go *over* pages_high to 1594 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1539 * satisfy the `incremental min' zone defense algorithm. 1595 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1596 * zone defense algorithm.
1540 * 1597 *
1541 * If a zone is deemed to be full of pinned pages then just give it a light 1598 * If a zone is deemed to be full of pinned pages then just give it a light
1542 * scan then give up on it. 1599 * scan then give up on it.
@@ -1742,7 +1799,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1742 1799
1743/* 1800/*
1744 * For kswapd, balance_pgdat() will work across all this node's zones until 1801 * For kswapd, balance_pgdat() will work across all this node's zones until
1745 * they are all at pages_high. 1802 * they are all at high_wmark_pages(zone).
1746 * 1803 *
1747 * Returns the number of pages which were actually freed. 1804 * Returns the number of pages which were actually freed.
1748 * 1805 *
@@ -1755,11 +1812,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1755 * the zone for when the problem goes away. 1812 * the zone for when the problem goes away.
1756 * 1813 *
1757 * kswapd scans the zones in the highmem->normal->dma direction. It skips 1814 * kswapd scans the zones in the highmem->normal->dma direction. It skips
1758 * zones which have free_pages > pages_high, but once a zone is found to have 1815 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
1759 * free_pages <= pages_high, we scan that zone and the lower zones regardless 1816 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
1760 * of the number of free pages in the lower zones. This interoperates with 1817 * lower zones regardless of the number of free pages in the lower zones. This
1761 * the page allocator fallback scheme to ensure that aging of pages is balanced 1818 * interoperates with the page allocator fallback scheme to ensure that aging
1762 * across the zones. 1819 * of pages is balanced across the zones.
1763 */ 1820 */
1764static unsigned long balance_pgdat(pg_data_t *pgdat, int order) 1821static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1765{ 1822{
@@ -1780,7 +1837,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1780 }; 1837 };
1781 /* 1838 /*
1782 * temp_priority is used to remember the scanning priority at which 1839 * temp_priority is used to remember the scanning priority at which
1783 * this zone was successfully refilled to free_pages == pages_high. 1840 * this zone was successfully refilled to
1841 * free_pages == high_wmark_pages(zone).
1784 */ 1842 */
1785 int temp_priority[MAX_NR_ZONES]; 1843 int temp_priority[MAX_NR_ZONES];
1786 1844
@@ -1825,8 +1883,8 @@ loop_again:
1825 shrink_active_list(SWAP_CLUSTER_MAX, zone, 1883 shrink_active_list(SWAP_CLUSTER_MAX, zone,
1826 &sc, priority, 0); 1884 &sc, priority, 0);
1827 1885
1828 if (!zone_watermark_ok(zone, order, zone->pages_high, 1886 if (!zone_watermark_ok(zone, order,
1829 0, 0)) { 1887 high_wmark_pages(zone), 0, 0)) {
1830 end_zone = i; 1888 end_zone = i;
1831 break; 1889 break;
1832 } 1890 }
@@ -1860,8 +1918,8 @@ loop_again:
1860 priority != DEF_PRIORITY) 1918 priority != DEF_PRIORITY)
1861 continue; 1919 continue;
1862 1920
1863 if (!zone_watermark_ok(zone, order, zone->pages_high, 1921 if (!zone_watermark_ok(zone, order,
1864 end_zone, 0)) 1922 high_wmark_pages(zone), end_zone, 0))
1865 all_zones_ok = 0; 1923 all_zones_ok = 0;
1866 temp_priority[i] = priority; 1924 temp_priority[i] = priority;
1867 sc.nr_scanned = 0; 1925 sc.nr_scanned = 0;
@@ -1870,8 +1928,8 @@ loop_again:
1870 * We put equal pressure on every zone, unless one 1928 * We put equal pressure on every zone, unless one
1871 * zone has way too many pages free already. 1929 * zone has way too many pages free already.
1872 */ 1930 */
1873 if (!zone_watermark_ok(zone, order, 8*zone->pages_high, 1931 if (!zone_watermark_ok(zone, order,
1874 end_zone, 0)) 1932 8*high_wmark_pages(zone), end_zone, 0))
1875 shrink_zone(priority, zone, &sc); 1933 shrink_zone(priority, zone, &sc);
1876 reclaim_state->reclaimed_slab = 0; 1934 reclaim_state->reclaimed_slab = 0;
1877 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 1935 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -2037,7 +2095,7 @@ void wakeup_kswapd(struct zone *zone, int order)
2037 return; 2095 return;
2038 2096
2039 pgdat = zone->zone_pgdat; 2097 pgdat = zone->zone_pgdat;
2040 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) 2098 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
2041 return; 2099 return;
2042 if (pgdat->kswapd_max_order < order) 2100 if (pgdat->kswapd_max_order < order)
2043 pgdat->kswapd_max_order = order; 2101 pgdat->kswapd_max_order = order;
@@ -2084,11 +2142,11 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
2084 l == LRU_ACTIVE_FILE)) 2142 l == LRU_ACTIVE_FILE))
2085 continue; 2143 continue;
2086 2144
2087 zone->lru[l].nr_scan += (lru_pages >> prio) + 1; 2145 zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
2088 if (zone->lru[l].nr_scan >= nr_pages || pass > 3) { 2146 if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
2089 unsigned long nr_to_scan; 2147 unsigned long nr_to_scan;
2090 2148
2091 zone->lru[l].nr_scan = 0; 2149 zone->lru[l].nr_saved_scan = 0;
2092 nr_to_scan = min(nr_pages, lru_pages); 2150 nr_to_scan = min(nr_pages, lru_pages);
2093 nr_reclaimed += shrink_list(l, nr_to_scan, zone, 2151 nr_reclaimed += shrink_list(l, nr_to_scan, zone,
2094 sc, prio); 2152 sc, prio);
@@ -2290,6 +2348,48 @@ int sysctl_min_unmapped_ratio = 1;
2290 */ 2348 */
2291int sysctl_min_slab_ratio = 5; 2349int sysctl_min_slab_ratio = 5;
2292 2350
2351static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
2352{
2353 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
2354 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
2355 zone_page_state(zone, NR_ACTIVE_FILE);
2356
2357 /*
2358 * It's possible for there to be more file mapped pages than
2359 * accounted for by the pages on the file LRU lists because
2360 * tmpfs pages accounted for as ANON can also be FILE_MAPPED.
2361 */
2362 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
2363}
2364
2365/* Work out how many page cache pages we can reclaim in this reclaim_mode */
2366static long zone_pagecache_reclaimable(struct zone *zone)
2367{
2368 long nr_pagecache_reclaimable;
2369 long delta = 0;
2370
2371 /*
2372 * If RECLAIM_SWAP is set, then all file pages are considered
2373 * potentially reclaimable. Otherwise, we have to worry about
2374 * pages like swapcache and zone_unmapped_file_pages() provides
2375 * a better estimate
2376 */
2377 if (zone_reclaim_mode & RECLAIM_SWAP)
2378 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
2379 else
2380 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
2381
2382 /* If we can't clean pages, remove dirty pages from consideration */
2383 if (!(zone_reclaim_mode & RECLAIM_WRITE))
2384 delta += zone_page_state(zone, NR_FILE_DIRTY);
2385
2386 /* Watch for any possible underflows due to delta */
2387 if (unlikely(delta > nr_pagecache_reclaimable))
2388 delta = nr_pagecache_reclaimable;
2389
2390 return nr_pagecache_reclaimable - delta;
2391}
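
A worked example with made-up numbers, showing how the reclaim_mode bits shape the estimate:

	/*
	 * Assume NR_FILE_PAGES == 1000, NR_FILE_DIRTY == 100, and
	 * zone_unmapped_file_pages() == 600 for this zone:
	 *
	 *   RECLAIM_SWAP set,   RECLAIM_WRITE set:    1000
	 *   RECLAIM_SWAP set,   RECLAIM_WRITE clear:  1000 - 100 = 900
	 *   RECLAIM_SWAP clear, RECLAIM_WRITE set:     600
	 *   RECLAIM_SWAP clear, RECLAIM_WRITE clear:   600 - 100 = 500
	 */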
2392
2293/* 2393/*
2294 * Try to free up some pages from this zone through reclaim. 2394 * Try to free up some pages from this zone through reclaim.
2295 */ 2395 */
@@ -2324,9 +2424,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2324 reclaim_state.reclaimed_slab = 0; 2424 reclaim_state.reclaimed_slab = 0;
2325 p->reclaim_state = &reclaim_state; 2425 p->reclaim_state = &reclaim_state;
2326 2426
2327 if (zone_page_state(zone, NR_FILE_PAGES) - 2427 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
2328 zone_page_state(zone, NR_FILE_MAPPED) >
2329 zone->min_unmapped_pages) {
2330 /* 2428 /*
2331 * Free memory by calling shrink zone with increasing 2429 * Free memory by calling shrink zone with increasing
2332 * priorities until we have enough memory freed. 2430 * priorities until we have enough memory freed.
@@ -2384,20 +2482,18 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2384 * if less than a specified percentage of the zone is used by 2482 * if less than a specified percentage of the zone is used by
2385 * unmapped file backed pages. 2483 * unmapped file backed pages.
2386 */ 2484 */
2387 if (zone_page_state(zone, NR_FILE_PAGES) - 2485 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
2388 zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages 2486 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
2389 && zone_page_state(zone, NR_SLAB_RECLAIMABLE) 2487 return ZONE_RECLAIM_FULL;
2390 <= zone->min_slab_pages)
2391 return 0;
2392 2488
2393 if (zone_is_all_unreclaimable(zone)) 2489 if (zone_is_all_unreclaimable(zone))
2394 return 0; 2490 return ZONE_RECLAIM_FULL;
2395 2491
2396 /* 2492 /*
2397 * Do not scan if the allocation should not be delayed. 2493 * Do not scan if the allocation should not be delayed.
2398 */ 2494 */
2399 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) 2495 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2400 return 0; 2496 return ZONE_RECLAIM_NOSCAN;
2401 2497
2402 /* 2498 /*
2403 * Only run zone reclaim on the local zone or on zones that do not 2499 * Only run zone reclaim on the local zone or on zones that do not
@@ -2407,18 +2503,21 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2407 */ 2503 */
2408 node_id = zone_to_nid(zone); 2504 node_id = zone_to_nid(zone);
2409 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) 2505 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2410 return 0; 2506 return ZONE_RECLAIM_NOSCAN;
2411 2507
2412 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) 2508 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2413 return 0; 2509 return ZONE_RECLAIM_NOSCAN;
2510
2414 ret = __zone_reclaim(zone, gfp_mask, order); 2511 ret = __zone_reclaim(zone, gfp_mask, order);
2415 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); 2512 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2416 2513
2514 if (!ret)
2515 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
2516
2417 return ret; 2517 return ret;
2418} 2518}
2419#endif 2519#endif
2420 2520
2421#ifdef CONFIG_UNEVICTABLE_LRU
2422/* 2521/*
2423 * page_evictable - test whether a page is evictable 2522 * page_evictable - test whether a page is evictable
2424 * @page: the page to test 2523 * @page: the page to test
@@ -2665,4 +2764,3 @@ void scan_unevictable_unregister_node(struct node *node)
2665 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages); 2764 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
2666} 2765}
2667 2766
2668#endif
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 74d66dba0cbe..138bed53706e 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -629,10 +629,8 @@ static const char * const vmstat_text[] = {
629 "nr_active_anon", 629 "nr_active_anon",
630 "nr_inactive_file", 630 "nr_inactive_file",
631 "nr_active_file", 631 "nr_active_file",
632#ifdef CONFIG_UNEVICTABLE_LRU
633 "nr_unevictable", 632 "nr_unevictable",
634 "nr_mlock", 633 "nr_mlock",
635#endif
636 "nr_anon_pages", 634 "nr_anon_pages",
637 "nr_mapped", 635 "nr_mapped",
638 "nr_file_pages", 636 "nr_file_pages",
@@ -675,6 +673,9 @@ static const char * const vmstat_text[] = {
675 TEXTS_FOR_ZONES("pgscan_kswapd") 673 TEXTS_FOR_ZONES("pgscan_kswapd")
676 TEXTS_FOR_ZONES("pgscan_direct") 674 TEXTS_FOR_ZONES("pgscan_direct")
677 675
676#ifdef CONFIG_NUMA
677 "zone_reclaim_failed",
678#endif
678 "pginodesteal", 679 "pginodesteal",
679 "slabs_scanned", 680 "slabs_scanned",
680 "kswapd_steal", 681 "kswapd_steal",
@@ -687,7 +688,6 @@ static const char * const vmstat_text[] = {
687 "htlb_buddy_alloc_success", 688 "htlb_buddy_alloc_success",
688 "htlb_buddy_alloc_fail", 689 "htlb_buddy_alloc_fail",
689#endif 690#endif
690#ifdef CONFIG_UNEVICTABLE_LRU
691 "unevictable_pgs_culled", 691 "unevictable_pgs_culled",
692 "unevictable_pgs_scanned", 692 "unevictable_pgs_scanned",
693 "unevictable_pgs_rescued", 693 "unevictable_pgs_rescued",
@@ -697,7 +697,6 @@ static const char * const vmstat_text[] = {
697 "unevictable_pgs_stranded", 697 "unevictable_pgs_stranded",
698 "unevictable_pgs_mlockfreed", 698 "unevictable_pgs_mlockfreed",
699#endif 699#endif
700#endif
701}; 700};
702 701
703static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 702static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
@@ -710,18 +709,14 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
710 "\n min %lu" 709 "\n min %lu"
711 "\n low %lu" 710 "\n low %lu"
712 "\n high %lu" 711 "\n high %lu"
713 "\n scanned %lu (aa: %lu ia: %lu af: %lu if: %lu)" 712 "\n scanned %lu"
714 "\n spanned %lu" 713 "\n spanned %lu"
715 "\n present %lu", 714 "\n present %lu",
716 zone_page_state(zone, NR_FREE_PAGES), 715 zone_page_state(zone, NR_FREE_PAGES),
717 zone->pages_min, 716 min_wmark_pages(zone),
718 zone->pages_low, 717 low_wmark_pages(zone),
719 zone->pages_high, 718 high_wmark_pages(zone),
720 zone->pages_scanned, 719 zone->pages_scanned,
721 zone->lru[LRU_ACTIVE_ANON].nr_scan,
722 zone->lru[LRU_INACTIVE_ANON].nr_scan,
723 zone->lru[LRU_ACTIVE_FILE].nr_scan,
724 zone->lru[LRU_INACTIVE_FILE].nr_scan,
725 zone->spanned_pages, 720 zone->spanned_pages,
726 zone->present_pages); 721 zone->present_pages);
727 722
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 8847add6ca16..5ed8931dfe98 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -124,7 +124,7 @@ svc_pool_map_choose_mode(void)
124{ 124{
125 unsigned int node; 125 unsigned int node;
126 126
127 if (num_online_nodes() > 1) { 127 if (nr_online_nodes > 1) {
128 /* 128 /*
129 * Actually have multiple NUMA nodes, 129 * Actually have multiple NUMA nodes,
130 * so split pools on NUMA node boundaries 130 * so split pools on NUMA node boundaries
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 60dc0c48c929..3e733146cd51 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
13use strict; 13use strict;
14 14
15my $P = $0; 15my $P = $0;
16my $V = '0.15'; 16my $V = '0.16';
17 17
18use Getopt::Long qw(:config no_auto_abbrev); 18use Getopt::Long qw(:config no_auto_abbrev);
19 19
@@ -55,6 +55,10 @@ foreach my $chief (@penguin_chief) {
55} 55}
56my $penguin_chiefs = "\(" . join("|",@penguin_chief_names) . "\)"; 56my $penguin_chiefs = "\(" . join("|",@penguin_chief_names) . "\)";
57 57
58# rfc822 email address - preloaded methods go here.
59my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
60my $rfc822_char = '[\\000-\\377]';
61
58if (!GetOptions( 62if (!GetOptions(
59 'email!' => \$email, 63 'email!' => \$email,
60 'git!' => \$email_git, 64 'git!' => \$email_git,
@@ -161,7 +165,7 @@ foreach my $file (@ARGV) {
161 } 165 }
162 close(PATCH); 166 close(PATCH);
163 if ($file_cnt == @files) { 167 if ($file_cnt == @files) {
164 die "$P: file '${file}' doesn't appear to be a patch. " 168 warn "$P: file '${file}' doesn't appear to be a patch. "
165 . "Add -f to options?\n"; 169 . "Add -f to options?\n";
166 } 170 }
167 @files = sort_and_uniq(@files); 171 @files = sort_and_uniq(@files);
@@ -169,6 +173,7 @@ foreach my $file (@ARGV) {
169} 173}
170 174
171my @email_to = (); 175my @email_to = ();
176my @list_to = ();
172my @scm = (); 177my @scm = ();
173my @web = (); 178my @web = ();
174my @subsystem = (); 179my @subsystem = ();
@@ -182,7 +187,7 @@ foreach my $file (@files) {
182 187
183 my $exclude = 0; 188 my $exclude = 0;
184 foreach my $line (@typevalue) { 189 foreach my $line (@typevalue) {
185 if ($line =~ m/^(\C):(.*)/) { 190 if ($line =~ m/^(\C):\s*(.*)/) {
186 my $type = $1; 191 my $type = $1;
187 my $value = $2; 192 my $value = $2;
188 if ($type eq 'X') { 193 if ($type eq 'X') {
@@ -196,7 +201,7 @@ foreach my $file (@files) {
196 if (!$exclude) { 201 if (!$exclude) {
197 my $tvi = 0; 202 my $tvi = 0;
198 foreach my $line (@typevalue) { 203 foreach my $line (@typevalue) {
199 if ($line =~ m/^(\C):(.*)/) { 204 if ($line =~ m/^(\C):\s*(.*)/) {
200 my $type = $1; 205 my $type = $1;
201 my $value = $2; 206 my $value = $2;
202 if ($type eq 'F') { 207 if ($type eq 'F') {
@@ -215,29 +220,33 @@ foreach my $file (@files) {
215 220
216} 221}
217 222
218if ($email_git_penguin_chiefs) { 223if ($email) {
219 foreach my $chief (@penguin_chief) { 224 foreach my $chief (@penguin_chief) {
220 if ($chief =~ m/^(.*):(.*)/) { 225 if ($chief =~ m/^(.*):(.*)/) {
221 my $chief_name = $1; 226 my $email_address;
222 my $chief_addr = $2;
223 if ($email_usename) { 227 if ($email_usename) {
224 push(@email_to, format_email($chief_name, $chief_addr)); 228 $email_address = format_email($1, $2);
229 } else {
230 $email_address = $2;
231 }
232 if ($email_git_penguin_chiefs) {
233 push(@email_to, $email_address);
225 } else { 234 } else {
226 push(@email_to, $chief_addr); 235 @email_to = grep(!/${email_address}/, @email_to);
227 } 236 }
228 } 237 }
229 } 238 }
230} 239}
231 240
232if ($email) { 241if ($email || $email_list) {
233 my $address_cnt = @email_to; 242 my @to = ();
234 if ($address_cnt == 0 && $email_list) { 243 if ($email) {
235 push(@email_to, "linux-kernel\@vger.kernel.org"); 244 @to = (@to, @email_to);
236 } 245 }
237 246 if ($email_list) {
238#Don't sort email address list, but do remove duplicates 247 @to = (@to, @list_to);
239 @email_to = uniq(@email_to); 248 }
240 output(@email_to); 249 output(uniq(@to));
241} 250}
242 251
243if ($scm) { 252if ($scm) {
@@ -307,10 +316,10 @@ Output type options:
307 --multiline => print 1 entry per line 316 --multiline => print 1 entry per line
308 317
309Default options: 318Default options:
310 [--email --git --m --l --multiline] 319 [--email --git --m --n --l --multiline]
311 320
312Other options: 321Other options:
313 --version -> show version 322 --version => show version
314 --help => show this help information 323 --help => show this help information
315 324
316EOT 325EOT
@@ -347,6 +356,7 @@ sub format_email {
347 my ($name, $email) = @_; 356 my ($name, $email) = @_;
348 357
349 $name =~ s/^\s+|\s+$//g; 358 $name =~ s/^\s+|\s+$//g;
359 $name =~ s/^\"|\"$//g;
350 $email =~ s/^\s+|\s+$//g; 360 $email =~ s/^\s+|\s+$//g;
351 361
352 my $formatted_email = ""; 362 my $formatted_email = "";
@@ -366,36 +376,41 @@ sub add_categories {
366 $index = $index - 1; 376 $index = $index - 1;
367 while ($index >= 0) { 377 while ($index >= 0) {
368 my $tv = $typevalue[$index]; 378 my $tv = $typevalue[$index];
369 if ($tv =~ m/^(\C):(.*)/) { 379 if ($tv =~ m/^(\C):\s*(.*)/) {
370 my $ptype = $1; 380 my $ptype = $1;
371 my $pvalue = $2; 381 my $pvalue = $2;
372 if ($ptype eq "L") { 382 if ($ptype eq "L") {
373 my $subscr = $pvalue; 383 my $list_address = $pvalue;
374 if ($subscr =~ m/\s*\(subscribers-only\)/) { 384 my $list_additional = "";
385 if ($list_address =~ m/([^\s]+)\s+(.*)$/) {
386 $list_address = $1;
387 $list_additional = $2;
388 }
389 if ($list_additional =~ m/subscribers-only/) {
375 if ($email_subscriber_list) { 390 if ($email_subscriber_list) {
376 $subscr =~ s/\s*\(subscribers-only\)//g; 391 push(@list_to, $list_address);
377 push(@email_to, $subscr);
378 } 392 }
379 } else { 393 } else {
380 if ($email_list) { 394 if ($email_list) {
381 push(@email_to, $pvalue); 395 push(@list_to, $list_address);
382 } 396 }
383 } 397 }
384 } elsif ($ptype eq "M") { 398 } elsif ($ptype eq "M") {
385 if ($email_maintainer) { 399 my $p_used = 0;
386 if ($index >= 0) { 400 if ($index >= 0) {
387 my $tv = $typevalue[$index - 1]; 401 my $tv = $typevalue[$index - 1];
388 if ($tv =~ m/^(\C):(.*)/) { 402 if ($tv =~ m/^(\C):\s*(.*)/) {
389 if ($1 eq "P" && $email_usename) { 403 if ($1 eq "P") {
390 push(@email_to, format_email($2, $pvalue)); 404 if ($email_usename) {
391 } else { 405 push_email_address(format_email($2, $pvalue));
392 push(@email_to, $pvalue); 406 $p_used = 1;
393 } 407 }
394 } 408 }
395 } else {
396 push(@email_to, $pvalue);
397 } 409 }
398 } 410 }
411 if (!$p_used) {
412 push_email_addresses($pvalue);
413 }
399 } elsif ($ptype eq "T") { 414 } elsif ($ptype eq "T") {
400 push(@scm, $pvalue); 415 push(@scm, $pvalue);
401 } elsif ($ptype eq "W") { 416 } elsif ($ptype eq "W") {
@@ -412,10 +427,45 @@ sub add_categories {
412 } 427 }
413} 428}
414 429
430sub push_email_address {
431 my ($email_address) = @_;
432
433 my $email_name = "";
434 if ($email_address =~ m/([^<]+)<(.*\@.*)>$/) {
435 $email_name = $1;
436 $email_address = $2;
437 }
438
439 if ($email_maintainer) {
440 if ($email_usename && $email_name) {
441 push(@email_to, format_email($email_name, $email_address));
442 } else {
443 push(@email_to, $email_address);
444 }
445 }
446}
447
448sub push_email_addresses {
449 my ($address) = @_;
450
451 my @address_list = ();
452
453 if (rfc822_valid($address)) {
454 push_email_address($address);
455 } elsif (@address_list = rfc822_validlist($address)) {
456 my $array_count = shift(@address_list);
457 while (my $entry = shift(@address_list)) {
458 push_email_address($entry);
459 }
460 } else {
461 warn("Invalid MAINTAINERS address: '" . $address . "'\n");
462 }
463}
464
415sub which { 465sub which {
416 my ($bin) = @_; 466 my ($bin) = @_;
417 467
418 foreach my $path (split /:/, $ENV{PATH}) { 468 foreach my $path (split(/:/, $ENV{PATH})) {
419 if (-e "$path/$bin") { 469 if (-e "$path/$bin") {
420 return "$path/$bin"; 470 return "$path/$bin";
421 } 471 }
@@ -434,16 +484,21 @@ sub recent_git_signoffs {
434 my @lines = (); 484 my @lines = ();
435 485
436 if (which("git") eq "") { 486 if (which("git") eq "") {
437 die("$P: git not found. Add --nogit to options?\n"); 487 warn("$P: git not found. Add --nogit to options?\n");
488 return;
489 }
490 if (!(-d ".git")) {
491 warn("$P: .git directory not found. Use a git repository for better results.\n");
492 warn("$P: perhaps 'git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git'\n");
493 return;
438 } 494 }
439 495
440 $cmd = "git log --since=${email_git_since} -- ${file}"; 496 $cmd = "git log --since=${email_git_since} -- ${file}";
441 $cmd .= " | grep -Pi \"^[-_ a-z]+by:.*\\\@\""; 497 $cmd .= " | grep -Ei \"^[-_ a-z]+by:.*\\\@.*\$\"";
442 if (!$email_git_penguin_chiefs) { 498 if (!$email_git_penguin_chiefs) {
443 $cmd .= " | grep -Pv \"${penguin_chiefs}\""; 499 $cmd .= " | grep -Ev \"${penguin_chiefs}\"";
444 } 500 }
445 $cmd .= " | cut -f2- -d\":\""; 501 $cmd .= " | cut -f2- -d\":\"";
446 $cmd .= " | sed -e \"s/^\\s+//g\"";
447 $cmd .= " | sort | uniq -c | sort -rn"; 502 $cmd .= " | sort | uniq -c | sort -rn";
448 503
449 $output = `${cmd}`; 504 $output = `${cmd}`;
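Editor's note: the filter pipeline also moves from grep -P to grep -E here, presumably because PCRE support is a compile-time option in grep builds, and the separate sed whitespace-trim stage is dropped. A rough Perl equivalent of the tag match (the input line is hypothetical):

    my $line = '    Signed-off-by: Jane Hacker <jane@example.org>';  # hypothetical
    # Same shape as the grep -Ei stage: any "...-by:" tag whose value has an '@'.
    print "signoff\n" if $line =~ /^[-_ a-z]+by:.*\@.*$/i;
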
@@ -465,10 +520,6 @@ sub recent_git_signoffs {
465 if ($line =~ m/(.+)<(.+)>/) { 520 if ($line =~ m/(.+)<(.+)>/) {
466 my $git_name = $1; 521 my $git_name = $1;
467 my $git_addr = $2; 522 my $git_addr = $2;
468 $git_name =~ tr/^\"//;
469 $git_name =~ tr/^\\s*//;
470 $git_name =~ tr/\"$//;
471 $git_name =~ tr/\\s*$//;
472 if ($email_usename) { 523 if ($email_usename) {
473 push(@email_to, format_email($git_name, $git_addr)); 524 push(@email_to, format_email($git_name, $git_addr));
474 } else { 525 } else {
@@ -481,7 +532,6 @@ sub recent_git_signoffs {
481 push(@email_to, $line); 532 push(@email_to, $line);
482 } 533 }
483 } 534 }
484 return $output;
485} 535}
486 536
487sub uniq { 537sub uniq {
@@ -513,3 +563,97 @@ sub output {
513 print("\n"); 563 print("\n");
514 } 564 }
515} 565}
566
567my $rfc822re;
568
569sub make_rfc822re {
570# Basic lexical tokens are specials, domain_literal, quoted_string, atom, and
571# comment. We must allow for rfc822_lwsp (or comments) after each of these.
572# This regexp will only work on addresses which have had comments stripped
573# and replaced with rfc822_lwsp.
574
575 my $specials = '()<>@,;:\\\\".\\[\\]';
576 my $controls = '\\000-\\037\\177';
577
578 my $dtext = "[^\\[\\]\\r\\\\]";
579 my $domain_literal = "\\[(?:$dtext|\\\\.)*\\]$rfc822_lwsp*";
580
581 my $quoted_string = "\"(?:[^\\\"\\r\\\\]|\\\\.|$rfc822_lwsp)*\"$rfc822_lwsp*";
582
583# Use zero-width assertion to spot the limit of an atom. A simple
584# $rfc822_lwsp* causes the regexp engine to hang occasionally.
585 my $atom = "[^$specials $controls]+(?:$rfc822_lwsp+|\\Z|(?=[\\[\"$specials]))";
586 my $word = "(?:$atom|$quoted_string)";
587 my $localpart = "$word(?:\\.$rfc822_lwsp*$word)*";
588
589 my $sub_domain = "(?:$atom|$domain_literal)";
590 my $domain = "$sub_domain(?:\\.$rfc822_lwsp*$sub_domain)*";
591
592 my $addr_spec = "$localpart\@$rfc822_lwsp*$domain";
593
594 my $phrase = "$word*";
595 my $route = "(?:\@$domain(?:,\@$rfc822_lwsp*$domain)*:$rfc822_lwsp*)";
596 my $route_addr = "\\<$rfc822_lwsp*$route?$addr_spec\\>$rfc822_lwsp*";
597 my $mailbox = "(?:$addr_spec|$phrase$route_addr)";
598
599 my $group = "$phrase:$rfc822_lwsp*(?:$mailbox(?:,\\s*$mailbox)*)?;\\s*";
600 my $address = "(?:$mailbox|$group)";
601
602 return "$rfc822_lwsp*$address";
603}
604
605sub rfc822_strip_comments {
606 my $s = shift;
607# Recursively remove comments, and replace with a single space. The simpler
608# regexps in the Email Addressing FAQ are imperfect - they will miss escaped
609# chars in atoms, for example.
610
611 while ($s =~ s/^((?:[^"\\]|\\.)*
612 (?:"(?:[^"\\]|\\.)*"(?:[^"\\]|\\.)*)*)
613 \((?:[^()\\]|\\.)*\)/$1 /osx) {}
614 return $s;
615}
616
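Editor's note: rfc822_strip_comments() runs before validation so the big regexp never has to model comments; nesting is handled by repeatedly collapsing the innermost (...) group to a space. A quick check of the effect, run with the sub above in scope (input hypothetical):

    # Nested comment collapses to plain whitespace before validation.
    print rfc822_strip_comments('jane (work (primary)) <jane@example.org>'), "\n";
    # -> 'jane   <jane@example.org>'
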
617# valid: returns true if the parameter is an RFC822 valid address
618#
619sub rfc822_valid ($) {
620 my $s = rfc822_strip_comments(shift);
621
622 if (!$rfc822re) {
623 $rfc822re = make_rfc822re();
624 }
625
626 return $s =~ m/^$rfc822re$/so && $s =~ m/^$rfc822_char*$/;
627}
628
629# validlist: In scalar context, returns true if the parameter is an RFC822
630# valid list of addresses.
631#
632# In list context, returns an empty list on failure (an invalid
633# address was found); otherwise a list whose first element is the
634# number of addresses found and whose remaining elements are the
635# addresses. This is needed to disambiguate failure (invalid)
636# from success with no addresses found, because an empty string is
637# a valid list.
638
639sub rfc822_validlist ($) {
640 my $s = rfc822_strip_comments(shift);
641
642 if (!$rfc822re) {
643 $rfc822re = make_rfc822re();
644 }
645 # * null list items are valid according to the RFC
646 # * the '1' business is to aid in distinguishing failure from no results
647
648 my @r;
649 if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so &&
650 $s =~ m/^$rfc822_char*$/) {
651 while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) {
652 push @r, $1;
653 }
654 return wantarray ? (scalar(@r), @r) : 1;
655 }
656 else {
657 return wantarray ? () : 0;
658 }
659}
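Editor's note: the count-first return from rfc822_validlist() is what lets the caller earlier in this patch tell "empty but valid" apart from "invalid". A usage sketch with the subs above in scope (addresses hypothetical):

    my @parsed = rfc822_validlist('a@example.org, "Doe, Jane" <jane@example.org>');
    if (@parsed) {
        my $count = shift(@parsed);   # element 0 is the number of addresses
        print "valid, $count address(es): @parsed\n";
    } else {
        warn "invalid address list\n";
    }
    # rfc822_valid('') is false, but rfc822_validlist('') succeeds with a count
    # of 0: the empty string is a valid (empty) list, hence the convention.
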
diff --git a/scripts/gfp-translate b/scripts/gfp-translate
new file mode 100644
index 000000000000..073cb6d152a0
--- /dev/null
+++ b/scripts/gfp-translate
@@ -0,0 +1,81 @@
1#!/bin/bash
2# Translate the bits making up a GFP mask
3# (c) 2009, Mel Gorman <mel@csn.ul.ie>
4# Licensed under the terms of the GNU GPL License version 2
5SOURCE=
6GFPMASK=none
7
8# Helper function to report failures and exit
9die() {
10 echo ERROR: $@
11 if [ "$TMPFILE" != "" ]; then
12 rm -f $TMPFILE
13 fi
14	exit 1
15}
16
17usage() {
18 echo "usage: gfp-translate [-h] [ --source DIRECTORY ] gfpmask"
19 exit 0
20}
21
22# Parse command-line arguments
23while [ $# -gt 0 ]; do
24 case $1 in
25 --source)
26 SOURCE=$2
27 shift 2
28 ;;
29 -h)
30 usage
31 ;;
32 --help)
33 usage
34 ;;
35 *)
36 GFPMASK=$1
37 shift
38 ;;
39 esac
40done
41
42# Guess the kernel source directory if it's not set. Preference is in order of
43# o current directory
44# o /usr/src/linux
45if [ "$SOURCE" = "" ]; then
46 if [ -r "/usr/src/linux/Makefile" ]; then
47 SOURCE=/usr/src/linux
48 fi
49 if [ -r "`pwd`/Makefile" ]; then
50 SOURCE=`pwd`
51 fi
52fi
53
54# Confirm that a source directory exists
55if [ ! -r "$SOURCE/Makefile" ]; then
56 die "Could not locate kernel source directory or it is invalid"
57fi
58
59# Confirm that a GFP mask has been specified
60if [ "$GFPMASK" = "none" ]; then
61 usage
62fi
63
64# Extract GFP flags from the kernel source
65TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1
66grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
67
68# Parse the flags
69IFS="
70"
71echo Source: $SOURCE
72echo Parsing: $GFPMASK
73for LINE in `cat $TMPFILE`; do
74 MASK=`echo $LINE | awk '{print $3}'`
75 if [ $(($GFPMASK&$MASK)) -ne 0 ]; then
76 echo $LINE
77 fi
78done
79
80rm -f $TMPFILE
81exit 0
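Editor's note: everything in the script funnels into the $(($GFPMASK&$MASK)) test in the loop above: a flag is printed iff its bit is set in the mask being translated. The same check in Perl, with a few flag values hard-coded as they stood in 2.6.30-era include/linux/gfp.h (illustrative subset, not a substitute for parsing the real header):

    my %gfp = (__GFP_WAIT => 0x10, __GFP_IO => 0x40, __GFP_FS => 0x80);
    my $mask = 0xd0;    # GFP_KERNEL of that era: __GFP_WAIT|__GFP_IO|__GFP_FS
    for my $flag (sort keys %gfp) {
        printf("%-12s 0x%02x\n", $flag, $gfp{$flag}) if $mask & $gfp{$flag};
    }

Against a real tree the script itself does the equivalent with, e.g., 'scripts/gfp-translate --source /usr/src/linux 0xd0'.
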
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 6aa2a2483f8d..64f5ddb09ea6 100644
--- a/scripts/pnmtologo.c
+++ b/scripts/pnmtologo.c
@@ -237,22 +237,22 @@ static void write_header(void)
237 fprintf(out, " * Linux logo %s\n", logoname); 237 fprintf(out, " * Linux logo %s\n", logoname);
238 fputs(" */\n\n", out); 238 fputs(" */\n\n", out);
239 fputs("#include <linux/linux_logo.h>\n\n", out); 239 fputs("#include <linux/linux_logo.h>\n\n", out);
240 fprintf(out, "static unsigned char %s_data[] __initdata = {\n", 240 fprintf(out, "static const unsigned char %s_data[] __initconst = {\n",
241 logoname); 241 logoname);
242} 242}
243 243
244static void write_footer(void) 244static void write_footer(void)
245{ 245{
246 fputs("\n};\n\n", out); 246 fputs("\n};\n\n", out);
247 fprintf(out, "struct linux_logo %s __initdata = {\n", logoname); 247 fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
248 fprintf(out, " .type\t= %s,\n", logo_types[logo_type]); 248 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
249 fprintf(out, " .width\t= %d,\n", logo_width); 249 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
250 fprintf(out, " .height\t= %d,\n", logo_height); 250 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
251 if (logo_type == LINUX_LOGO_CLUT224) { 251 if (logo_type == LINUX_LOGO_CLUT224) {
252 fprintf(out, " .clutsize\t= %d,\n", logo_clutsize); 252 fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
253 fprintf(out, " .clut\t= %s_clut,\n", logoname); 253 fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
254 } 254 }
255 fprintf(out, " .data\t= %s_data\n", logoname); 255 fprintf(out, "\t.data\t\t= %s_data\n", logoname);
256 fputs("};\n\n", out); 256 fputs("};\n\n", out);
257 257
258 /* close logo file */ 258 /* close logo file */
@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
374 fputs("\n};\n\n", out); 374 fputs("\n};\n\n", out);
375 375
376 /* write logo clut */ 376 /* write logo clut */
377 fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", 377 fprintf(out, "static const unsigned char %s_clut[] __initconst = {\n",
378 logoname); 378 logoname);
379 write_hex_cnt = 0; 379 write_hex_cnt = 0;
380 for (i = 0; i < logo_clutsize; i++) { 380 for (i = 0; i < logo_clutsize; i++) {
diff --git a/sound/drivers/pcsp/pcsp.h b/sound/drivers/pcsp/pcsp.h
index cdef2664218f..174dd2ff0f22 100644
--- a/sound/drivers/pcsp/pcsp.h
+++ b/sound/drivers/pcsp/pcsp.h
@@ -10,6 +10,7 @@
10#define __PCSP_H__ 10#define __PCSP_H__
11 11
12#include <linux/hrtimer.h> 12#include <linux/hrtimer.h>
13#include <linux/timex.h>
13#if defined(CONFIG_MIPS) || defined(CONFIG_X86) 14#if defined(CONFIG_MIPS) || defined(CONFIG_X86)
14/* Use the global PIT lock ! */ 15/* Use the global PIT lock ! */
15#include <asm/i8253.h> 16#include <asm/i8253.h>
diff --git a/sound/oss/pas2_pcm.c b/sound/oss/pas2_pcm.c
index 36c3ea62086b..8f7d175767a2 100644
--- a/sound/oss/pas2_pcm.c
+++ b/sound/oss/pas2_pcm.c
@@ -17,7 +17,7 @@
17 17
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <asm/timex.h> 20#include <linux/timex.h>
21#include "sound_config.h" 21#include "sound_config.h"
22 22
23#include "pas2.h" 23#include "pas2.h"