author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-16 22:50:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-16 22:50:13 -0400
commit     517d08699b250021303f9a7cf0d758b6dc0748ed
tree       5e5b0134c3fffb78fe9d8b1641a64ff28fdd7bbc /fs
parent     8eeee4e2f04fc551f50c9d9847da2d73d7d33728
parent     a34601c5d84134055782ee031d58d82f5440e918
Merge branch 'akpm'
* akpm: (182 commits)
fbdev: bf54x-lq043fb: use kzalloc over kmalloc/memset
fbdev: *bfin*: fix __dev{init,exit} markings
fbdev: *bfin*: drop unnecessary calls to memset
fbdev: bfin-t350mcqb-fb: drop unused local variables
fbdev: blackfin has __raw I/O accessors, so use them in fb.h
fbdev: s1d13xxxfb: add accelerated bitblt functions
tcx: use standard fields for framebuffer physical address and length
fbdev: add support for handoff from firmware to hw framebuffers
intelfb: fix a bug when changing video timing
fbdev: use framebuffer_release() for freeing fb_info structures
radeon: P2G2CLK_ALWAYS_ONb tested twice, should 2nd be P2G2CLK_DAC_ALWAYS_ONb?
s3c-fb: CPUFREQ frequency scaling support
s3c-fb: fix resource releasing on error during probing
carminefb: fix possible access beyond end of carmine_modedb[]
acornfb: remove fb_mmap function
mb862xxfb: use CONFIG_OF instead of CONFIG_PPC_OF
mb862xxfb: restrict compliation of platform driver to PPC
Samsung SoC Framebuffer driver: add Alpha Channel support
atmel-lcdc: fix pixclock upper bound detection
offb: use framebuffer_alloc() to allocate fb_info struct
...
Manually fix up conflicts due to kmemcheck in mm/slab.c
Diffstat (limited to 'fs')

-rw-r--r--  fs/Kconfig        |  14
-rw-r--r--  fs/drop_caches.c  |   2
-rw-r--r--  fs/fs-writeback.c |   6
-rw-r--r--  fs/nfs/iostat.h   |   6
-rw-r--r--  fs/ntfs/inode.c   |   3
-rw-r--r--  fs/ntfs/logfile.c |   3
-rw-r--r--  fs/proc/base.c    |  19
-rw-r--r--  fs/proc/meminfo.c |   4
-rw-r--r--  fs/proc/page.c    | 162
-rw-r--r--  fs/select.c       |  40

10 files changed, 197 insertions(+), 62 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index 525da2e8f73b..4044f163035f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -39,6 +39,13 @@ config FS_POSIX_ACL
 	bool
 	default n
 
+source "fs/xfs/Kconfig"
+source "fs/gfs2/Kconfig"
+source "fs/ocfs2/Kconfig"
+source "fs/btrfs/Kconfig"
+
+endif # BLOCK
+
 config FILE_LOCKING
 	bool "Enable POSIX file locking API" if EMBEDDED
 	default y
@@ -47,13 +54,6 @@ config FILE_LOCKING
 	  for filesystems like NFS and for the flock() system
 	  call. Disabling this option saves about 11k.
 
-source "fs/xfs/Kconfig"
-source "fs/gfs2/Kconfig"
-source "fs/ocfs2/Kconfig"
-source "fs/btrfs/Kconfig"
-
-endif # BLOCK
-
 source "fs/notify/Kconfig"
 
 source "fs/quota/Kconfig"
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index b6a719a909f8..a2edb7913447 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -24,7 +24,7 @@ static void drop_pagecache_sb(struct super_block *sb)
 			continue;
 		__iget(inode);
 		spin_unlock(&inode_lock);
-		__invalidate_mapping_pages(inode->i_mapping, 0, -1, true);
+		invalidate_mapping_pages(inode->i_mapping, 0, -1);
 		iput(toput_inode);
 		toput_inode = inode;
 		spin_lock(&inode_lock);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 40308e98c6a4..caf049146ca2 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -321,7 +321,7 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
 
 	spin_lock(&inode_lock);
 	inode->i_state &= ~I_SYNC;
-	if (!(inode->i_state & I_FREEING)) {
+	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
 		if (!(inode->i_state & I_DIRTY) &&
 		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 			/*
@@ -492,7 +492,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
 			break;
 		}
 
-		if (inode->i_state & I_NEW) {
+		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
 			requeue_io(inode);
 			continue;
 		}
@@ -523,7 +523,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
 		if (current_is_pdflush() && !writeback_acquire(bdi))
 			break;
 
-		BUG_ON(inode->i_state & I_FREEING);
+		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
 		__iget(inode);
 		pages_skipped = wbc->pages_skipped;
 		__writeback_single_inode(inode, wbc);
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index a2ab2529b5ca..ceda50aad73c 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -31,7 +31,7 @@ static inline void nfs_inc_server_stats(const struct nfs_server *server,
 	cpu = get_cpu();
 	iostats = per_cpu_ptr(server->io_stats, cpu);
 	iostats->events[stat]++;
-	put_cpu_no_resched();
+	put_cpu();
 }
 
 static inline void nfs_inc_stats(const struct inode *inode,
@@ -50,7 +50,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
 	cpu = get_cpu();
 	iostats = per_cpu_ptr(server->io_stats, cpu);
 	iostats->bytes[stat] += addend;
-	put_cpu_no_resched();
+	put_cpu();
 }
 
 static inline void nfs_add_stats(const struct inode *inode,
@@ -71,7 +71,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
 	cpu = get_cpu();
 	iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
 	iostats->fscache[stat] += addend;
-	put_cpu_no_resched();
+	put_cpu();
 }
 #endif
 
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 82c5085559c6..9938034762cc 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
 #include <linux/slab.h>
+#include <linux/log2.h>
 
 #include "aops.h"
 #include "attrib.h"
@@ -1570,7 +1571,7 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
 	ntfs_debug("Index collation rule is 0x%x.",
 			le32_to_cpu(ir->collation_rule));
 	ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
-	if (ni->itype.index.block_size & (ni->itype.index.block_size - 1)) {
+	if (!is_power_of_2(ni->itype.index.block_size)) {
 		ntfs_error(vi->i_sb, "Index block size (%u) is not a power of "
 				"two.", ni->itype.index.block_size);
 		goto unm_err_out;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index d7932e95b1fd..89b02985c054 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -26,6 +26,7 @@
 #include <linux/highmem.h>
 #include <linux/buffer_head.h>
 #include <linux/bitops.h>
+#include <linux/log2.h>
 
 #include "attrib.h"
 #include "aops.h"
@@ -65,7 +66,7 @@ static bool ntfs_check_restart_page_header(struct inode *vi,
 			logfile_log_page_size < NTFS_BLOCK_SIZE ||
 			logfile_system_page_size &
 			(logfile_system_page_size - 1) ||
-			logfile_log_page_size & (logfile_log_page_size - 1)) {
+			!is_power_of_2(logfile_log_page_size)) {
 		ntfs_error(vi->i_sb, "$LogFile uses unsupported page size.");
 		return false;
 	}
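Both ntfs hunks swap the open-coded `x & (x - 1)` test for `is_power_of_2()` from `<linux/log2.h>`. The snippet below is a userspace stand-in for that kernel inline, shown only to illustrate the predicate; note that it also rejects 0, which the bare bit trick would have accepted.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's is_power_of_2(): true for 1, 2, 4, 8, ...
 * The old ntfs check, (n & (n - 1)), would also have let n == 0 through. */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long sizes[] = { 0, 512, 4096, 4097 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%lu -> %s\n", sizes[i],
		       is_power_of_2(sizes[i]) ? "power of two" : "rejected");
	return 0;
}
```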
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 1539e630c47d..3ce5ae9e3d2d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1006,7 +1006,12 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf,
 
 	if (!task)
 		return -ESRCH;
-	oom_adjust = task->oomkilladj;
+	task_lock(task);
+	if (task->mm)
+		oom_adjust = task->mm->oom_adj;
+	else
+		oom_adjust = OOM_DISABLE;
+	task_unlock(task);
 	put_task_struct(task);
 
 	len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
@@ -1035,11 +1040,19 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
 	task = get_proc_task(file->f_path.dentry->d_inode);
 	if (!task)
 		return -ESRCH;
-	if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
+	task_lock(task);
+	if (!task->mm) {
+		task_unlock(task);
+		put_task_struct(task);
+		return -EINVAL;
+	}
+	if (oom_adjust < task->mm->oom_adj && !capable(CAP_SYS_RESOURCE)) {
+		task_unlock(task);
 		put_task_struct(task);
 		return -EACCES;
 	}
-	task->oomkilladj = oom_adjust;
+	task->mm->oom_adj = oom_adjust;
+	task_unlock(task);
 	put_task_struct(task);
 	if (end - buffer == 0)
 		return -EIO;
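The user-visible file is still `/proc/<pid>/oom_adj`; only its backing store moves from `task_struct.oomkilladj` to `mm_struct.oom_adj`, and a task with no mm (e.g. a kernel thread) now reports `OOM_DISABLE` on read and rejects writes with `-EINVAL`. A minimal userspace sketch of the interface follows; the pid handling and the rewrite of the same value are just an example.

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[64];
	FILE *f;
	int pid = (argc > 1) ? atoi(argv[1]) : getpid();
	int oom_adj;

	snprintf(path, sizeof(path), "/proc/%d/oom_adj", pid);

	/* Current value: a single integer, -17 (OOM_DISABLE) .. 15. */
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &oom_adj) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("pid %d oom_adj = %d\n", pid, oom_adj);

	/* Lowering the value below the current one needs CAP_SYS_RESOURCE,
	 * exactly as the write path above enforces; rewriting the same
	 * value is always allowed. */
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "%d\n", oom_adj);
		fclose(f);
	}
	return 0;
}
```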
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index c6b0302af4c4..d5c410d47fae 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -64,10 +64,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		"Inactive(anon): %8lu kB\n"
 		"Active(file):   %8lu kB\n"
 		"Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
 		"Unevictable:    %8lu kB\n"
 		"Mlocked:        %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
 		"HighTotal:      %8lu kB\n"
 		"HighFree:       %8lu kB\n"
@@ -109,10 +107,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		K(pages[LRU_INACTIVE_ANON]),
 		K(pages[LRU_ACTIVE_FILE]),
 		K(pages[LRU_INACTIVE_FILE]),
-#ifdef CONFIG_UNEVICTABLE_LRU
 		K(pages[LRU_UNEVICTABLE]),
 		K(global_page_state(NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
 		K(i.totalhigh),
 		K(i.freehigh),
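With the `CONFIG_UNEVICTABLE_LRU` ifdefs gone, kernels carrying this change emit the `Unevictable:` and `Mlocked:` lines unconditionally, so a parser no longer has to treat them as optional. A small sketch, with field names taken from the format string above:

```c
#include <stdio.h>

/* Print the Unevictable/Mlocked fields from /proc/meminfo. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned long kb;

		if (sscanf(line, "Unevictable: %lu kB", &kb) == 1)
			printf("unevictable: %lu kB\n", kb);
		else if (sscanf(line, "Mlocked: %lu kB", &kb) == 1)
			printf("mlocked: %lu kB\n", kb);
	}
	fclose(f);
	return 0;
}
```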
diff --git a/fs/proc/page.c b/fs/proc/page.c
index e9983837d08d..2707c6c7a20f 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -6,11 +6,13 @@
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/hugetlb.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
 #define KPMSIZE sizeof(u64)
 #define KPMMASK (KPMSIZE - 1)
+
 /* /proc/kpagecount - an array exposing page counts
  *
  * Each entry is a u64 representing the corresponding
@@ -32,20 +34,22 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 		return -EINVAL;
 
 	while (count > 0) {
-		ppage = NULL;
 		if (pfn_valid(pfn))
 			ppage = pfn_to_page(pfn);
-		pfn++;
+		else
+			ppage = NULL;
 		if (!ppage)
 			pcount = 0;
 		else
 			pcount = page_mapcount(ppage);
 
-		if (put_user(pcount, out++)) {
+		if (put_user(pcount, out)) {
 			ret = -EFAULT;
 			break;
 		}
 
+		pfn++;
+		out++;
 		count -= KPMSIZE;
 	}
 
@@ -68,19 +72,122 @@ static const struct file_operations proc_kpagecount_operations = {
 
 /* These macros are used to decouple internal flags from exported ones */
 
 #define KPF_LOCKED 0
 #define KPF_ERROR 1
 #define KPF_REFERENCED 2
 #define KPF_UPTODATE 3
 #define KPF_DIRTY 4
 #define KPF_LRU 5
 #define KPF_ACTIVE 6
 #define KPF_SLAB 7
 #define KPF_WRITEBACK 8
 #define KPF_RECLAIM 9
 #define KPF_BUDDY 10
+
+/* 11-20: new additions in 2.6.31 */
+#define KPF_MMAP 11
+#define KPF_ANON 12
+#define KPF_SWAPCACHE 13
+#define KPF_SWAPBACKED 14
+#define KPF_COMPOUND_HEAD 15
+#define KPF_COMPOUND_TAIL 16
+#define KPF_HUGE 17
+#define KPF_UNEVICTABLE 18
+#define KPF_NOPAGE 20
+
+/* kernel hacking assistances
+ * WARNING: subject to change, never rely on them!
+ */
+#define KPF_RESERVED 32
+#define KPF_MLOCKED 33
+#define KPF_MAPPEDTODISK 34
+#define KPF_PRIVATE 35
+#define KPF_PRIVATE_2 36
+#define KPF_OWNER_PRIVATE 37
+#define KPF_ARCH 38
+#define KPF_UNCACHED 39
+
+static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
+{
+	return ((kflags >> kbit) & 1) << ubit;
+}
 
-#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
+static u64 get_uflags(struct page *page)
+{
+	u64 k;
+	u64 u;
+
+	/*
+	 * pseudo flag: KPF_NOPAGE
+	 * it differentiates a memory hole from a page with no flags
+	 */
+	if (!page)
+		return 1 << KPF_NOPAGE;
+
+	k = page->flags;
+	u = 0;
+
+	/*
+	 * pseudo flags for the well known (anonymous) memory mapped pages
+	 *
+	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
+	 * simple test in page_mapped() is not enough.
+	 */
+	if (!PageSlab(page) && page_mapped(page))
+		u |= 1 << KPF_MMAP;
+	if (PageAnon(page))
+		u |= 1 << KPF_ANON;
+
+	/*
+	 * compound pages: export both head/tail info
+	 * they together define a compound page's start/end pos and order
+	 */
+	if (PageHead(page))
+		u |= 1 << KPF_COMPOUND_HEAD;
+	if (PageTail(page))
+		u |= 1 << KPF_COMPOUND_TAIL;
+	if (PageHuge(page))
+		u |= 1 << KPF_HUGE;
+
+	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
+
+	/*
+	 * Caveats on high order pages:
+	 * PG_buddy will only be set on the head page; SLUB/SLQB do the same
+	 * for PG_slab; SLOB won't set PG_slab at all on compound pages.
+	 */
+	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
+	u |= kpf_copy_bit(k, KPF_BUDDY, PG_buddy);
+
+	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
+	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
+	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
+	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);
+
+	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
+	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
+	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
+	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);
+
+	u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
+	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);
+
+	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
+	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
+
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
+#endif
+
+	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
+	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
+	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
+	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
+	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
+	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);
+
+	return u;
+};
 
 static ssize_t kpageflags_read(struct file *file, char __user *buf,
 			size_t count, loff_t *ppos)
@@ -90,7 +197,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 	unsigned long src = *ppos;
 	unsigned long pfn;
 	ssize_t ret = 0;
-	u64 kflags, uflags;
 
 	pfn = src / KPMSIZE;
 	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
@@ -98,32 +204,18 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 		return -EINVAL;
 
 	while (count > 0) {
-		ppage = NULL;
 		if (pfn_valid(pfn))
 			ppage = pfn_to_page(pfn);
-		pfn++;
-		if (!ppage)
-			kflags = 0;
 		else
-			kflags = ppage->flags;
+			ppage = NULL;
 
-		uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
-			kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
-			kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
-			kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
-			kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
-			kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
-			kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
-			kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
-			kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
-			kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
-			kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);
-
-		if (put_user(uflags, out++)) {
+		if (put_user(get_uflags(ppage), out)) {
 			ret = -EFAULT;
 			break;
 		}
 
+		pfn++;
+		out++;
 		count -= KPMSIZE;
 	}
 
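Both `/proc/kpagecount` and `/proc/kpageflags` remain flat arrays of `u64`, one entry per page frame, so the entry for PFN `n` lives at byte offset `n * 8` (that is what `KPMSIZE` encodes). A minimal root-only reader follows, checking two of the flag bits defined above; the default PFN is an arbitrary example.

```c
#define _FILE_OFFSET_BITS 64
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Read one u64 entry for a given PFN; both proc files are indexed by
 * PFN, so the byte offset is pfn * sizeof(u64).  Requires root. */
static uint64_t read_entry(const char *path, uint64_t pfn)
{
	uint64_t val = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val),
			    (off_t)(pfn * sizeof(val))) != (ssize_t)sizeof(val))
		perror(path);
	if (fd >= 0)
		close(fd);
	return val;
}

int main(int argc, char **argv)
{
	uint64_t pfn = (argc > 1) ? strtoull(argv[1], NULL, 0) : 0x1000;
	uint64_t flags = read_entry("/proc/kpageflags", pfn);

	printf("pfn %#llx: mapcount %llu, flags %#llx%s%s\n",
	       (unsigned long long)pfn,
	       (unsigned long long)read_entry("/proc/kpagecount", pfn),
	       (unsigned long long)flags,
	       (flags & (1ULL << 20)) ? " [NOPAGE]" : "",  /* KPF_NOPAGE */
	       (flags & (1ULL << 5))  ? " [LRU]"    : ""); /* KPF_LRU */
	return 0;
}
```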
diff --git a/fs/select.c b/fs/select.c
index 0fe0e1469df3..d870237e42c7 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -168,7 +168,7 @@ static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
 	return table->entry++;
 }
 
-static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
 	struct poll_wqueues *pwq = wait->private;
 	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
@@ -194,6 +194,16 @@ static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	return default_wake_function(&dummy_wait, mode, sync, key);
 }
 
+static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+	struct poll_table_entry *entry;
+
+	entry = container_of(wait, struct poll_table_entry, wait);
+	if (key && !((unsigned long)key & entry->key))
+		return 0;
+	return __pollwake(wait, mode, sync, key);
+}
+
 /* Add a new entry */
 static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
 				poll_table *p)
@@ -205,6 +215,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
 	get_file(filp);
 	entry->filp = filp;
 	entry->wait_address = wait_address;
+	entry->key = p->key;
 	init_waitqueue_func_entry(&entry->wait, pollwake);
 	entry->wait.private = pwq;
 	add_wait_queue(wait_address, &entry->wait);
@@ -362,6 +373,18 @@ get_max:
 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
 #define POLLEX_SET (POLLPRI)
 
+static inline void wait_key_set(poll_table *wait, unsigned long in,
+				unsigned long out, unsigned long bit)
+{
+	if (wait) {
+		wait->key = POLLEX_SET;
+		if (in & bit)
+			wait->key |= POLLIN_SET;
+		if (out & bit)
+			wait->key |= POLLOUT_SET;
+	}
+}
+
 int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 {
 	ktime_t expire, *to = NULL;
@@ -418,20 +441,25 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 				if (file) {
 					f_op = file->f_op;
 					mask = DEFAULT_POLLMASK;
-					if (f_op && f_op->poll)
-						mask = (*f_op->poll)(file, retval ? NULL : wait);
+					if (f_op && f_op->poll) {
+						wait_key_set(wait, in, out, bit);
+						mask = (*f_op->poll)(file, wait);
+					}
 					fput_light(file, fput_needed);
 					if ((mask & POLLIN_SET) && (in & bit)) {
 						res_in |= bit;
 						retval++;
+						wait = NULL;
 					}
 					if ((mask & POLLOUT_SET) && (out & bit)) {
 						res_out |= bit;
 						retval++;
+						wait = NULL;
 					}
 					if ((mask & POLLEX_SET) && (ex & bit)) {
 						res_ex |= bit;
 						retval++;
+						wait = NULL;
 					}
 				}
 			}
@@ -685,8 +713,12 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
 		mask = POLLNVAL;
 		if (file != NULL) {
 			mask = DEFAULT_POLLMASK;
-			if (file->f_op && file->f_op->poll)
+			if (file->f_op && file->f_op->poll) {
+				if (pwait)
+					pwait->key = pollfd->events |
+							POLLERR | POLLHUP;
 				mask = file->f_op->poll(file, pwait);
+			}
 			/* Mask out unneeded events. */
 			mask &= pollfd->events | POLLERR | POLLHUP;
 			fput_light(file, fput_needed);
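The new `entry->key` / `wait_key_set()` machinery lets `pollwake()` drop keyed wakeups whose event mask does not intersect what the sleeper asked for, while an unkeyed (NULL-key) wakeup still always gets through. The program below is only a userspace model of that filter, reusing the patch's `POLL*_SET` groupings; the scenario is invented.

```c
#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>

/* Event groupings as defined in fs/select.c above. */
#define POLLIN_SET  (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET  (POLLPRI)

/* Model of the new pollwake() check: a wakeup keyed with 'key' only
 * reaches a sleeper whose poll_table_entry.key intersects it; key == 0
 * stands in for a NULL key and always wakes (legacy wake_up callers). */
static int would_wake(unsigned long entry_key, unsigned long key)
{
	if (key && !(key & entry_key))
		return 0;
	return 1;
}

int main(void)
{
	/* A select() sleeper that only watched its fd for readability,
	 * i.e. the key wait_key_set() would build for an "in" bit. */
	unsigned long entry_key = POLLEX_SET | POLLIN_SET;

	printf("POLLOUT wakeup: %s\n",
	       would_wake(entry_key, POLLOUT) ? "delivered" : "filtered");
	printf("POLLIN wakeup:  %s\n",
	       would_wake(entry_key, POLLIN) ? "delivered" : "filtered");
	printf("unkeyed wakeup: %s\n",
	       would_wake(entry_key, 0) ? "delivered" : "filtered");
	return 0;
}
```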