-rw-r--r--  Documentation/devices.txt |  2
-rw-r--r--  drivers/char/vt_ioctl.c   |  8
-rw-r--r--  fs/xfs/xfs_buf_item.h     |  5
-rw-r--r--  fs/xfs/xfs_log_recover.c  | 51
-rw-r--r--  fs/xfs/xfs_trans_buf.c    |  1
-rw-r--r--  kernel/futex.c            | 26
-rw-r--r--  kernel/futex_compat.c     | 28
-rw-r--r--  kernel/sys.c              |  2
-rw-r--r--  mm/hugetlb.c              |  2
9 files changed, 47 insertions(+), 78 deletions(-)
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 8de132a02b..6c46730c63 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -94,6 +94,8 @@ Your cooperation is appreciated.
 		  9 = /dev/urandom	Faster, less secure random number gen.
 		 10 = /dev/aio		Asynchronous I/O notification interface
 		 11 = /dev/kmsg		Writes to this come out as printk's
+		 12 = /dev/oldmem	Used by crashdump kernels to access
+					the memory of the kernel that crashed.
 
  1 block	RAM disk
 		  0 = /dev/ram0		First RAM disk
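Note: the new entry documents the char 1/12 device that kdump capture kernels use to read the previous kernel's memory. As an illustration only (not part of the patch), a capture-kernel tool might read it like any other character device; the buffer size and privilege expectations below are assumptions:

/* Minimal sketch: read the first page of the crashed kernel's memory
 * from inside a kdump capture kernel.  Assumes /dev/oldmem exists and
 * the process is privileged enough to read it. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/oldmem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/oldmem");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		fwrite(buf, 1, n, stdout);	/* raw bytes of the old kernel */
	close(fd);
	return n < 0;
}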
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 045e6888d1..c799b7f7bb 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -770,6 +770,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 		/*
 		 * Switching-from response
 		 */
+		acquire_console_sem();
 		if (vc->vt_newvt >= 0) {
 			if (arg == 0)
 				/*
@@ -784,7 +785,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 				 * complete the switch.
 				 */
 				int newvt;
-				acquire_console_sem();
 				newvt = vc->vt_newvt;
 				vc->vt_newvt = -1;
 				i = vc_allocate(newvt);
@@ -798,7 +798,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 				 * other console switches..
 				 */
 				complete_change_console(vc_cons[newvt].d);
-				release_console_sem();
 			}
 		}
 
@@ -810,9 +809,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 			/*
 			 * If it's just an ACK, ignore it
 			 */
-			if (arg != VT_ACKACQ)
+			if (arg != VT_ACKACQ) {
+				release_console_sem();
 				return -EINVAL;
+			}
 		}
+		release_console_sem();
 
 		return 0;
 
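Note: this change widens the console-semaphore critical section in VT_RELDISP: the lock is now taken before vc->vt_newvt is examined, so the read, clear, and switch-completion happen atomically with respect to other console switches, and every exit path drops the lock it now holds. A simplified sketch of the resulting lock shape (control flow condensed; not the full ioctl body):

/* Sketch of the fixed VT_RELDISP locking, assuming the surrounding
 * ioctl context; branches are abbreviated to show lock scope only. */
acquire_console_sem();			/* taken before vt_newvt is read */
if (vc->vt_newvt >= 0) {
	/* switching-from: abort (arg == 0) or complete the pending switch */
} else {
	/* switching-to: anything but an ACK is an error */
	if (arg != VT_ACKACQ) {
		release_console_sem();	/* early return drops the lock */
		return -EINVAL;
	}
}
release_console_sem();
return 0;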
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index fa25b7dcc6..d7e1361430 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -52,11 +52,6 @@ typedef struct xfs_buf_log_format_t {
 #define	XFS_BLI_UDQUOT_BUF	0x4
 #define XFS_BLI_PDQUOT_BUF	0x8
 #define	XFS_BLI_GDQUOT_BUF	0x10
-/*
- * This flag indicates that the buffer contains newly allocated
- * inodes.
- */
-#define	XFS_BLI_INODE_NEW_BUF	0x20
 
 #define	XFS_BLI_CHUNK		128
 #define	XFS_BLI_SHIFT		7
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7174991f4b..8ae6e8e5f3 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1874,7 +1874,6 @@ xlog_recover_do_inode_buffer(
 /*ARGSUSED*/
 STATIC void
 xlog_recover_do_reg_buffer(
-	xfs_mount_t		*mp,
 	xlog_recover_item_t	*item,
 	xfs_buf_t		*bp,
 	xfs_buf_log_format_t	*buf_f)
@@ -1885,50 +1884,6 @@ xlog_recover_do_reg_buffer(
 	unsigned int		*data_map = NULL;
 	unsigned int		map_size = 0;
 	int			error;
-	int			stale_buf = 1;
-
-	/*
-	 * Scan through the on-disk inode buffer and attempt to
-	 * determine if it has been written to since it was logged.
-	 *
-	 * - If any of the magic numbers are incorrect then the buffer is stale
-	 * - If any of the modes are non-zero then the buffer is not stale
-	 * - If all of the modes are zero and at least one of the generation
-	 *   counts is non-zero then the buffer is stale
-	 *
-	 * If the end result is a stale buffer then the log buffer is replayed
-	 * otherwise it is skipped.
-	 *
-	 * This heuristic is not perfect.  It can be improved by scanning the
-	 * entire inode chunk for evidence that any of the inode clusters have
-	 * been updated.  To fix this problem completely we will need a major
-	 * architectural change to the logging system.
-	 */
-	if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) {
-		xfs_dinode_t	*dip;
-		int		inodes_per_buf;
-		int		mode_count = 0;
-		int		gen_count = 0;
-
-		stale_buf = 0;
-		inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
-		for (i = 0; i < inodes_per_buf; i++) {
-			dip = (xfs_dinode_t *)xfs_buf_offset(bp,
-				i * mp->m_sb.sb_inodesize);
-			if (be16_to_cpu(dip->di_core.di_magic) !=
-					XFS_DINODE_MAGIC) {
-				stale_buf = 1;
-				break;
-			}
-			if (dip->di_core.di_mode)
-				mode_count++;
-			if (dip->di_core.di_gen)
-				gen_count++;
-		}
-
-		if (!mode_count && gen_count)
-			stale_buf = 1;
-	}
 
 	switch (buf_f->blf_type) {
 	case XFS_LI_BUF:
@@ -1962,7 +1917,7 @@ xlog_recover_do_reg_buffer(
 					-1, 0, XFS_QMOPT_DOWARN,
 					"dquot_buf_recover");
 		}
-		if (!error && stale_buf)
+		if (!error)
 			memcpy(xfs_buf_offset(bp,
 				(uint)bit << XFS_BLI_SHIFT),	/* dest */
 				item->ri_buf[i].i_addr,		/* source */
@@ -2134,7 +2089,7 @@ xlog_recover_do_dquot_buffer(
 	if (log->l_quotaoffs_flag & type)
 		return;
 
-	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+	xlog_recover_do_reg_buffer(item, bp, buf_f);
 }
 
 /*
@@ -2235,7 +2190,7 @@ xlog_recover_do_buffer_trans(
 	    (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
 	} else {
-		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+		xlog_recover_do_reg_buffer(item, bp, buf_f);
 	}
 	if (error)
 		return XFS_ERROR(error);
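Note: with the stale-buffer heuristic backed out, logged regions of regular buffers are replayed unconditionally when verification passed, and the mount pointer parameter becomes unused. For context on the surviving memcpy: the buf log format tracks dirty regions in XFS_BLI_CHUNK-byte (128-byte) units, so a chunk index in the map converts to a byte offset by a shift of XFS_BLI_SHIFT (7). A standalone illustration of that arithmetic (the bit/nbits values are made up for the example; not kernel code):

/* Chunk-index -> byte-offset arithmetic behind the replay memcpy.
 * XFS_BLI_CHUNK and XFS_BLI_SHIFT are the constants from xfs_buf_item.h. */
#include <stdio.h>

#define XFS_BLI_CHUNK	128
#define XFS_BLI_SHIFT	7	/* log2(XFS_BLI_CHUNK) */

int main(void)
{
	unsigned int bit = 5, nbits = 3;	/* hypothetical dirty range */

	/* 5 << 7 = 640 = 5 * 128; 3 << 7 = 384 = 3 * 128 */
	printf("dest offset = %u bytes, copy length = %u bytes\n",
	       bit << XFS_BLI_SHIFT, nbits << XFS_BLI_SHIFT);
	return 0;
}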
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 95fff6872a..60b6b89802 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -966,7 +966,6 @@ xfs_trans_inode_alloc_buf(
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
 	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
-	bip->bli_format.blf_flags |= XFS_BLI_INODE_NEW_BUF;
 }
 
 
diff --git a/kernel/futex.c b/kernel/futex.c
index e8935b195e..fcc94e7b40 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
 void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
 	unsigned long futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -1965,12 +1966,14 @@ void exit_robust_list(struct task_struct *curr)
 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
 		return;
 
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset,
-				   curr, pip);
-
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != &head->list) {
 		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
+		/*
 		 * A pending lock might already be on the list, so
 		 * don't process it twice:
 		 */
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr)
 			if (handle_futex_death((void __user *)entry + futex_offset,
 						curr, pi))
 				return;
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&entry, &entry->next, &pi))
+		if (rc)
 			return;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr)
 
 		cond_resched();
 	}
+
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
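Note: the reordering closes a race in robust-futex cleanup. handle_futex_death() can wake another thread that immediately takes the lock and unlinks or reuses the current list entry, so the ->next pointer must be fetched before the death handler runs; list_op_pending is likewise handled only after the walk. The same pattern, reduced to standalone C (hypothetical node/handle names; the kernel's user-space fetches and error handling are elided):

/* Fetch-next-before-handling traversal: read the link out of the current
 * node before acting on it, because acting on it may invalidate the node. */
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int val; };

static void handle(struct node *n)	/* stand-in for handle_futex_death */
{
	printf("handling %d\n", n->val);
	free(n);			/* after this, n->next is gone */
}

static void walk(struct node *head)
{
	struct node *entry = head, *next_entry;

	while (entry) {
		next_entry = entry->next;	/* fetch next FIRST */
		handle(entry);			/* may invalidate entry */
		entry = next_entry;
	}
}

int main(void)
{
	struct node *b = malloc(sizeof(*b));
	struct node *a = malloc(sizeof(*a));

	b->next = NULL; b->val = 2;
	a->next = b;    a->val = 1;
	walk(a);
	return 0;
}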
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7e52eb051f..2c2e2954b7 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
 void compat_exit_robust_list(struct task_struct *curr)
 {
 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-	compat_uptr_t uentry, upending;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	compat_uptr_t uentry, next_uentry, upending;
 	compat_long_t futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -61,11 +62,16 @@ void compat_exit_robust_list(struct task_struct *curr)
 	if (fetch_robust_entry(&upending, &pending,
 			       &head->list_op_pending, &pip))
 		return;
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != (struct robust_list __user *) &head->list) {
 		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_uentry, &next_entry,
+			(compat_uptr_t __user *)&entry->next, &next_pi);
+		/*
 		 * A pending lock might already be on the list, so
 		 * dont process it twice:
 		 */
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
 				curr, pi))
 			return;
 
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&uentry, &entry,
-			(compat_uptr_t __user *)&entry->next, &pi))
+		if (rc)
 			return;
+		uentry = next_uentry;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
 
 		cond_resched();
 	}
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 asmlinkage long
diff --git a/kernel/sys.c b/kernel/sys.c
index 1b33b05d34..8ae2e636eb 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -32,6 +32,7 @@
 #include <linux/getcpu.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/seccomp.h>
+#include <linux/cpu.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -878,6 +879,7 @@ void kernel_power_off(void)
 	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
 	if (pm_power_off_prepare)
 		pm_power_off_prepare();
+	disable_nonboot_cpus();
 	sysdev_shutdown();
 	printk(KERN_EMERG "Power down.\n");
 	machine_power_off();
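Note: read together, the two sys.c hunks make kernel_power_off() take secondary CPUs offline before the final shutdown steps, so machine_power_off() reliably runs on the boot CPU, which some platforms and firmware power-off paths expect; <linux/cpu.h> supplies disable_nonboot_cpus(). Assembled from the hunk above, the patched function reads:

/* Shutdown order after the patch: prepare, drop secondary CPUs,
 * quiesce sysdevs, then cut power from the boot CPU. */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();		/* new: quiesce secondary CPUs */
	sysdev_shutdown();
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}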
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 84c795ee2d..eab8c428cc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
 	might_sleep();
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
 		cond_resched();
-		clear_user_highpage(page + i, addr);
+		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
 	}
 }
 
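Note: the one-liner fixes the user virtual address passed while zeroing a huge page. Previously every base page was cleared with the huge page's starting address, but clear_user_highpage() uses that address for cache handling on virtually indexed (aliasing) caches, so subpage i must be tagged with addr + i * PAGE_SIZE. A standalone sketch of the address math (x86-style 4 KiB base pages and 2 MiB huge pages assumed; the mapping base is made up):

/* Worked example of the per-subpage address math in clear_huge_page(). */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_SIZE	(2UL * 1024 * 1024)

int main(void)
{
	unsigned long addr = 0x40000000UL;		/* hypothetical mapping base */
	unsigned long i, n = HPAGE_SIZE / PAGE_SIZE;	/* 512 subpages */

	for (i = 0; i < n; i++)
		if (i < 2 || i == n - 1)		/* print a few samples */
			printf("subpage %3lu -> %#lx\n", i, addr + i * PAGE_SIZE);
	return 0;
}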