Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig           |   1
-rw-r--r--  fs/Kconfig.binfmt    |   4
-rw-r--r--  fs/eventpoll.c       |  35
-rw-r--r--  fs/exec.c            | 168
-rw-r--r--  fs/fcntl.c           |  62
-rw-r--r--  fs/fuse/dev.c        |   7
-rw-r--r--  fs/isofs/inode.c     |  40
-rw-r--r--  fs/lockd/svc.c       |  11
-rw-r--r--  fs/lockd/svclock.c   |   6
-rw-r--r--  fs/lockd/svcsubs.c   |   9
-rw-r--r--  fs/locks.c           |  57
-rw-r--r--  fs/nfs/Kconfig       |   1
-rw-r--r--  fs/nfsd/Kconfig      |   1
-rw-r--r--  fs/nfsd/nfs4state.c  |  26
-rw-r--r--  fs/proc/base.c       |   8
-rw-r--r--  fs/proc/softirqs.c   |   4
-rw-r--r--  fs/proc/stat.c       |  14
-rw-r--r--  fs/proc/task_mmu.c   |   6
-rw-r--r--  fs/select.c          |   6
19 files changed, 287 insertions(+), 179 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index b5e582bd769d..97673c955484 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -53,7 +53,6 @@ config EXPORTFS
 config FILE_LOCKING
         bool "Enable POSIX file locking API" if EMBEDDED
         default y
-        select BKL # while lockd still uses it.
         help
           This option enables standard file locking support, required
           for filesystems like NFS and for the flock() system
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index bb4cc5b8abc8..79e2ca7973b7 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -42,7 +42,7 @@ config BINFMT_ELF_FDPIC
 
 config CORE_DUMP_DEFAULT_ELF_HEADERS
         bool "Write ELF core dumps with partial segments"
-        default n
+        default y
         depends on BINFMT_ELF && ELF_CORE
         help
           ELF core dump files describe each memory mapping of the crashed
@@ -60,7 +60,7 @@ config CORE_DUMP_DEFAULT_ELF_HEADERS
           inherited. See Documentation/filesystems/proc.txt for details.
 
           This config option changes the default setting of coredump_filter
-          seen at boot time. If unsure, say N.
+          seen at boot time. If unsure, say Y.
 
 config BINFMT_FLAT
         bool "Kernel support for flat binaries"
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 256bb7bb102a..8cf07242067d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -77,9 +77,6 @@
 /* Maximum number of nesting allowed inside epoll sets */
 #define EP_MAX_NESTS 4
 
-/* Maximum msec timeout value storeable in a long int */
-#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)
-
 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
 
 #define EP_UNACTIVE_PTR ((void *) -1L)
@@ -1117,18 +1114,22 @@ static int ep_send_events(struct eventpoll *ep,
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                    int maxevents, long timeout)
 {
-        int res, eavail;
+        int res, eavail, timed_out = 0;
         unsigned long flags;
-        long jtimeout;
+        long slack;
         wait_queue_t wait;
+        struct timespec end_time;
+        ktime_t expires, *to = NULL;
 
-        /*
-         * Calculate the timeout by checking for the "infinite" value (-1)
-         * and the overflow condition. The passed timeout is in milliseconds,
-         * that why (t * HZ) / 1000.
-         */
-        jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
-                MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
+        if (timeout > 0) {
+                ktime_get_ts(&end_time);
+                timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+                slack = select_estimate_accuracy(&end_time);
+                to = &expires;
+                *to = timespec_to_ktime(end_time);
+        } else if (timeout == 0) {
+                timed_out = 1;
+        }
 
 retry:
         spin_lock_irqsave(&ep->lock, flags);
@@ -1150,7 +1151,7 @@ retry:
                          * to TASK_INTERRUPTIBLE before doing the checks.
                          */
                        set_current_state(TASK_INTERRUPTIBLE);
-                       if (!list_empty(&ep->rdllist) || !jtimeout)
+                       if (!list_empty(&ep->rdllist) || timed_out)
                                break;
                        if (signal_pending(current)) {
                                res = -EINTR;
@@ -1158,7 +1159,9 @@ retry:
                        }
 
                        spin_unlock_irqrestore(&ep->lock, flags);
-                       jtimeout = schedule_timeout(jtimeout);
+                       if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+                               timed_out = 1;
+
                        spin_lock_irqsave(&ep->lock, flags);
                }
                __remove_wait_queue(&ep->wq, &wait);
@@ -1176,7 +1179,7 @@ retry:
          * more luck.
          */
        if (!res && eavail &&
-           !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
+           !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
                goto retry;
 
        return res;
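
The epoll change above swaps the jiffies-based timeout for an hrtimer with select()-style slack; the userspace contract of epoll_wait() (-1 blocks, 0 polls, N waits roughly N milliseconds) is unchanged. A minimal userspace sketch of that timeout path follows, for illustration only; the pipe and epoll setup here is not part of the patch.

/* Sketch: epoll_wait() with a millisecond timeout.  The kernel-side hrtimer
 * conversion in the hunk above is invisible from here. */
#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
        int pipefd[2];
        struct epoll_event ev = { .events = EPOLLIN };
        struct epoll_event out;
        int epfd, n;

        if (pipe(pipefd) < 0)
                return 1;
        epfd = epoll_create1(0);
        if (epfd < 0)
                return 1;
        ev.data.fd = pipefd[0];
        epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);

        /* Nothing written yet: with a 250 ms timeout this returns 0 after
         * roughly 250 ms (now scheduled via hrtimers inside ep_poll()). */
        n = epoll_wait(epfd, &out, 1, 250);
        printf("epoll_wait returned %d\n", n);

        close(epfd);
        close(pipefd[0]);
        close(pipefd[1]);
        return 0;
}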
diff --git a/fs/exec.c b/fs/exec.c
index 3aa75b8888a1..99d33a1371e9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -66,6 +66,12 @@ char core_pattern[CORENAME_MAX_SIZE] = "core";
 unsigned int core_pipe_limit;
 int suid_dumpable = 0;
 
+struct core_name {
+        char *corename;
+        int used, size;
+};
+static atomic_t call_count = ATOMIC_INIT(1);
+
 /* The maximal length of core_pattern is also specified in sysctl.c */
 
 static LIST_HEAD(formats);
@@ -1003,7 +1009,7 @@ int flush_old_exec(struct linux_binprm * bprm)
 
         bprm->mm = NULL;                /* We're using it now */
 
-        current->flags &= ~PF_RANDOMIZE;
+        current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
         flush_thread();
         current->personality &= ~bprm->per_clear;
 
@@ -1083,14 +1089,14 @@ EXPORT_SYMBOL(setup_new_exec);
  */
 int prepare_bprm_creds(struct linux_binprm *bprm)
 {
-        if (mutex_lock_interruptible(&current->cred_guard_mutex))
+        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                 return -ERESTARTNOINTR;
 
         bprm->cred = prepare_exec_creds();
         if (likely(bprm->cred))
                 return 0;
 
-        mutex_unlock(&current->cred_guard_mutex);
+        mutex_unlock(&current->signal->cred_guard_mutex);
         return -ENOMEM;
 }
 
@@ -1098,7 +1104,7 @@ void free_bprm(struct linux_binprm *bprm)
 {
         free_arg_pages(bprm);
         if (bprm->cred) {
-                mutex_unlock(&current->cred_guard_mutex);
+                mutex_unlock(&current->signal->cred_guard_mutex);
                 abort_creds(bprm->cred);
         }
         kfree(bprm);
@@ -1119,13 +1125,13 @@ void install_exec_creds(struct linux_binprm *bprm)
          * credentials; any time after this it may be unlocked.
          */
         security_bprm_committed_creds(bprm);
-        mutex_unlock(&current->cred_guard_mutex);
+        mutex_unlock(&current->signal->cred_guard_mutex);
 }
 EXPORT_SYMBOL(install_exec_creds);
 
 /*
  * determine how safe it is to execute the proposed program
- * - the caller must hold current->cred_guard_mutex to protect against
+ * - the caller must hold ->cred_guard_mutex to protect against
  *   PTRACE_ATTACH
  */
 int check_unsafe_exec(struct linux_binprm *bprm)
@@ -1406,7 +1412,6 @@ int do_execve(const char * filename,
         if (retval < 0)
                 goto out;
 
-        current->flags &= ~PF_KTHREAD;
         retval = search_binary_handler(bprm,regs);
         if (retval < 0)
                 goto out;
@@ -1459,127 +1464,148 @@ void set_binfmt(struct linux_binfmt *new)
 
 EXPORT_SYMBOL(set_binfmt);
 
+static int expand_corename(struct core_name *cn)
+{
+        char *old_corename = cn->corename;
+
+        cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
+        cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+
+        if (!cn->corename) {
+                kfree(old_corename);
+                return -ENOMEM;
+        }
+
+        return 0;
+}
+
+static int cn_printf(struct core_name *cn, const char *fmt, ...)
+{
+        char *cur;
+        int need;
+        int ret;
+        va_list arg;
+
+        va_start(arg, fmt);
+        need = vsnprintf(NULL, 0, fmt, arg);
+        va_end(arg);
+
+        if (likely(need < cn->size - cn->used - 1))
+                goto out_printf;
+
+        ret = expand_corename(cn);
+        if (ret)
+                goto expand_fail;
+
+out_printf:
+        cur = cn->corename + cn->used;
+        va_start(arg, fmt);
+        vsnprintf(cur, need + 1, fmt, arg);
+        va_end(arg);
+        cn->used += need;
+        return 0;
+
+expand_fail:
+        return ret;
+}
+
 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
  */
-static int format_corename(char *corename, long signr)
+static int format_corename(struct core_name *cn, long signr)
 {
         const struct cred *cred = current_cred();
         const char *pat_ptr = core_pattern;
         int ispipe = (*pat_ptr == '|');
-        char *out_ptr = corename;
-        char *const out_end = corename + CORENAME_MAX_SIZE;
-        int rc;
         int pid_in_pattern = 0;
+        int err = 0;
+
+        cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
+        cn->corename = kmalloc(cn->size, GFP_KERNEL);
+        cn->used = 0;
+
+        if (!cn->corename)
+                return -ENOMEM;
 
         /* Repeat as long as we have more pattern to process and more output
            space */
         while (*pat_ptr) {
                 if (*pat_ptr != '%') {
-                        if (out_ptr == out_end)
+                        if (*pat_ptr == 0)
                                 goto out;
-                        *out_ptr++ = *pat_ptr++;
+                        err = cn_printf(cn, "%c", *pat_ptr++);
                 } else {
                         switch (*++pat_ptr) {
+                        /* single % at the end, drop that */
                         case 0:
                                 goto out;
                         /* Double percent, output one percent */
                         case '%':
-                                if (out_ptr == out_end)
-                                        goto out;
-                                *out_ptr++ = '%';
+                                err = cn_printf(cn, "%c", '%');
                                 break;
                         /* pid */
                         case 'p':
                                 pid_in_pattern = 1;
-                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                              "%d", task_tgid_vnr(current));
-                                if (rc > out_end - out_ptr)
-                                        goto out;
-                                out_ptr += rc;
+                                err = cn_printf(cn, "%d",
+                                              task_tgid_vnr(current));
                                 break;
                         /* uid */
                         case 'u':
-                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                              "%d", cred->uid);
-                                if (rc > out_end - out_ptr)
-                                        goto out;
-                                out_ptr += rc;
+                                err = cn_printf(cn, "%d", cred->uid);
                                 break;
                         /* gid */
                         case 'g':
-                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                              "%d", cred->gid);
-                                if (rc > out_end - out_ptr)
-                                        goto out;
-                                out_ptr += rc;
+                                err = cn_printf(cn, "%d", cred->gid);
                                 break;
                         /* signal that caused the coredump */
                         case 's':
-                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                              "%ld", signr);
-                                if (rc > out_end - out_ptr)
-                                        goto out;
-                                out_ptr += rc;
+                                err = cn_printf(cn, "%ld", signr);
                                 break;
                         /* UNIX time of coredump */
                         case 't': {
                                 struct timeval tv;
                                 do_gettimeofday(&tv);
-                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                              "%lu", tv.tv_sec);
-                                if (rc > out_end - out_ptr)
-                                        goto out;
-                                out_ptr += rc;
+                                err = cn_printf(cn, "%lu", tv.tv_sec);
                                 break;
                         }
                         /* hostname */
                         case 'h':
                                 down_read(&uts_sem);
-                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                              "%s", utsname()->nodename);
+                                err = cn_printf(cn, "%s",
+                                              utsname()->nodename);
                                 up_read(&uts_sem);
-                                if (rc > out_end - out_ptr)
-                                        goto out;
-                                out_ptr += rc;
                                 break;
                         /* executable */
                         case 'e':
-                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                              "%s", current->comm);
-                                if (rc > out_end - out_ptr)
-                                        goto out;
-                                out_ptr += rc;
+                                err = cn_printf(cn, "%s", current->comm);
                                 break;
                         /* core limit size */
                         case 'c':
-                                rc = snprintf(out_ptr, out_end - out_ptr,
-                                              "%lu", rlimit(RLIMIT_CORE));
-                                if (rc > out_end - out_ptr)
-                                        goto out;
-                                out_ptr += rc;
+                                err = cn_printf(cn, "%lu",
+                                              rlimit(RLIMIT_CORE));
                                 break;
                         default:
                                 break;
                         }
                         ++pat_ptr;
                 }
+
+                if (err)
+                        return err;
         }
+
         /* Backward compatibility with core_uses_pid:
          *
          * If core_pattern does not include a %p (as is the default)
          * and core_uses_pid is set, then .%pid will be appended to
          * the filename. Do not do this for piped commands. */
         if (!ispipe && !pid_in_pattern && core_uses_pid) {
-                rc = snprintf(out_ptr, out_end - out_ptr,
-                              ".%d", task_tgid_vnr(current));
-                if (rc > out_end - out_ptr)
-                        goto out;
-                out_ptr += rc;
+                err = cn_printf(cn, ".%d", task_tgid_vnr(current));
+                if (err)
+                        return err;
         }
 out:
-        *out_ptr = 0;
         return ispipe;
 }
 
@@ -1856,7 +1882,7 @@ static int umh_pipe_setup(struct subprocess_info *info)
 void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 {
         struct core_state core_state;
-        char corename[CORENAME_MAX_SIZE + 1];
+        struct core_name cn;
         struct mm_struct *mm = current->mm;
         struct linux_binfmt * binfmt;
         const struct cred *old_cred;
@@ -1911,7 +1937,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
          */
         clear_thread_flag(TIF_SIGPENDING);
 
-        ispipe = format_corename(corename, signr);
+        ispipe = format_corename(&cn, signr);
+
+        if (ispipe == -ENOMEM) {
+                printk(KERN_WARNING "format_corename failed\n");
+                printk(KERN_WARNING "Aborting core\n");
+                goto fail_corename;
+        }
 
         if (ispipe) {
                 int dump_count;
@@ -1948,7 +1980,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                         goto fail_dropcount;
                 }
 
-                helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
+                helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
                 if (!helper_argv) {
                         printk(KERN_WARNING "%s failed to allocate memory\n",
                                __func__);
@@ -1961,7 +1993,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                 argv_free(helper_argv);
                 if (retval) {
                         printk(KERN_INFO "Core dump to %s pipe failed\n",
-                               corename);
+                               cn.corename);
                         goto close_fail;
                 }
         } else {
@@ -1970,7 +2002,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
                 if (cprm.limit < binfmt->min_coredump)
                         goto fail_unlock;
 
-                cprm.file = filp_open(corename,
+                cprm.file = filp_open(cn.corename,
                                  O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
                                  0600);
                 if (IS_ERR(cprm.file))
@@ -2012,6 +2044,8 @@ fail_dropcount:
         if (ispipe)
                 atomic_dec(&core_dump_count);
 fail_unlock:
+        kfree(cn.corename);
+fail_corename:
         coredump_finish(mm);
         revert_creds(old_cred);
fail_creds:
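
The corename rework above replaces a fixed CORENAME_MAX_SIZE buffer with a struct core_name that cn_printf() grows on demand. Below is a userspace analogue of that measure-then-format idea, offered as a hedged sketch only; grow_buf and buf_printf are made-up names, not the kernel's API.

/* Measure with vsnprintf(NULL, 0, ...), grow the buffer, then format. */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct grow_buf {
        char *data;
        size_t used, size;
};

static int buf_printf(struct grow_buf *b, const char *fmt, ...)
{
        va_list ap;
        int need;

        /* First pass: ask vsnprintf how much room the text needs. */
        va_start(ap, fmt);
        need = vsnprintf(NULL, 0, fmt, ap);
        va_end(ap);
        if (need < 0)
                return -1;

        if (b->used + need + 1 > b->size) {
                size_t newsize = (b->used + need + 1) * 2;
                char *tmp = realloc(b->data, newsize);
                if (!tmp)
                        return -1;
                b->data = tmp;
                b->size = newsize;
        }

        /* Second pass: format into the (now large enough) buffer. */
        va_start(ap, fmt);
        vsnprintf(b->data + b->used, need + 1, fmt, ap);
        va_end(ap);
        b->used += need;
        return 0;
}

int main(void)
{
        struct grow_buf b = { NULL, 0, 0 };

        /* Build a core-file style name, e.g. "core.mytool.1234". */
        buf_printf(&b, "core");
        buf_printf(&b, ".%s", "mytool");
        buf_printf(&b, ".%d", 1234);
        printf("%s\n", b.data);
        free(b.data);
        return 0;
}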
diff --git a/fs/fcntl.c b/fs/fcntl.c
index f8cc34f542c3..ecc8b3954ed6 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -640,7 +640,7 @@ static void fasync_free_rcu(struct rcu_head *head)
  * match the state "is the filp on a fasync list".
  *
  */
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 {
         struct fasync_struct *fa, **fp;
         int result = 0;
@@ -666,21 +666,31 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
         return result;
 }
 
+struct fasync_struct *fasync_alloc(void)
+{
+        return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+}
+
 /*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
+ * NOTE! This can be used only for unused fasync entries:
+ * entries that actually got inserted on the fasync list
+ * need to be released by rcu - see fasync_remove_entry.
+ */
+void fasync_free(struct fasync_struct *new)
+{
+        kmem_cache_free(fasync_cache, new);
+}
+
+/*
+ * Insert a new entry into the fasync list.  Return the pointer to the
+ * old one if we didn't use the new one.
  *
  * NOTE! It is very important that the FASYNC flag always
  * match the state "is the filp on a fasync list".
  */
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
 {
-        struct fasync_struct *new, *fa, **fp;
-        int result = 0;
-
-        new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
-        if (!new)
-                return -ENOMEM;
+        struct fasync_struct *fa, **fp;
 
         spin_lock(&filp->f_lock);
         spin_lock(&fasync_lock);
@@ -691,8 +701,6 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
                 spin_lock_irq(&fa->fa_lock);
                 fa->fa_fd = fd;
                 spin_unlock_irq(&fa->fa_lock);
-
-                kmem_cache_free(fasync_cache, new);
                 goto out;
         }
 
@@ -702,13 +710,39 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
         new->fa_fd = fd;
         new->fa_next = *fapp;
         rcu_assign_pointer(*fapp, new);
-        result = 1;
         filp->f_flags |= FASYNC;
 
 out:
         spin_unlock(&fasync_lock);
         spin_unlock(&filp->f_lock);
-        return result;
+        return fa;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+        struct fasync_struct *new;
+
+        new = fasync_alloc();
+        if (!new)
+                return -ENOMEM;
+
+        /*
+         * fasync_insert_entry() returns the old (update) entry if
+         * it existed.
+         *
+         * So free the (unused) new entry and return 0 to let the
+         * caller know that we didn't add any new fasync entries.
+         */
+        if (fasync_insert_entry(fd, filp, fapp, new)) {
+                fasync_free(new);
+                return 0;
+        }
+
+        return 1;
 }
 
 /*
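
The split into fasync_alloc(), fasync_free() and fasync_insert_entry() above lets callers such as fcntl_setlease() preallocate outside the locked insert. From userspace this machinery is what backs O_ASYNC signal-driven I/O; here is a small, purely illustrative sketch that exercises it.

/* Request SIGIO on a pipe and observe it when data arrives. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
        (void)sig;
        got_sigio = 1;
}

int main(void)
{
        int pipefd[2];

        if (pipe(pipefd) < 0)
                return 1;
        signal(SIGIO, on_sigio);

        /* Own the fd and switch it to O_ASYNC; on the kernel side this is
         * what ends up going through fasync_insert_entry(). */
        fcntl(pipefd[0], F_SETOWN, getpid());
        fcntl(pipefd[0], F_SETFL, fcntl(pipefd[0], F_GETFL) | O_ASYNC);

        if (write(pipefd[1], "x", 1) != 1)      /* fd becomes readable -> SIGIO */
                return 1;
        while (!got_sigio)
                usleep(1000);

        printf("got SIGIO\n");
        return 0;
}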
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index b98664275f02..6e07696308dc 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1334,12 +1334,7 @@ out_finish:
 
 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-        int i;
-
-        for (i = 0; i < req->num_pages; i++) {
-                struct page *page = req->pages[i];
-                page_cache_release(page);
-        }
+        release_pages(req->pages, req->num_pages, 0);
 }
 
 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 60c2b944d762..79cf7f616bbe 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -544,6 +544,34 @@ static unsigned int isofs_get_last_session(struct super_block *sb, s32 session)
 }
 
 /*
+ * Check if root directory is empty (has less than 3 files).
+ *
+ * Used to detect broken CDs where ISO root directory is empty but Joliet root
+ * directory is OK. If such CD has Rock Ridge extensions, they will be disabled
+ * (and Joliet used instead) or else no files would be visible.
+ */
+static bool rootdir_empty(struct super_block *sb, unsigned long block)
+{
+        int offset = 0, files = 0, de_len;
+        struct iso_directory_record *de;
+        struct buffer_head *bh;
+
+        bh = sb_bread(sb, block);
+        if (!bh)
+                return true;
+        while (files < 3) {
+                de = (struct iso_directory_record *) (bh->b_data + offset);
+                de_len = *(unsigned char *) de;
+                if (de_len == 0)
+                        break;
+                files++;
+                offset += de_len;
+        }
+        brelse(bh);
+        return files < 3;
+}
+
+/*
  * Initialize the superblock and read the root inode.
  *
  * Note: a check_disk_change() has been done immediately prior
@@ -843,6 +871,18 @@ root_found:
                 goto out_no_root;
 
         /*
+         * Fix for broken CDs with Rock Ridge and empty ISO root directory but
+         * correct Joliet root directory.
+         */
+        if (sbi->s_rock == 1 && joliet_level &&
+                                rootdir_empty(s, sbi->s_firstdatazone)) {
+                printk(KERN_NOTICE
+                        "ISOFS: primary root directory is empty. "
+                        "Disabling Rock Ridge and switching to Joliet.");
+                sbi->s_rock = 0;
+        }
+
+        /*
          * If this disk has both Rock Ridge and Joliet on it, then we
          * want to use Rock Ridge by default. This can be overridden
         * by using the norock mount option. There is still one other
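
rootdir_empty() above walks ISO 9660 directory records, where each record begins with a length byte, and treats a root directory with fewer than three records ('.', '..' and at least one real entry) as empty. A userspace rendition of that scan on a made-up buffer, purely illustrative (the bounds check is an addition of this sketch):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool dir_block_empty(const unsigned char *block, size_t len)
{
        size_t offset = 0;
        int files = 0;

        while (files < 3 && offset < len) {
                unsigned char de_len = block[offset];   /* record length byte */
                if (de_len == 0)
                        break;
                files++;
                offset += de_len;
        }
        return files < 3;
}

int main(void)
{
        /* Fake block: two 34-byte records ('.' and '..') and then a 0 byte,
         * i.e. no real files, so the directory counts as empty. */
        unsigned char block[2048] = { 0 };
        block[0] = 34;
        block[34] = 34;

        printf("empty: %s\n", dir_block_empty(block, sizeof(block)) ? "yes" : "no");
        return 0;
}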
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index b13aabc12298..abfff9d7979d 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -22,7 +22,6 @@
 #include <linux/in.h>
 #include <linux/uio.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -130,15 +129,6 @@ lockd(void *vrqstp)
 
         dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
-        /*
-         * FIXME: it would be nice if lockd didn't spend its entire life
-         * running under the BKL. At the very least, it would be good to
-         * have someone clarify what it's intended to protect here. I've
-         * seen some handwavy posts about posix locking needing to be
-         * done under the BKL, but it's far from clear.
-         */
-        lock_kernel();
-
         if (!nlm_timeout)
                 nlm_timeout = LOCKD_DFLT_TIMEO;
         nlmsvc_timeout = nlm_timeout * HZ;
@@ -195,7 +185,6 @@ lockd(void *vrqstp)
         if (nlmsvc_ops)
                 nlmsvc_invalidate_all();
         nlm_shutdown_hosts();
-        unlock_kernel();
         return 0;
 }
 
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 6f1ef000975a..c462d346acbd 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -700,14 +700,16 @@ nlmsvc_notify_blocked(struct file_lock *fl)
         struct nlm_block        *block;
 
         dprintk("lockd: VFS unblock notification for block %p\n", fl);
+        spin_lock(&nlm_blocked_lock);
         list_for_each_entry(block, &nlm_blocked, b_list) {
                 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
-                        nlmsvc_insert_block(block, 0);
+                        nlmsvc_insert_block_locked(block, 0);
+                        spin_unlock(&nlm_blocked_lock);
                         svc_wake_up(block->b_daemon);
                         return;
                 }
         }
-
+        spin_unlock(&nlm_blocked_lock);
         printk(KERN_WARNING "lockd: notification for unknown block!\n");
 }
 
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index d0ef94cfb3da..1ca0679c80bf 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -170,6 +170,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 
 again:
         file->f_locks = 0;
+        lock_flocks(); /* protects i_flock list */
         for (fl = inode->i_flock; fl; fl = fl->fl_next) {
                 if (fl->fl_lmops != &nlmsvc_lock_operations)
                         continue;
@@ -181,6 +182,7 @@ again:
                 if (match(lockhost, host)) {
                         struct file_lock lock = *fl;
 
+                        unlock_flocks();
                         lock.fl_type  = F_UNLCK;
                         lock.fl_start = 0;
                         lock.fl_end   = OFFSET_MAX;
@@ -192,6 +194,7 @@ again:
                         goto again;
                 }
         }
+        unlock_flocks();
 
         return 0;
 }
@@ -226,10 +229,14 @@ nlm_file_inuse(struct nlm_file *file)
         if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
                 return 1;
 
+        lock_flocks();
         for (fl = inode->i_flock; fl; fl = fl->fl_next) {
-                if (fl->fl_lmops == &nlmsvc_lock_operations)
+                if (fl->fl_lmops == &nlmsvc_lock_operations) {
+                        unlock_flocks();
                         return 1;
+                }
         }
+        unlock_flocks();
         file->f_locks = 0;
         return 0;
 }
diff --git a/fs/locks.c b/fs/locks.c
index 4de3a2666810..50ec15927aab 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,6 +142,7 @@ int lease_break_time = 45;
 
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
+static DEFINE_SPINLOCK(file_lock_lock);
 
 /*
  * Protects the two list heads above, plus the inode->i_flock list
@@ -149,23 +150,24 @@ static LIST_HEAD(blocked_list);
  */
 void lock_flocks(void)
 {
-        lock_kernel();
+        spin_lock(&file_lock_lock);
 }
 EXPORT_SYMBOL_GPL(lock_flocks);
 
 void unlock_flocks(void)
 {
-        unlock_kernel();
+        spin_unlock(&file_lock_lock);
 }
 EXPORT_SYMBOL_GPL(unlock_flocks);
 
 static struct kmem_cache *filelock_cache __read_mostly;
 
 /* Allocate an empty lock structure. */
-static struct file_lock *locks_alloc_lock(void)
+struct file_lock *locks_alloc_lock(void)
 {
         return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(locks_alloc_lock);
 
 void locks_release_private(struct file_lock *fl)
 {
@@ -1365,7 +1367,6 @@ int fcntl_getlease(struct file *filp)
 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 {
         struct file_lock *fl, **before, **my_before = NULL, *lease;
-        struct file_lock *new_fl = NULL;
         struct dentry *dentry = filp->f_path.dentry;
         struct inode *inode = dentry->d_inode;
         int error, rdlease_count = 0, wrlease_count = 0;
@@ -1385,11 +1386,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
         lease = *flp;
 
         if (arg != F_UNLCK) {
-                error = -ENOMEM;
-                new_fl = locks_alloc_lock();
-                if (new_fl == NULL)
-                        goto out;
-
                 error = -EAGAIN;
                 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
                         goto out;
@@ -1434,7 +1430,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
                 goto out;
         }
 
-        error = 0;
         if (arg == F_UNLCK)
                 goto out;
 
@@ -1442,15 +1437,11 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
         if (!leases_enable)
                 goto out;
 
-        locks_copy_lock(new_fl, lease);
-        locks_insert_lock(before, new_fl);
-
-        *flp = new_fl;
+        locks_insert_lock(before, lease);
         return 0;
 
 out:
-        if (new_fl != NULL)
-                locks_free_lock(new_fl);
+        locks_free_lock(lease);
         return error;
 }
 EXPORT_SYMBOL(generic_setlease);
@@ -1514,26 +1505,38 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
  */
 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 {
-        struct file_lock fl, *flp = &fl;
+        struct file_lock *fl;
+        struct fasync_struct *new;
         struct inode *inode = filp->f_path.dentry->d_inode;
         int error;
 
-        locks_init_lock(&fl);
-        error = lease_init(filp, arg, &fl);
-        if (error)
-                return error;
+        fl = lease_alloc(filp, arg);
+        if (IS_ERR(fl))
+                return PTR_ERR(fl);
 
+        new = fasync_alloc();
+        if (!new) {
+                locks_free_lock(fl);
+                return -ENOMEM;
+        }
         lock_flocks();
-
-        error = __vfs_setlease(filp, arg, &flp);
+        error = __vfs_setlease(filp, arg, &fl);
         if (error || arg == F_UNLCK)
                 goto out_unlock;
 
-        error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
+        /*
+         * fasync_insert_entry() returns the old entry if any.
+         * If there was no old entry, then it used 'new' and
+         * inserted it into the fasync list. Clear new so that
+         * we don't release it here.
+         */
+        if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+                new = NULL;
+
         if (error < 0) {
                 /* remove lease just inserted by setlease */
-                flp->fl_type = F_UNLCK | F_INPROGRESS;
-                flp->fl_break_time = jiffies - 10;
+                fl->fl_type = F_UNLCK | F_INPROGRESS;
+                fl->fl_break_time = jiffies - 10;
                 time_out_leases(inode);
                 goto out_unlock;
         }
@@ -1541,6 +1544,8 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
         error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 out_unlock:
         unlock_flocks();
+        if (new)
+                fasync_free(new);
         return error;
 }
 
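
fcntl_setlease() above now allocates the lease and the fasync entry before taking lock_flocks(), but the fcntl(F_SETLEASE) interface it implements is unchanged. A minimal userspace sketch that takes and releases a lease, illustrative only, with error handling kept to a minimum:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        char path[] = "/tmp/lease-demo-XXXXXX";
        int fd = mkstemp(path);
        int type;

        if (fd < 0)
                return 1;

        /* Take a write lease; allowed because ours is the only open fd. */
        if (fcntl(fd, F_SETLEASE, F_WRLCK) < 0) {
                perror("F_SETLEASE");
        } else {
                type = fcntl(fd, F_GETLEASE);
                printf("lease type: %s\n",
                       type == F_WRLCK ? "F_WRLCK" :
                       type == F_RDLCK ? "F_RDLCK" : "F_UNLCK");
                fcntl(fd, F_SETLEASE, F_UNLCK);
        }

        close(fd);
        unlink(path);
        return 0;
}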
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index fd667652c502..ba306658a6db 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -1,7 +1,6 @@
 config NFS_FS
         tristate "NFS client support"
         depends on INET && FILE_LOCKING
-        depends on BKL # fix as soon as lockd is done
         select LOCKD
         select SUNRPC
         select NFS_ACL_SUPPORT if NFS_V3_ACL
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 31a78fce4732..18b3e8975fe0 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -2,7 +2,6 @@ config NFSD
         tristate "NFS server support"
         depends on INET
         depends on FILE_LOCKING
-        depends on BKL # fix as soon as lockd is done
         select LOCKD
         select SUNRPC
         select EXPORTFS
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9019e8ec9dc8..56347e0ac88d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2614,7 +2614,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
         struct nfs4_delegation *dp;
         struct nfs4_stateowner *sop = stp->st_stateowner;
         int cb_up = atomic_read(&sop->so_client->cl_cb_set);
-        struct file_lock fl, *flp = &fl;
+        struct file_lock *fl;
         int status, flag = 0;
 
         flag = NFS4_OPEN_DELEGATE_NONE;
@@ -2648,20 +2648,24 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
                 flag = NFS4_OPEN_DELEGATE_NONE;
                 goto out;
         }
-        locks_init_lock(&fl);
-        fl.fl_lmops = &nfsd_lease_mng_ops;
-        fl.fl_flags = FL_LEASE;
-        fl.fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
-        fl.fl_end = OFFSET_MAX;
-        fl.fl_owner = (fl_owner_t)dp;
-        fl.fl_file = find_readable_file(stp->st_file);
-        BUG_ON(!fl.fl_file);
-        fl.fl_pid = current->tgid;
+        status = -ENOMEM;
+        fl = locks_alloc_lock();
+        if (!fl)
+                goto out;
+        locks_init_lock(fl);
+        fl->fl_lmops = &nfsd_lease_mng_ops;
+        fl->fl_flags = FL_LEASE;
+        fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+        fl->fl_end = OFFSET_MAX;
+        fl->fl_owner = (fl_owner_t)dp;
+        fl->fl_file = find_readable_file(stp->st_file);
+        BUG_ON(!fl->fl_file);
+        fl->fl_pid = current->tgid;
 
         /* vfs_setlease checks to see if delegation should be handed out.
          * the lock_manager callbacks fl_mylease and fl_change are used
          */
-        if ((status = vfs_setlease(fl.fl_file, fl.fl_type, &flp))) {
+        if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
                 dprintk("NFSD: setlease failed [%d], no delegation\n", status);
                 unhash_delegation(dp);
                 flag = NFS4_OPEN_DELEGATE_NONE;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9b094c1c8465..f3d02ca461ec 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -226,7 +226,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
 {
         struct mm_struct *mm;
 
-        if (mutex_lock_killable(&task->cred_guard_mutex))
+        if (mutex_lock_killable(&task->signal->cred_guard_mutex))
                 return NULL;
 
         mm = get_task_mm(task);
@@ -235,7 +235,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
                 mmput(mm);
                 mm = NULL;
         }
-        mutex_unlock(&task->cred_guard_mutex);
+        mutex_unlock(&task->signal->cred_guard_mutex);
 
         return mm;
 }
@@ -2354,14 +2354,14 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
                 goto out_free;
 
         /* Guard against adverse ptrace interaction */
-        length = mutex_lock_interruptible(&task->cred_guard_mutex);
+        length = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
         if (length < 0)
                 goto out_free;
 
         length = security_setprocattr(task,
                                       (char*)file->f_path.dentry->d_name.name,
                                       (void*)page, count);
-        mutex_unlock(&task->cred_guard_mutex);
+        mutex_unlock(&task->signal->cred_guard_mutex);
 out_free:
         free_page((unsigned long) page);
 out:
diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c
index 1807c2419f17..37994737c983 100644
--- a/fs/proc/softirqs.c
+++ b/fs/proc/softirqs.c
@@ -10,13 +10,13 @@ static int show_softirqs(struct seq_file *p, void *v)
 {
         int i, j;
 
-        seq_printf(p, "                ");
+        seq_printf(p, "                    ");
         for_each_possible_cpu(i)
                 seq_printf(p, "CPU%-8d", i);
         seq_printf(p, "\n");
 
         for (i = 0; i < NR_SOFTIRQS; i++) {
-                seq_printf(p, "%8s:", softirq_to_name[i]);
+                seq_printf(p, "%12s:", softirq_to_name[i]);
                 for_each_possible_cpu(j)
                         seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
                 seq_printf(p, "\n");
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index bf31b03fc275..e15a19c93bae 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -31,7 +31,6 @@ static int show_stat(struct seq_file *p, void *v)
         u64 sum_softirq = 0;
         unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
         struct timespec boottime;
-        unsigned int per_irq_sum;
 
         user = nice = system = idle = iowait =
                 irq = softirq = steal = cputime64_zero;
@@ -52,9 +51,7 @@ static int show_stat(struct seq_file *p, void *v)
                 guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
                 guest_nice = cputime64_add(guest_nice,
                         kstat_cpu(i).cpustat.guest_nice);
-                for_each_irq_nr(j) {
-                        sum += kstat_irqs_cpu(j, i);
-                }
+                sum += kstat_cpu_irqs_sum(i);
                 sum += arch_irq_stat_cpu(i);
 
                 for (j = 0; j < NR_SOFTIRQS; j++) {
@@ -110,13 +107,8 @@ static int show_stat(struct seq_file *p, void *v)
         seq_printf(p, "intr %llu", (unsigned long long)sum);
 
         /* sum again ? it could be updated? */
-        for_each_irq_nr(j) {
-                per_irq_sum = 0;
-                for_each_possible_cpu(i)
-                        per_irq_sum += kstat_irqs_cpu(j, i);
-
-                seq_printf(p, " %u", per_irq_sum);
-        }
+        for_each_irq_nr(j)
+                seq_printf(p, " %u", kstat_irqs(j));
 
         seq_printf(p,
                 "\nctxt %llu\n"
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 871e25ed0069..da6b01d70f01 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -327,6 +327,7 @@ struct mem_size_stats {
         unsigned long private_clean;
         unsigned long private_dirty;
         unsigned long referenced;
+        unsigned long anonymous;
         unsigned long swap;
         u64 pss;
 };
@@ -357,6 +358,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                 if (!page)
                         continue;
 
+                if (PageAnon(page))
+                        mss->anonymous += PAGE_SIZE;
+
                 mss->resident += PAGE_SIZE;
                 /* Accumulate the size in pages that have been accessed. */
                 if (pte_young(ptent) || PageReferenced(page))
@@ -410,6 +414,7 @@ static int show_smap(struct seq_file *m, void *v)
                    "Private_Clean:  %8lu kB\n"
                    "Private_Dirty:  %8lu kB\n"
                    "Referenced:     %8lu kB\n"
+                   "Anonymous:      %8lu kB\n"
                    "Swap:           %8lu kB\n"
                    "KernelPageSize: %8lu kB\n"
                    "MMUPageSize:    %8lu kB\n",
@@ -421,6 +426,7 @@ static int show_smap(struct seq_file *m, void *v)
                    mss.private_clean >> 10,
                    mss.private_dirty >> 10,
                    mss.referenced >> 10,
+                   mss.anonymous >> 10,
                    mss.swap >> 10,
                    vma_kernel_pagesize(vma) >> 10,
                    vma_mmu_pagesize(vma) >> 10);
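
The smaps hunks above add a per-mapping "Anonymous:" line. A quick userspace check that maps and touches anonymous memory, then sums that field from /proc/self/smaps (it needs a kernel with this change; on older kernels the total simply stays 0):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        const size_t len = 4 * 1024 * 1024;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        FILE *f;
        char line[256];
        unsigned long kb, total = 0;

        if (p == MAP_FAILED)
                return 1;
        memset(p, 1, len);              /* fault the pages in */

        f = fopen("/proc/self/smaps", "r");
        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "Anonymous: %lu kB", &kb) == 1)
                        total += kb;
        }
        fclose(f);

        printf("Anonymous total: %lu kB\n", total);
        munmap(p, len);
        return 0;
}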
diff --git a/fs/select.c b/fs/select.c
index 500a669f7790..b7b10aa30861 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -67,7 +67,7 @@ static long __estimate_accuracy(struct timespec *tv)
         return slack;
 }
 
-static long estimate_accuracy(struct timespec *tv)
+long select_estimate_accuracy(struct timespec *tv)
 {
         unsigned long ret;
         struct timespec now;
@@ -417,7 +417,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
         }
 
         if (end_time && !timed_out)
-                slack = estimate_accuracy(end_time);
+                slack = select_estimate_accuracy(end_time);
 
         retval = 0;
         for (;;) {
@@ -769,7 +769,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
         }
 
         if (end_time && !timed_out)
-                slack = estimate_accuracy(end_time);
+                slack = select_estimate_accuracy(end_time);
 
         for (;;) {
                 struct poll_list *walk;