author     Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 12:25:20 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 12:25:20 -0500
commit     19e7b5f99474107e8d0b4b3e4652fa19ddb87efc (patch)
tree       49f15b76c07b4c90d6fbd17b49d69017c81a4b58
parent     26064ea409b4d4acb05903a36f3fe2fdccb3d8aa (diff)
parent     ce4c253573ad184603e0fa77876ba155b0cde46d (diff)
Merge branch 'work.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull misc vfs updates from Al Viro:
 "All kinds of misc stuff, without any unifying topic, from various
  people.  Neil's d_anon patch, several bugfixes, introduction of
  kvmalloc analogue of kmemdup_user(), extending bitfield.h to deal
  with fixed-endians, assorted cleanups all over the place..."

* 'work.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (28 commits)
  alpha: osf_sys.c: use timespec64 where appropriate
  alpha: osf_sys.c: fix put_tv32 regression
  jffs2: Fix use-after-free bug in jffs2_iget()'s error handling path
  dcache: delete unused d_hash_mask
  dcache: subtract d_hash_shift from 32 in advance
  fs/buffer.c: fold init_buffer() into init_page_buffers()
  fs: fold __inode_permission() into inode_permission()
  fs: add RWF_APPEND
  sctp: use vmemdup_user() rather than badly open-coding memdup_user()
  snd_ctl_elem_init_enum_names(): switch to vmemdup_user()
  replace_user_tlv(): switch to vmemdup_user()
  new primitive: vmemdup_user()
  memdup_user(): switch to GFP_USER
  eventfd: fold eventfd_ctx_get() into eventfd_ctx_fileget()
  eventfd: fold eventfd_ctx_read() into eventfd_read()
  eventfd: convert to use anon_inode_getfd()
  nfs4file: get rid of pointless include of btrfs.h
  uvc_v4l2: clean copyin/copyout up
  vme_user: don't use __copy_..._user()
  usx2y: don't bother with memdup_user() for 16-byte structure
  ...
-rw-r--r--  Documentation/filesystems/nfs/Exporting          |  27
-rw-r--r--  arch/alpha/kernel/osf_sys.c                      |  72
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c                |  23
-rw-r--r--  drivers/media/usb/uvc/uvc_v4l2.c                 |  55
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_internal.h | 10
-rw-r--r--  drivers/staging/vme/devices/vme_user.c           |   8
-rw-r--r--  fs/buffer.c                                      |  10
-rw-r--r--  fs/dcache.c                                      |  32
-rw-r--r--  fs/eventfd.c                                     | 127
-rw-r--r--  fs/file.c                                        |   5
-rw-r--r--  fs/file_table.c                                  |   1
-rw-r--r--  fs/jffs2/fs.c                                    |   1
-rw-r--r--  fs/namei.c                                       |  71
-rw-r--r--  fs/nfs/nfs4file.c                                |   1
-rw-r--r--  fs/super.c                                       |   2
-rw-r--r--  include/linux/bitfield.h                         |  46
-rw-r--r--  include/linux/buffer_head.h                      |   1
-rw-r--r--  include/linux/eventfd.h                          |  14
-rw-r--r--  include/linux/fs.h                               |   5
-rw-r--r--  include/linux/string.h                           |   1
-rw-r--r--  include/uapi/linux/fs.h                          |   6
-rw-r--r--  lib/usercopy.c                                   |   2
-rw-r--r--  mm/util.c                                        |  36
-rw-r--r--  net/sctp/socket.c                                |  59
-rw-r--r--  sound/core/control.c                             |  15
-rw-r--r--  sound/core/hwdep.c                               |   2
-rw-r--r--  sound/usb/usx2y/us122l.c                         |  43
-rw-r--r--  sound/usb/usx2y/usX2Yhwdep.c                     |  28
28 files changed, 298 insertions(+), 405 deletions(-)
diff --git a/Documentation/filesystems/nfs/Exporting b/Documentation/filesystems/nfs/Exporting
index 520a4becb75c..63889149f532 100644
--- a/Documentation/filesystems/nfs/Exporting
+++ b/Documentation/filesystems/nfs/Exporting
@@ -56,13 +56,25 @@ a/ A dentry flag DCACHE_DISCONNECTED which is set on
 any dentry that might not be part of the proper prefix.
 This is set when anonymous dentries are created, and cleared when a
 dentry is noticed to be a child of a dentry which is in the proper
-prefix.
-
-b/ A per-superblock list "s_anon" of dentries which are the roots of
-  subtrees that are not in the proper prefix.  These dentries, as
-  well as the proper prefix, need to be released at unmount time.  As
-  these dentries will not be hashed, they are linked together on the
-  d_hash list_head.
+prefix.  If the refcount on a dentry with this flag set
+ becomes zero, the dentry is immediately discarded, rather than being
+ kept in the dcache.  If a dentry that is not already in the dcache
+ is repeatedly accessed by filehandle (as NFSD might do), a new dentry
+ will be allocated for each access, and discarded at the end of
+ the access.
+
+ Note that such a dentry can acquire children, name, ancestors, etc.
+ without losing DCACHE_DISCONNECTED - that flag is only cleared when
+ subtree is successfully reconnected to root.  Until then dentries
+ in such subtree are retained only as long as there are references;
+ refcount reaching zero means immediate eviction, same as for unhashed
+ dentries.  That guarantees that we won't need to hunt them down upon
+ umount.
+
+b/ A primitive for creation of secondary roots - d_obtain_root(inode).
+   Those do _not_ bear DCACHE_DISCONNECTED.  They are placed on the
+   per-superblock list (->s_roots), so they can be located at umount
+   time for eviction purposes.
 
 c/ Helper routines to allocate anonymous dentries, and to help attach
   loose directory dentries at lookup time. They are:
@@ -77,7 +89,6 @@ c/ Helper routines to allocate anonymous dentries, and to help attach
   (such as an anonymous one created by d_obtain_alias), if appropriate.
   It returns NULL when the passed-in dentry is used, following the calling
   convention of ->lookup.
-
 
Filesystem Issues
-----------------
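
For orientation only, here is a hedged sketch (not part of this series) of how an exporting filesystem typically hands back the anonymous dentries described above; demo_fh_to_dentry() and demo_ilookup_from_fh() are hypothetical names:

#include <linux/exportfs.h>
#include <linux/dcache.h>
#include <linux/err.h>

static struct dentry *demo_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len, int fh_type)
{
	/* demo_ilookup_from_fh() is a made-up helper that resolves the
	 * filehandle to an inode (or an ERR_PTR). */
	struct inode *inode = demo_ilookup_from_fh(sb, fid, fh_len, fh_type);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	/* d_obtain_alias() reuses a cached alias if one exists; otherwise it
	 * allocates a DCACHE_DISCONNECTED dentry that is discarded as soon as
	 * its refcount reaches zero, exactly as the text above describes. */
	return d_obtain_alias(inode);
}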
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ce3a675c0c4b..fa1a392ca9a2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -950,22 +950,31 @@ struct itimerval32
 };
 
 static inline long
-get_tv32(struct timeval *o, struct timeval32 __user *i)
+get_tv32(struct timespec64 *o, struct timeval32 __user *i)
 {
 	struct timeval32 tv;
 	if (copy_from_user(&tv, i, sizeof(struct timeval32)))
 		return -EFAULT;
 	o->tv_sec = tv.tv_sec;
-	o->tv_usec = tv.tv_usec;
+	o->tv_nsec = tv.tv_usec * NSEC_PER_USEC;
 	return 0;
 }
 
 static inline long
-put_tv32(struct timeval32 __user *o, struct timeval *i)
+put_tv32(struct timeval32 __user *o, struct timespec64 *i)
 {
 	return copy_to_user(o, &(struct timeval32){
-			.tv_sec = o->tv_sec,
-			.tv_usec = o->tv_usec},
+			.tv_sec = i->tv_sec,
+			.tv_usec = i->tv_nsec / NSEC_PER_USEC},
+		sizeof(struct timeval32));
+}
+
+static inline long
+put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
+{
+	return copy_to_user(o, &(struct timeval32){
+			.tv_sec = i->tv_sec,
+			.tv_usec = i->tv_usec},
 		sizeof(struct timeval32));
 }
 
@@ -1004,9 +1013,10 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
 		struct timezone __user *, tz)
 {
 	if (tv) {
-		struct timeval ktv;
-		do_gettimeofday(&ktv);
-		if (put_tv32(tv, &ktv))
+		struct timespec64 kts;
+
+		ktime_get_real_ts64(&kts);
+		if (put_tv32(tv, &kts))
 			return -EFAULT;
 	}
 	if (tz) {
@@ -1019,22 +1029,19 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
 SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
 		struct timezone __user *, tz)
 {
-	struct timespec64 kts64;
-	struct timespec kts;
+	struct timespec64 kts;
 	struct timezone ktz;
 
 	if (tv) {
-		if (get_tv32((struct timeval *)&kts, tv))
+		if (get_tv32(&kts, tv))
 			return -EFAULT;
-		kts.tv_nsec *= 1000;
-		kts64 = timespec_to_timespec64(kts);
 	}
 	if (tz) {
 		if (copy_from_user(&ktz, tz, sizeof(*tz)))
 			return -EFAULT;
 	}
 
-	return do_sys_settimeofday64(tv ? &kts64 : NULL, tz ? &ktz : NULL);
+	return do_sys_settimeofday64(tv ? &kts : NULL, tz ? &ktz : NULL);
 }
 
 asmlinkage long sys_ni_posix_timers(void);
@@ -1083,22 +1090,16 @@ SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
 SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
 		struct timeval32 __user *, tvs)
 {
-	struct timespec tv[2];
+	struct timespec64 tv[2];
 
 	if (tvs) {
-		struct timeval ktvs[2];
-		if (get_tv32(&ktvs[0], &tvs[0]) ||
-		    get_tv32(&ktvs[1], &tvs[1]))
+		if (get_tv32(&tv[0], &tvs[0]) ||
+		    get_tv32(&tv[1], &tvs[1]))
 			return -EFAULT;
 
-		if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 ||
-		    ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000)
+		if (tv[0].tv_nsec < 0 || tv[0].tv_nsec >= 1000000000 ||
+		    tv[1].tv_nsec < 0 || tv[1].tv_nsec >= 1000000000)
 			return -EINVAL;
-
-		tv[0].tv_sec = ktvs[0].tv_sec;
-		tv[0].tv_nsec = 1000 * ktvs[0].tv_usec;
-		tv[1].tv_sec = ktvs[1].tv_sec;
-		tv[1].tv_nsec = 1000 * ktvs[1].tv_usec;
 	}
 
 	return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0);
@@ -1107,19 +1108,18 @@ SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
 SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
 		fd_set __user *, exp, struct timeval32 __user *, tvp)
 {
-	struct timespec end_time, *to = NULL;
+	struct timespec64 end_time, *to = NULL;
 	if (tvp) {
-		struct timeval tv;
+		struct timespec64 tv;
 		to = &end_time;
 
 		if (get_tv32(&tv, tvp))
 			return -EFAULT;
 
-		if (tv.tv_sec < 0 || tv.tv_usec < 0)
+		if (tv.tv_sec < 0 || tv.tv_nsec < 0)
 			return -EINVAL;
 
-		if (poll_select_set_timeout(to, tv.tv_sec,
-					    tv.tv_usec * NSEC_PER_USEC))
+		if (poll_select_set_timeout(to, tv.tv_sec, tv.tv_nsec))
 			return -EINVAL;
 
 	}
@@ -1192,9 +1192,9 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 		return -EFAULT;
 	if (!ur)
 		return err;
-	if (put_tv32(&ur->ru_utime, &r.ru_utime))
+	if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
 		return -EFAULT;
-	if (put_tv32(&ur->ru_stime, &r.ru_stime))
+	if (put_tv_to_tv32(&ur->ru_stime, &r.ru_stime))
 		return -EFAULT;
 	if (copy_to_user(&ur->ru_maxrss, &r.ru_maxrss,
 			sizeof(struct rusage32) - offsetof(struct rusage32, ru_maxrss)))
@@ -1210,18 +1210,18 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep,
 		struct timeval32 __user *, remain)
 {
-	struct timeval tmp;
+	struct timespec64 tmp;
 	unsigned long ticks;
 
 	if (get_tv32(&tmp, sleep))
 		goto fault;
 
-	ticks = timeval_to_jiffies(&tmp);
+	ticks = timespec64_to_jiffies(&tmp);
 
 	ticks = schedule_timeout_interruptible(ticks);
 
 	if (remain) {
-		jiffies_to_timeval(ticks, &tmp);
+		jiffies_to_timespec64(ticks, &tmp);
 		if (put_tv32(remain, &tmp))
 			goto fault;
 	}
@@ -1280,7 +1280,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 	if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
 	    (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
 			  offsetof(struct timex32, tick))) ||
-	    (put_tv32(&txc_p->time, &txc.time)))
+	    (put_tv_to_tv32(&txc_p->time, &txc.time)))
 		return -EFAULT;
 
 	return ret;
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fdc56c1c953..b9bfa806d346 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -982,25 +982,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
 
 	xbuf_size = count * sizeof(*x);
 	ybuf_size = count * sizeof(*y);
-	x = kmalloc(xbuf_size, GFP_KERNEL);
-	if (x == NULL)
-		return -ENOMEM;
-	y = kmalloc(ybuf_size, GFP_KERNEL);
-	if (y == NULL) {
-		kfree(x);
-		return -ENOMEM;
-	}
-	if (copy_from_user(x, depth->x, xbuf_size)) {
-		kfree(x);
-		kfree(y);
-		return -EFAULT;
-	}
-	if (copy_from_user(y, depth->y, xbuf_size)) {
+	x = memdup_user(depth->x, xbuf_size);
+	if (IS_ERR(x))
+		return PTR_ERR(x);
+	y = memdup_user(depth->y, ybuf_size);
+	if (IS_ERR(y)) {
 		kfree(x);
-		kfree(y);
-		return -EFAULT;
+		return PTR_ERR(y);
 	}
-
 	buffer_size = depth->n * sizeof(u32);
 	buffer = memdup_user(depth->buffer, buffer_size);
 	if (IS_ERR(buffer)) {
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index ed3bf05e2462..381f614b2f4c 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1284,36 +1284,30 @@ struct uvc_xu_control_mapping32 {
 static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 				   const struct uvc_xu_control_mapping32 __user *up)
 {
-	compat_caddr_t p;
+	struct uvc_xu_control_mapping32 *p = (void *)kp;
+	compat_caddr_t info;
+	u32 count;
 
-	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
-	    __copy_from_user(kp, up, offsetof(typeof(*up), menu_info)) ||
-	    __get_user(kp->menu_count, &up->menu_count))
+	if (copy_from_user(p, up, sizeof(*p)))
 		return -EFAULT;
 
-	memset(kp->reserved, 0, sizeof(kp->reserved));
-
-	if (kp->menu_count == 0) {
-		kp->menu_info = NULL;
-		return 0;
-	}
-
-	if (__get_user(p, &up->menu_info))
-		return -EFAULT;
-	kp->menu_info = compat_ptr(p);
+	count = p->menu_count;
+	info = p->menu_info;
 
+	memset(kp->reserved, 0, sizeof(kp->reserved));
+	kp->menu_info = count ? compat_ptr(info) : NULL;
+	kp->menu_count = count;
 	return 0;
 }
 
 static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
 				   struct uvc_xu_control_mapping32 __user *up)
 {
-	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
-	    __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
-	    __put_user(kp->menu_count, &up->menu_count))
+	if (copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
+	    put_user(kp->menu_count, &up->menu_count))
 		return -EFAULT;
 
-	if (__clear_user(up->reserved, sizeof(up->reserved)))
+	if (clear_user(up->reserved, sizeof(up->reserved)))
 		return -EFAULT;
 
 	return 0;
@@ -1330,31 +1324,26 @@ struct uvc_xu_control_query32 {
 static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 				 const struct uvc_xu_control_query32 __user *up)
 {
-	compat_caddr_t p;
+	struct uvc_xu_control_query32 v;
 
-	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
-	    __copy_from_user(kp, up, offsetof(typeof(*up), data)))
+	if (copy_from_user(&v, up, sizeof(v)))
 		return -EFAULT;
 
-	if (kp->size == 0) {
-		kp->data = NULL;
-		return 0;
-	}
-
-	if (__get_user(p, &up->data))
-		return -EFAULT;
-	kp->data = compat_ptr(p);
-
+	*kp = (struct uvc_xu_control_query){
+		.unit = v.unit,
+		.selector = v.selector,
+		.query = v.query,
+		.size = v.size,
+		.data = v.size ? compat_ptr(v.data) : NULL
+	};
 	return 0;
 }
 
 static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
 				 struct uvc_xu_control_query32 __user *up)
 {
-	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
-	    __copy_to_user(up, kp, offsetof(typeof(*up), data)))
+	if (copy_to_user(up, kp, offsetof(typeof(*up), data)))
 		return -EFAULT;
-
 	return 0;
 }
 
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index b133fd00c08c..0d62fcf016dc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1296,15 +1296,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
 	spin_lock_nested(&dentry->d_lock,
 			 nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
 	ll_d2d(dentry)->lld_invalid = 1;
-	/*
-	 * We should be careful about dentries created by d_obtain_alias().
-	 * These dentries are not put in the dentry tree, instead they are
-	 * linked to sb->s_anon through dentry->d_hash.
-	 * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
-	 * If we unhashed such a dentry, unmount would not be able to find
-	 * it and busy inodes would be reported.
-	 */
-	if (d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
+	if (d_count(dentry) == 0)
 		__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
 }
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index a3d4610fbdbe..4c8c6fa0a79f 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -134,7 +134,7 @@ static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
 	if (copied < 0)
 		return (int)copied;
 
-	if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
+	if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
 		return -EFAULT;
 
 	return copied;
@@ -146,7 +146,7 @@ static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
 	if (count > image[minor].size_buf)
 		count = image[minor].size_buf;
 
-	if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
+	if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
 		return -EFAULT;
 
 	return vme_master_write(image[minor].resource, image[minor].kern_buf,
@@ -159,7 +159,7 @@ static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
 	void *image_ptr;
 
 	image_ptr = image[minor].kern_buf + *ppos;
-	if (__copy_to_user(buf, image_ptr, (unsigned long)count))
+	if (copy_to_user(buf, image_ptr, (unsigned long)count))
 		return -EFAULT;
 
 	return count;
@@ -171,7 +171,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
 	void *image_ptr;
 
 	image_ptr = image[minor].kern_buf + *ppos;
-	if (__copy_from_user(image_ptr, buf, (unsigned long)count))
+	if (copy_from_user(image_ptr, buf, (unsigned long)count))
 		return -EFAULT;
 
 	return count;
diff --git a/fs/buffer.c b/fs/buffer.c
index 8b26295a56fe..9a73924db22f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -53,13 +53,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
-void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
-{
-	bh->b_end_io = handler;
-	bh->b_private = private;
-}
-EXPORT_SYMBOL(init_buffer);
-
 inline void touch_buffer(struct buffer_head *bh)
 {
 	trace_block_touch_buffer(bh);
@@ -922,7 +915,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
 
 	do {
 		if (!buffer_mapped(bh)) {
-			init_buffer(bh, NULL, NULL);
+			bh->b_end_io = NULL;
+			bh->b_private = NULL;
 			bh->b_bdev = bdev;
 			bh->b_blocknr = block;
 			if (uptodate)
diff --git a/fs/dcache.c b/fs/dcache.c
index 379dce86f001..c6d996ee2d61 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -32,7 +32,6 @@
 #include <linux/swap.h>
 #include <linux/bootmem.h>
 #include <linux/fs_struct.h>
-#include <linux/hardirq.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rculist_bl.h>
 #include <linux/prefetch.h>
@@ -49,8 +48,8 @@
  * - i_dentry, d_u.d_alias, d_inode of aliases
  * dcache_hash_bucket lock protects:
  * - the dcache hash table
- * s_anon bl list spinlock protects:
- * - the s_anon list (see __d_drop)
+ * s_roots bl list spinlock protects:
+ * - the s_roots list (see __d_drop)
  * dentry->d_sb->s_dentry_lru_lock protects:
  * - the dcache lru lists and counters
  * d_lock protects:
@@ -68,7 +67,7 @@
  * dentry->d_lock
  *   dentry->d_sb->s_dentry_lru_lock
  *   dcache_hash_bucket lock
- *   s_anon lock
+ *   s_roots lock
  *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
@@ -104,14 +103,13 @@ EXPORT_SYMBOL(slash_name);
  * information, yet avoid using a prime hash-size or similar.
  */
 
-static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
 
 static struct hlist_bl_head *dentry_hashtable __read_mostly;
 
 static inline struct hlist_bl_head *d_hash(unsigned int hash)
 {
-	return dentry_hashtable + (hash >> (32 - d_hash_shift));
+	return dentry_hashtable + (hash >> d_hash_shift);
 }
 
 #define IN_LOOKUP_SHIFT 10
@@ -477,10 +475,10 @@ void __d_drop(struct dentry *dentry)
 	/*
 	 * Hashed dentries are normally on the dentry hashtable,
 	 * with the exception of those newly allocated by
-	 * d_obtain_alias, which are always IS_ROOT:
+	 * d_obtain_root, which are always IS_ROOT:
 	 */
 	if (unlikely(IS_ROOT(dentry)))
-		b = &dentry->d_sb->s_anon;
+		b = &dentry->d_sb->s_roots;
 	else
 		b = d_hash(dentry->d_name.hash);
 
@@ -1500,8 +1498,8 @@ void shrink_dcache_for_umount(struct super_block *sb)
 	sb->s_root = NULL;
 	do_one_tree(dentry);
 
-	while (!hlist_bl_empty(&sb->s_anon)) {
-		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
+	while (!hlist_bl_empty(&sb->s_roots)) {
+		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
 		do_one_tree(dentry);
 	}
 }
@@ -1964,9 +1962,11 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
 	spin_lock(&tmp->d_lock);
 	__d_set_inode_and_type(tmp, inode, add_flags);
 	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
-	hlist_bl_lock(&tmp->d_sb->s_anon);
-	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
-	hlist_bl_unlock(&tmp->d_sb->s_anon);
+	if (!disconnected) {
+		hlist_bl_lock(&tmp->d_sb->s_roots);
+		hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_roots);
+		hlist_bl_unlock(&tmp->d_sb->s_roots);
+	}
 	spin_unlock(&tmp->d_lock);
 	spin_unlock(&inode->i_lock);
 
@@ -3585,9 +3585,10 @@ static void __init dcache_init_early(void)
 					13,
 					HASH_EARLY | HASH_ZERO,
 					&d_hash_shift,
-					&d_hash_mask,
+					NULL,
 					0,
 					0);
+	d_hash_shift = 32 - d_hash_shift;
 }
 
 static void __init dcache_init(void)
@@ -3611,9 +3612,10 @@ static void __init dcache_init(void)
 					13,
 					HASH_ZERO,
 					&d_hash_shift,
-					&d_hash_mask,
+					NULL,
 					0,
 					0);
+	d_hash_shift = 32 - d_hash_shift;
 }
 
 /* SLAB cache for __getname() consumers */
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 6318a9b57e53..04fd824142a1 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -80,24 +80,11 @@ static void eventfd_free(struct kref *kref)
 }
 
 /**
- * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
- * @ctx: [in] Pointer to the eventfd context.
- *
- * Returns: In case of success, returns a pointer to the eventfd context.
- */
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
-{
-	kref_get(&ctx->kref);
-	return ctx;
-}
-EXPORT_SYMBOL_GPL(eventfd_ctx_get);
-
-/**
  * eventfd_ctx_put - Releases a reference to the internal eventfd context.
  * @ctx: [in] Pointer to eventfd context.
  *
  * The eventfd context reference must have been previously acquired either
- * with eventfd_ctx_get() or eventfd_ctx_fdget().
+ * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
  */
 void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {
@@ -207,36 +194,27 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
 }
 EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
 
-/**
- * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
- * @ctx: [in] Pointer to eventfd context.
- * @no_wait: [in] Different from zero if the operation should not block.
- * @cnt: [out] Pointer to the 64-bit counter value.
- *
- * Returns %0 if successful, or the following error codes:
- *
- * - -EAGAIN : The operation would have blocked but @no_wait was non-zero.
- * - -ERESTARTSYS : A signal interrupted the wait operation.
- *
- * If @no_wait is zero, the function might sleep until the eventfd internal
- * counter becomes greater than zero.
- */
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
+static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
+			    loff_t *ppos)
 {
+	struct eventfd_ctx *ctx = file->private_data;
 	ssize_t res;
+	__u64 ucnt = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
+	if (count < sizeof(ucnt))
+		return -EINVAL;
+
 	spin_lock_irq(&ctx->wqh.lock);
-	*cnt = 0;
 	res = -EAGAIN;
 	if (ctx->count > 0)
-		res = 0;
-	else if (!no_wait) {
+		res = sizeof(ucnt);
+	else if (!(file->f_flags & O_NONBLOCK)) {
 		__add_wait_queue(&ctx->wqh, &wait);
 		for (;;) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (ctx->count > 0) {
-				res = 0;
+				res = sizeof(ucnt);
 				break;
 			}
 			if (signal_pending(current)) {
@@ -250,31 +228,17 @@ ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
 		__remove_wait_queue(&ctx->wqh, &wait);
 		__set_current_state(TASK_RUNNING);
 	}
-	if (likely(res == 0)) {
-		eventfd_ctx_do_read(ctx, cnt);
+	if (likely(res > 0)) {
+		eventfd_ctx_do_read(ctx, &ucnt);
 		if (waitqueue_active(&ctx->wqh))
 			wake_up_locked_poll(&ctx->wqh, POLLOUT);
 	}
 	spin_unlock_irq(&ctx->wqh.lock);
 
-	return res;
-}
-EXPORT_SYMBOL_GPL(eventfd_ctx_read);
-
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
-			    loff_t *ppos)
-{
-	struct eventfd_ctx *ctx = file->private_data;
-	ssize_t res;
-	__u64 cnt;
-
-	if (count < sizeof(cnt))
-		return -EINVAL;
-	res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
-	if (res < 0)
-		return res;
+	if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
+		return -EFAULT;
 
-	return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
+	return res;
 }
 
 static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
@@ -405,79 +369,44 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
  */
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
 {
+	struct eventfd_ctx *ctx;
+
 	if (file->f_op != &eventfd_fops)
 		return ERR_PTR(-EINVAL);
 
-	return eventfd_ctx_get(file->private_data);
+	ctx = file->private_data;
+	kref_get(&ctx->kref);
+	return ctx;
 }
 EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
 
-/**
- * eventfd_file_create - Creates an eventfd file pointer.
- * @count: Initial eventfd counter value.
- * @flags: Flags for the eventfd file.
- *
- * This function creates an eventfd file pointer, w/out installing it into
- * the fd table. This is useful when the eventfd file is used during the
- * initialization of data structures that require extra setup after the eventfd
- * creation. So the eventfd creation is split into the file pointer creation
- * phase, and the file descriptor installation phase.
- * In this way races with userspace closing the newly installed file descriptor
- * can be avoided.
- * Returns an eventfd file pointer, or a proper error pointer.
- */
-struct file *eventfd_file_create(unsigned int count, int flags)
+SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
 {
-	struct file *file;
 	struct eventfd_ctx *ctx;
+	int fd;
 
 	/* Check the EFD_* constants for consistency. */
 	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
 	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
 
 	if (flags & ~EFD_FLAGS_SET)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	kref_init(&ctx->kref);
 	init_waitqueue_head(&ctx->wqh);
 	ctx->count = count;
 	ctx->flags = flags;
 
-	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
-				  O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
-	if (IS_ERR(file))
+	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
+			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
+	if (fd < 0)
 		eventfd_free_ctx(ctx);
 
-	return file;
-}
-
-SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
-{
-	int fd, error;
-	struct file *file;
-
-	error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
-	if (error < 0)
-		return error;
-	fd = error;
-
-	file = eventfd_file_create(count, flags);
-	if (IS_ERR(file)) {
-		error = PTR_ERR(file);
-		goto err_put_unused_fd;
-	}
-	fd_install(fd, file);
-
 	return fd;
-
-err_put_unused_fd:
-	put_unused_fd(fd);
-
-	return error;
 }
 
 SYSCALL_DEFINE1(eventfd, unsigned int, count)
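
As a usage note, a hedged sketch of the reference discipline that remains after this cleanup: a kernel consumer takes its own reference via eventfd_ctx_fdget() or eventfd_ctx_fileget() and drops it with eventfd_ctx_put(); demo_notify_user() is a hypothetical caller, not part of the series.

#include <linux/eventfd.h>
#include <linux/err.h>

static int demo_notify_user(int efd)
{
	struct eventfd_ctx *ctx;

	ctx = eventfd_ctx_fdget(efd);	/* takes a reference of its own */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	eventfd_signal(ctx, 1);		/* add 1 to the counter, wake readers */
	eventfd_ctx_put(ctx);		/* pairs with _fdget()/_fileget() */
	return 0;
}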
diff --git a/fs/file.c b/fs/file.c
index fc0eeb812e2c..42f0db4bd0fb 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -11,18 +11,13 @@
 #include <linux/export.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/time.h>
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/file.h>
 #include <linux/fdtable.h>
 #include <linux/bitops.h>
-#include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
-#include <linux/workqueue.h>
 
 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
diff --git a/fs/file_table.c b/fs/file_table.c
index 2dc9f38bd195..7ec0b3e5f05d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -23,7 +23,6 @@
 #include <linux/sysctl.h>
 #include <linux/percpu_counter.h>
 #include <linux/percpu.h>
-#include <linux/hardirq.h>
 #include <linux/task_work.h>
 #include <linux/ima.h>
 #include <linux/swap.h>
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index d8c274d39ddb..eab04eca95a3 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -362,7 +362,6 @@ error_io:
 	ret = -EIO;
 error:
 	mutex_unlock(&f->sem);
-	jffs2_do_clear_inode(c, f);
 	iget_failed(inode);
 	return ERR_PTR(ret);
 }
diff --git a/fs/namei.c b/fs/namei.c
index 7c221fb0836b..921ae32dbc80 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -391,50 +391,6 @@ static inline int do_inode_permission(struct inode *inode, int mask)
 }
 
 /**
- * __inode_permission - Check for access rights to a given inode
- * @inode: Inode to check permission on
- * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
- *
- * Check for read/write/execute permissions on an inode.
- *
- * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
- *
- * This does not check for a read-only file system.  You probably want
- * inode_permission().
- */
-int __inode_permission(struct inode *inode, int mask)
-{
-	int retval;
-
-	if (unlikely(mask & MAY_WRITE)) {
-		/*
-		 * Nobody gets write access to an immutable file.
-		 */
-		if (IS_IMMUTABLE(inode))
-			return -EPERM;
-
-		/*
-		 * Updating mtime will likely cause i_uid and i_gid to be
-		 * written back improperly if their true value is unknown
-		 * to the vfs.
-		 */
-		if (HAS_UNMAPPED_ID(inode))
-			return -EACCES;
-	}
-
-	retval = do_inode_permission(inode, mask);
-	if (retval)
-		return retval;
-
-	retval = devcgroup_inode_permission(inode, mask);
-	if (retval)
-		return retval;
-
-	return security_inode_permission(inode, mask);
-}
-EXPORT_SYMBOL(__inode_permission);
-
-/**
  * sb_permission - Check superblock-level permissions
  * @sb: Superblock of inode to check permission on
  * @inode: Inode to check permission on
@@ -472,7 +428,32 @@ int inode_permission(struct inode *inode, int mask)
 	retval = sb_permission(inode->i_sb, inode, mask);
 	if (retval)
 		return retval;
-	return __inode_permission(inode, mask);
+
+	if (unlikely(mask & MAY_WRITE)) {
+		/*
+		 * Nobody gets write access to an immutable file.
+		 */
+		if (IS_IMMUTABLE(inode))
+			return -EPERM;
+
+		/*
+		 * Updating mtime will likely cause i_uid and i_gid to be
+		 * written back improperly if their true value is unknown
+		 * to the vfs.
+		 */
+		if (HAS_UNMAPPED_ID(inode))
+			return -EACCES;
+	}
+
+	retval = do_inode_permission(inode, mask);
+	if (retval)
+		return retval;
+
+	retval = devcgroup_inode_permission(inode, mask);
+	if (retval)
+		return retval;
+
+	return security_inode_permission(inode, mask);
 }
 EXPORT_SYMBOL(inode_permission);
 
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 626d1382002e..6b3b372b59b9 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -8,7 +8,6 @@
 #include <linux/file.h>
 #include <linux/falloc.h>
 #include <linux/nfs_fs.h>
-#include <uapi/linux/btrfs.h>	/* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */
 #include "delegation.h"
 #include "internal.h"
 #include "iostat.h"
diff --git a/fs/super.c b/fs/super.c
index 06bd25d90ba5..672538ca9831 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -225,7 +225,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 	if (s->s_user_ns != &init_user_ns)
 		s->s_iflags |= SB_I_NODEV;
 	INIT_HLIST_NODE(&s->s_instances);
-	INIT_HLIST_BL_HEAD(&s->s_anon);
+	INIT_HLIST_BL_HEAD(&s->s_roots);
 	mutex_init(&s->s_sync_lock);
 	INIT_LIST_HEAD(&s->s_inodes);
 	spin_lock_init(&s->s_inode_list_lock);
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 1030651f8309..cf2588d81148 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -16,6 +16,7 @@
 #define _LINUX_BITFIELD_H
 
 #include <linux/build_bug.h>
+#include <asm/byteorder.h>
 
 /*
  * Bitfield access macros
@@ -103,4 +104,49 @@
 		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask));	\
 	})
 
+extern void __compiletime_warning("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+	if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+		__bad_mask();
+	return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+	return field / field_multiplier(field);
+}
+#define ____MAKE_OP(type,base,to,from)					\
+static __always_inline __##type type##_encode_bits(base v, base field)	\
+{									\
+	if (__builtin_constant_p(v) && (v & ~field_multiplier(field)))	\
+		__field_overflow();					\
+	return to((v & field_mask(field)) * field_multiplier(field));	\
+}									\
+static __always_inline __##type type##_replace_bits(__##type old,	\
+					base val, base field)		\
+{									\
+	return (old & ~to(field)) | type##_encode_bits(val, field);	\
+}									\
+static __always_inline void type##p_replace_bits(__##type *p,		\
+					base val, base field)		\
+{									\
+	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
+}									\
+static __always_inline base type##_get_bits(__##type v, base field)	\
+{									\
+	return (from(v) & field)/field_multiplier(field);		\
+}
+#define __MAKE_OP(size)							\
+	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu)	\
+	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu)	\
+	____MAKE_OP(u##size,u##size,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
 #endif
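
A hedged sketch of what the new fixed-endian helpers buy a driver: pack and unpack a field of a little-endian register without hand-rolled cpu_to_le16()/shift/mask code. The DEMO_CTRL_CHANNEL mask and the demo_* functions are made up for illustration.

#include <linux/types.h>
#include <linux/bitfield.h>

#define DEMO_CTRL_CHANNEL	0x0f00	/* hypothetical bits 11:8 of a __le16 control word */

static __le16 demo_pack_channel(u16 chan)
{
	/* Builds a little-endian value with only the channel field set;
	 * a constant value that overflows the mask triggers __field_overflow(). */
	return le16_encode_bits(chan, DEMO_CTRL_CHANNEL);
}

static u16 demo_unpack_channel(__le16 ctrl)
{
	/* Extracts the field; the endian conversion is hidden in the helper. */
	return le16_get_bits(ctrl, DEMO_CTRL_CHANNEL);
}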
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 8b1bf8d3d4a2..58a82f58e44e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -151,7 +151,6 @@ void buffer_check_dirty_writeback(struct page *page,
 
 void mark_buffer_dirty(struct buffer_head *bh);
 void mark_buffer_write_io_error(struct buffer_head *bh);
-void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
 void touch_buffer(struct buffer_head *bh);
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset);
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 60b2985e8a18..7094718b653b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -26,18 +26,16 @@
 #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
 
+struct eventfd_ctx;
 struct file;
 
 #ifdef CONFIG_EVENTFD
 
-struct file *eventfd_file_create(unsigned int count, int flags);
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
 void eventfd_ctx_put(struct eventfd_ctx *ctx);
 struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 
@@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
  * Ugly ugly ugly error layer to support modules that uses eventfd but
  * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
  */
-static inline struct file *eventfd_file_create(unsigned int count, int flags)
-{
-	return ERR_PTR(-ENOSYS);
-}
 
 static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
 {
@@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
 
 }
 
-static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
-					__u64 *cnt)
-{
-	return -ENOSYS;
-}
-
 static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
 						wait_queue_entry_t *wait, __u64 *cnt)
 {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9798a133e718..8f6654c21711 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1359,7 +1359,7 @@ struct super_block {
 
 	const struct fscrypt_operations	*s_cop;
 
-	struct hlist_bl_head	s_anon;		/* anonymous dentries for (nfs) exporting */
+	struct hlist_bl_head	s_roots;	/* alternate root dentries for NFS */
 	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
 	struct block_device	*s_bdev;
 	struct backing_dev_info *s_bdi;
@@ -2688,7 +2688,6 @@ extern sector_t bmap(struct inode *, sector_t);
 #endif
 extern int notify_change(struct dentry *, struct iattr *, struct inode **);
 extern int inode_permission(struct inode *, int);
-extern int __inode_permission(struct inode *, int);
 extern int generic_permission(struct inode *, int);
 extern int __check_sticky(struct inode *dir, struct inode *inode);
 
@@ -3228,6 +3227,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
 		ki->ki_flags |= IOCB_DSYNC;
 	if (flags & RWF_SYNC)
 		ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+	if (flags & RWF_APPEND)
+		ki->ki_flags |= IOCB_APPEND;
 	return 0;
 }
 
diff --git a/include/linux/string.h b/include/linux/string.h
index 96115bf561b4..dd39a690c841 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -11,6 +11,7 @@
 
 extern char *strndup_user(const char __user *, long);
 extern void *memdup_user(const void __user *, size_t);
+extern void *vmemdup_user(const void __user *, size_t);
 extern void *memdup_user_nul(const void __user *, size_t);
 
 /*
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 4199f8acbce5..d2a8313fabd7 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -377,7 +377,11 @@ typedef int __bitwise __kernel_rwf_t;
 /* per-IO, return -EAGAIN if operation would block */
 #define RWF_NOWAIT	((__force __kernel_rwf_t)0x00000008)
 
+/* per-IO O_APPEND */
+#define RWF_APPEND	((__force __kernel_rwf_t)0x00000010)
+
 /* mask of flags supported by the kernel */
-#define RWF_SUPPORTED	(RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT)
+#define RWF_SUPPORTED	(RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+			 RWF_APPEND)
 
 #endif /* _UAPI_LINUX_FS_H */
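
A hedged userspace sketch of what the new flag enables: per-call append via pwritev2() without setting O_APPEND on the descriptor. It assumes a libc that wraps pwritev2() (glibc 2.26 or later); RWF_APPEND is defined locally in case the installed uapi headers predate this change.

#define _GNU_SOURCE
#include <sys/uio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef RWF_APPEND
#define RWF_APPEND 0x00000010	/* value from the hunk above */
#endif

int main(void)
{
	const char msg[] = "appended record\n";
	struct iovec iov = { .iov_base = (void *)msg, .iov_len = sizeof(msg) - 1 };
	int fd = open("demo.log", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* offset -1 means "use the current file position"; RWF_APPEND makes
	 * this one write append like O_APPEND, but scoped to the single call. */
	if (pwritev2(fd, &iov, 1, -1, RWF_APPEND) < 0)
		perror("pwritev2");
	close(fd);
	return 0;
}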
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 15e2e6fb060e..3744b2a8e591 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL(_copy_from_user);
 #endif
 
 #ifndef INLINE_COPY_TO_USER
-unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
 	if (likely(access_ok(VERIFY_WRITE, to, n))) {
diff --git a/mm/util.c b/mm/util.c
index 34e57fae959d..c1250501364f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -150,18 +150,14 @@ EXPORT_SYMBOL(kmemdup_nul);
  * @src: source address in user space
  * @len: number of bytes to copy
  *
- * Returns an ERR_PTR() on failure.
+ * Returns an ERR_PTR() on failure.  Result is physically
+ * contiguous, to be freed by kfree().
  */
 void *memdup_user(const void __user *src, size_t len)
 {
 	void *p;
 
-	/*
-	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
-	 * cause pagefault, which makes it pointless to use GFP_NOFS
-	 * or GFP_ATOMIC.
-	 */
-	p = kmalloc_track_caller(len, GFP_KERNEL);
+	p = kmalloc_track_caller(len, GFP_USER);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
@@ -174,6 +170,32 @@ void *memdup_user(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user);
 
+/**
+ * vmemdup_user - duplicate memory region from user space
+ *
+ * @src: source address in user space
+ * @len: number of bytes to copy
+ *
+ * Returns an ERR_PTR() on failure.  Result may be not
+ * physically contiguous.  Use kvfree() to free.
+ */
+void *vmemdup_user(const void __user *src, size_t len)
+{
+	void *p;
+
+	p = kvmalloc(len, GFP_USER);
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(p, src, len)) {
+		kvfree(p);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return p;
+}
+EXPORT_SYMBOL(vmemdup_user);
+
 /*
  * strndup_user - duplicate an existing string from user space
  * @s: The string to duplicate
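
A hedged sketch of the conversion pattern the rest of this series builds on the new primitive; demo_set_table(), demo_apply_table() and struct demo_dev are hypothetical, the point being the vmemdup_user()/kvfree() pairing (the buffer may be vmalloc-backed, so plain kfree() would be wrong).

static int demo_set_table(struct demo_dev *dev,
			  const void __user *ubuf, size_t len)
{
	void *table;

	table = vmemdup_user(ubuf, len);	/* kvmalloc + copy_from_user */
	if (IS_ERR(table))
		return PTR_ERR(table);		/* -ENOMEM or -EFAULT */

	demo_apply_table(dev, table, len);	/* hypothetical consumer */
	kvfree(table);				/* not kfree(): may be vmalloc'ed */
	return 0;
}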
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 37382317fba4..737e551fbf67 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -968,13 +968,6 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
968 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() 968 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
969 * from userspace. 969 * from userspace.
970 * 970 *
971 * We don't use copy_from_user() for optimization: we first do the
972 * sanity checks (buffer size -fast- and access check-healthy
973 * pointer); if all of those succeed, then we can alloc the memory
974 * (expensive operation) needed to copy the data to kernel. Then we do
975 * the copying without checking the user space area
976 * (__copy_from_user()).
977 *
978 * On exit there is no need to do sockfd_put(), sys_setsockopt() does 971 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
979 * it. 972 * it.
980 * 973 *
@@ -1004,25 +997,15 @@ static int sctp_setsockopt_bindx(struct sock *sk,
1004 if (unlikely(addrs_size <= 0)) 997 if (unlikely(addrs_size <= 0))
1005 return -EINVAL; 998 return -EINVAL;
1006 999
1007 /* Check the user passed a healthy pointer. */ 1000 kaddrs = vmemdup_user(addrs, addrs_size);
1008 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) 1001 if (unlikely(IS_ERR(kaddrs)))
1009 return -EFAULT; 1002 return PTR_ERR(kaddrs);
1010
1011 /* Alloc space for the address array in kernel memory. */
1012 kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
1013 if (unlikely(!kaddrs))
1014 return -ENOMEM;
1015
1016 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1017 kfree(kaddrs);
1018 return -EFAULT;
1019 }
1020 1003
1021 /* Walk through the addrs buffer and count the number of addresses. */ 1004 /* Walk through the addrs buffer and count the number of addresses. */
1022 addr_buf = kaddrs; 1005 addr_buf = kaddrs;
1023 while (walk_size < addrs_size) { 1006 while (walk_size < addrs_size) {
1024 if (walk_size + sizeof(sa_family_t) > addrs_size) { 1007 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1025 kfree(kaddrs); 1008 kvfree(kaddrs);
1026 return -EINVAL; 1009 return -EINVAL;
1027 } 1010 }
1028 1011
@@ -1033,7 +1016,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
1033 * causes the address buffer to overflow return EINVAL. 1016 * causes the address buffer to overflow return EINVAL.
1034 */ 1017 */
1035 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1018 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1036 kfree(kaddrs); 1019 kvfree(kaddrs);
1037 return -EINVAL; 1020 return -EINVAL;
1038 } 1021 }
1039 addrcnt++; 1022 addrcnt++;
@@ -1063,7 +1046,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
1063 } 1046 }
1064 1047
1065out: 1048out:
1066 kfree(kaddrs); 1049 kvfree(kaddrs);
1067 1050
1068 return err; 1051 return err;
1069} 1052}
@@ -1321,13 +1304,6 @@ out_free:
1321 * land and invoking either sctp_connectx(). This is used for tunneling 1304 * land and invoking either sctp_connectx(). This is used for tunneling
1322 * the sctp_connectx() request through sctp_setsockopt() from userspace. 1305 * the sctp_connectx() request through sctp_setsockopt() from userspace.
1323 * 1306 *
1324 * We don't use copy_from_user() for optimization: we first do the
1325 * sanity checks (buffer size -fast- and access check-healthy
1326 * pointer); if all of those succeed, then we can alloc the memory
1327 * (expensive operation) needed to copy the data to kernel. Then we do
1328 * the copying without checking the user space area
1329 * (__copy_from_user()).
1330 *
1331 * On exit there is no need to do sockfd_put(), sys_setsockopt() does 1307 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
1332 * it. 1308 * it.
1333 * 1309 *
@@ -1343,7 +1319,6 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1343 sctp_assoc_t *assoc_id) 1319 sctp_assoc_t *assoc_id)
1344{ 1320{
1345 struct sockaddr *kaddrs; 1321 struct sockaddr *kaddrs;
1346 gfp_t gfp = GFP_KERNEL;
1347 int err = 0; 1322 int err = 0;
1348 1323
1349 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", 1324 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
@@ -1352,24 +1327,12 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1352 if (unlikely(addrs_size <= 0)) 1327 if (unlikely(addrs_size <= 0))
1353 return -EINVAL; 1328 return -EINVAL;
1354 1329
1355 /* Check the user passed a healthy pointer. */ 1330 kaddrs = vmemdup_user(addrs, addrs_size);
1356 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) 1331 if (unlikely(IS_ERR(kaddrs)))
1357 return -EFAULT; 1332 return PTR_ERR(kaddrs);
1358
1359 /* Alloc space for the address array in kernel memory. */
1360 if (sk->sk_socket->file)
1361 gfp = GFP_USER | __GFP_NOWARN;
1362 kaddrs = kmalloc(addrs_size, gfp);
1363 if (unlikely(!kaddrs))
1364 return -ENOMEM;
1365
1366 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1367 err = -EFAULT;
1368 } else {
1369 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1370 }
1371 1333
1372 kfree(kaddrs); 1334 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1335 kvfree(kaddrs);
1373 1336
1374 return err; 1337 return err;
1375} 1338}
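
Both sctp_setsockopt_bindx() and __sctp_setsockopt_connectx() above now go through vmemdup_user(), which is added in mm/util.c by this same series: it does the access-checked copy and the (possibly vmalloc-backed) allocation in one call and returns an ERR_PTR on failure, which is why every later kfree() of the buffer becomes kvfree(). A minimal sketch of what the helper amounts to (sketch only; the authoritative definition and GFP flags are in the mm/util.c hunk of this diff):

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/uaccess.h>

	/* Sketch, not the mm/util.c implementation. */
	static void *vmemdup_user_sketch(const void __user *src, size_t len)
	{
		void *p = kvmalloc(len, GFP_USER);	/* may fall back to vmalloc */

		if (!p)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(p, src, len)) {	/* checked copy, no separate access_ok() */
			kvfree(p);
			return ERR_PTR(-EFAULT);
		}
		return p;
	}

Because the returned buffer may be vmalloc memory for larger sizes, callers must release it with kvfree(); a plain kfree() on such a buffer would be a bug, hence the conversions on every exit path above.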
diff --git a/sound/core/control.c b/sound/core/control.c
index 494389fb966c..50fa16022f1f 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/time.h> 27#include <linux/time.h>
28#include <linux/mm.h>
28#include <linux/sched/signal.h> 29#include <linux/sched/signal.h>
29#include <sound/core.h> 30#include <sound/core.h>
30#include <sound/minors.h> 31#include <sound/minors.h>
@@ -1129,7 +1130,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
1129 if (size > 1024 * 128) /* sane value */ 1130 if (size > 1024 * 128) /* sane value */
1130 return -EINVAL; 1131 return -EINVAL;
1131 1132
1132 container = memdup_user(buf, size); 1133 container = vmemdup_user(buf, size);
1133 if (IS_ERR(container)) 1134 if (IS_ERR(container))
1134 return PTR_ERR(container); 1135 return PTR_ERR(container);
1135 1136
@@ -1137,7 +1138,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
1137 if (!change) 1138 if (!change)
1138 change = memcmp(ue->tlv_data, container, size) != 0; 1139 change = memcmp(ue->tlv_data, container, size) != 0;
1139 if (!change) { 1140 if (!change) {
1140 kfree(container); 1141 kvfree(container);
1141 return 0; 1142 return 0;
1142 } 1143 }
1143 1144
@@ -1148,7 +1149,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
1148 mask = SNDRV_CTL_EVENT_MASK_INFO; 1149 mask = SNDRV_CTL_EVENT_MASK_INFO;
1149 } 1150 }
1150 1151
1151 kfree(ue->tlv_data); 1152 kvfree(ue->tlv_data);
1152 ue->tlv_data = container; 1153 ue->tlv_data = container;
1153 ue->tlv_data_size = size; 1154 ue->tlv_data_size = size;
1154 1155
@@ -1197,7 +1198,7 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
1197 if (ue->info.value.enumerated.names_length > 64 * 1024) 1198 if (ue->info.value.enumerated.names_length > 64 * 1024)
1198 return -EINVAL; 1199 return -EINVAL;
1199 1200
1200 names = memdup_user((const void __user *)user_ptrval, 1201 names = vmemdup_user((const void __user *)user_ptrval,
1201 ue->info.value.enumerated.names_length); 1202 ue->info.value.enumerated.names_length);
1202 if (IS_ERR(names)) 1203 if (IS_ERR(names))
1203 return PTR_ERR(names); 1204 return PTR_ERR(names);
@@ -1208,7 +1209,7 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
1208 for (i = 0; i < ue->info.value.enumerated.items; ++i) { 1209 for (i = 0; i < ue->info.value.enumerated.items; ++i) {
1209 name_len = strnlen(p, buf_len); 1210 name_len = strnlen(p, buf_len);
1210 if (name_len == 0 || name_len >= 64 || name_len == buf_len) { 1211 if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
1211 kfree(names); 1212 kvfree(names);
1212 return -EINVAL; 1213 return -EINVAL;
1213 } 1214 }
1214 p += name_len + 1; 1215 p += name_len + 1;
@@ -1225,8 +1226,8 @@ static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
1225{ 1226{
1226 struct user_element *ue = kcontrol->private_data; 1227 struct user_element *ue = kcontrol->private_data;
1227 1228
1228 kfree(ue->tlv_data); 1229 kvfree(ue->tlv_data);
1229 kfree(ue->priv_data); 1230 kvfree(ue->priv_data);
1230 kfree(ue); 1231 kfree(ue);
1231} 1232}
1232 1233
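
The control-element buffers here are sized by userspace (the TLV container up to 128 KiB, the enum-name block up to 64 KiB), so the kmalloc-backed memdup_user() is swapped for vmemdup_user() and every matching kfree() becomes kvfree(); the new <linux/mm.h> include provides the kvfree() declaration. A hedged sketch of the resulting ownership rule, using a hypothetical helper around the driver's real struct user_element:

	/* Hypothetical helper, not part of the patch: a buffer obtained with
	 * vmemdup_user() may be vmalloc-backed and must be released with
	 * kvfree(), never kfree().
	 */
	static int swap_user_tlv(struct user_element *ue,
				 const void __user *buf, size_t size)
	{
		void *container = vmemdup_user(buf, size);

		if (IS_ERR(container))
			return PTR_ERR(container);

		kvfree(ue->tlv_data);		/* previous copy was kv-allocated too */
		ue->tlv_data = container;
		ue->tlv_data_size = size;
		return 0;
	}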
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index cbda5c8b675f..26e71cf05f1e 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -233,8 +233,6 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
233 /* check whether the dsp was already loaded */ 233 /* check whether the dsp was already loaded */
234 if (hw->dsp_loaded & (1 << info.index)) 234 if (hw->dsp_loaded & (1 << info.index))
235 return -EBUSY; 235 return -EBUSY;
236 if (!access_ok(VERIFY_READ, info.image, info.length))
237 return -EFAULT;
238 err = hw->ops.dsp_load(hw, &info); 236 err = hw->ops.dsp_load(hw, &info);
239 if (err < 0) 237 if (err < 0)
240 return err; 238 return err;
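
The access_ok(VERIFY_READ, info.image, info.length) check dropped here added no protection: access_ok() only range-checks the pointer, and the driver's dsp_load callback still has to copy the image with a checked primitive, which repeats that validation and returns -EFAULT on its own. A hedged sketch of a typical callback (hypothetical driver function, real helpers):

	/* Hypothetical dsp_load implementation; memdup_user() performs both
	 * the access check and the copy, so a separate access_ok() in the
	 * core ioctl path is redundant.
	 */
	static int example_dsp_load(struct snd_hwdep *hw,
				    struct snd_hwdep_dsp_image *dsp)
	{
		char *buf = memdup_user(dsp->image, dsp->length);

		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* -EFAULT on a bad user pointer */

		/* ... hand buf to the device ... */

		kfree(buf);
		return 0;
	}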
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 509680d9b698..e2be10d17118 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -378,7 +378,7 @@ out:
378static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, 378static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
379 unsigned cmd, unsigned long arg) 379 unsigned cmd, unsigned long arg)
380{ 380{
381 struct usb_stream_config *cfg; 381 struct usb_stream_config cfg;
382 struct us122l *us122l = hw->private_data; 382 struct us122l *us122l = hw->private_data;
383 struct usb_stream *s; 383 struct usb_stream *s;
384 unsigned min_period_frames; 384 unsigned min_period_frames;
@@ -388,24 +388,21 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
388 if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS) 388 if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS)
389 return -ENOTTY; 389 return -ENOTTY;
390 390
391 cfg = memdup_user((void *)arg, sizeof(*cfg)); 391 if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg)))
392 if (IS_ERR(cfg)) 392 return -EFAULT;
393 return PTR_ERR(cfg); 393
394 if (cfg.version != USB_STREAM_INTERFACE_VERSION)
395 return -ENXIO;
394 396
395 if (cfg->version != USB_STREAM_INTERFACE_VERSION) {
396 err = -ENXIO;
397 goto free;
398 }
399 high_speed = us122l->dev->speed == USB_SPEED_HIGH; 397 high_speed = us122l->dev->speed == USB_SPEED_HIGH;
400 if ((cfg->sample_rate != 44100 && cfg->sample_rate != 48000 && 398 if ((cfg.sample_rate != 44100 && cfg.sample_rate != 48000 &&
401 (!high_speed || 399 (!high_speed ||
402 (cfg->sample_rate != 88200 && cfg->sample_rate != 96000))) || 400 (cfg.sample_rate != 88200 && cfg.sample_rate != 96000))) ||
403 cfg->frame_size != 6 || 401 cfg.frame_size != 6 ||
404 cfg->period_frames > 0x3000) { 402 cfg.period_frames > 0x3000)
405 err = -EINVAL; 403 return -EINVAL;
406 goto free; 404
407 } 405 switch (cfg.sample_rate) {
408 switch (cfg->sample_rate) {
409 case 44100: 406 case 44100:
410 min_period_frames = 48; 407 min_period_frames = 48;
411 break; 408 break;
@@ -418,10 +415,8 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
418 } 415 }
419 if (!high_speed) 416 if (!high_speed)
420 min_period_frames <<= 1; 417 min_period_frames <<= 1;
421 if (cfg->period_frames < min_period_frames) { 418 if (cfg.period_frames < min_period_frames)
422 err = -EINVAL; 419 return -EINVAL;
423 goto free;
424 }
425 420
426 snd_power_wait(hw->card, SNDRV_CTL_POWER_D0); 421 snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
427 422
@@ -430,24 +425,22 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
430 if (!us122l->master) 425 if (!us122l->master)
431 us122l->master = file; 426 us122l->master = file;
432 else if (us122l->master != file) { 427 else if (us122l->master != file) {
433 if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) { 428 if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg))) {
434 err = -EIO; 429 err = -EIO;
435 goto unlock; 430 goto unlock;
436 } 431 }
437 us122l->slave = file; 432 us122l->slave = file;
438 } 433 }
439 if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) || 434 if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg)) ||
440 s->state == usb_stream_xrun) { 435 s->state == usb_stream_xrun) {
441 us122l_stop(us122l); 436 us122l_stop(us122l);
442 if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames)) 437 if (!us122l_start(us122l, cfg.sample_rate, cfg.period_frames))
443 err = -EIO; 438 err = -EIO;
444 else 439 else
445 err = 1; 440 err = 1;
446 } 441 }
447unlock: 442unlock:
448 mutex_unlock(&us122l->mutex); 443 mutex_unlock(&us122l->mutex);
449free:
450 kfree(cfg);
451 wake_up_all(&us122l->sk.sleep); 444 wake_up_all(&us122l->sk.sleep);
452 return err; 445 return err;
453} 446}
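
struct usb_stream_config is a small, fixed-size ioctl argument, so duplicating it on the heap with memdup_user() bought nothing: copying it into an on-stack struct with copy_from_user() removes the allocation, the ERR_PTR handling, and the whole free: label, letting the validation paths simply return. The same pattern in isolation (hypothetical handler and struct, shown only to illustrate the idiom):

	/* Hypothetical handler: small fixed-size ioctl arguments are cheapest
	 * to copy straight onto the stack.
	 */
	struct example_cfg {
		unsigned int version;
		unsigned int sample_rate;
	};

	static int example_set_params(struct file *file, unsigned long arg)
	{
		struct example_cfg cfg;

		if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg)))
			return -EFAULT;
		if (cfg.version != 1)
			return -ENXIO;		/* nothing to free on error paths */

		/* ... apply cfg ... */
		return 0;
	}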
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index 8b0a1eae573c..07d15bae75bc 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -198,24 +198,22 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
198 struct snd_hwdep_dsp_image *dsp) 198 struct snd_hwdep_dsp_image *dsp)
199{ 199{
200 struct usX2Ydev *priv = hw->private_data; 200 struct usX2Ydev *priv = hw->private_data;
201 int lret, err = -EINVAL; 201 struct usb_device* dev = priv->dev;
202 snd_printdd( "dsp_load %s\n", dsp->name); 202 int lret, err;
203 char *buf;
203 204
204 if (access_ok(VERIFY_READ, dsp->image, dsp->length)) { 205 snd_printdd( "dsp_load %s\n", dsp->name);
205 struct usb_device* dev = priv->dev;
206 char *buf;
207 206
208 buf = memdup_user(dsp->image, dsp->length); 207 buf = memdup_user(dsp->image, dsp->length);
209 if (IS_ERR(buf)) 208 if (IS_ERR(buf))
210 return PTR_ERR(buf); 209 return PTR_ERR(buf);
211 210
212 err = usb_set_interface(dev, 0, 1); 211 err = usb_set_interface(dev, 0, 1);
213 if (err) 212 if (err)
214 snd_printk(KERN_ERR "usb_set_interface error \n"); 213 snd_printk(KERN_ERR "usb_set_interface error \n");
215 else 214 else
216 err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000); 215 err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
217 kfree(buf); 216 kfree(buf);
218 }
219 if (err) 217 if (err)
220 return err; 218 return err;
221 if (dsp->index == 1) { 219 if (dsp->index == 1) {