author     David Windsor <dave@nullcore.net>    2017-06-10 22:50:33 -0400
committer  Kees Cook <keescook@chromium.org>    2018-01-15 15:07:57 -0500
commit     de046449045a329bae5c9256e55b58a685a22532 (patch)
tree       7aa94c2a5f9506ad2ac0391f64211658ac3f92bb /fs/cifs/cifsfs.c
parent     e9a0561b7c8ef964078fa340fc1983f1f6d30544 (diff)
cifs: Define usercopy region in cifs_request slab cache
CIFS request buffers, stored in the cifs_request slab cache, need to be
copied to/from userspace.

cache object allocation:
    fs/cifs/cifsfs.c:
        cifs_init_request_bufs():
            ...
            cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
                                                      cifs_req_cachep);

    fs/cifs/misc.c:
        cifs_buf_get():
            ...
            ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
            ...
            return ret_buf;

In support of usercopy hardening, this patch defines a region in the
cifs_request slab cache in which userspace copy operations are allowed.
This region is known as the slab cache's usercopy region. Slab caches
can now check that each dynamically sized copy operation involving
cache-managed memory falls entirely within the slab's usercopy region.

This patch is verbatim from Brad Spengler/PaX Team's PAX_USERCOPY
whitelisting code in the last public patch of grsecurity/PaX based on
my understanding of the code. Changes or omissions from the original
code are mine and don't reflect the original grsecurity/PaX code.

Signed-off-by: David Windsor <dave@nullcore.net>
[kees: adjust commit log, provide usage trace]
Cc: Steve French <sfrench@samba.org>
Cc: linux-cifs@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
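The check this enables lives in the core slab code, not in CIFS itself.
A simplified sketch of that bounds test, modeled on the
__check_heap_object()/usercopy_abort() logic added elsewhere in this
hardening series (the helper name here is illustrative):

    /*
     * Illustrative sketch, not part of this patch: a user copy of n
     * bytes starting at 'offset' within a slab object is allowed only
     * if it falls entirely inside the cache's whitelisted region
     * [useroffset, useroffset + usersize).
     */
    static void check_usercopy_region(struct kmem_cache *s,
                                      unsigned long offset, unsigned long n)
    {
            if (offset >= s->useroffset &&
                offset - s->useroffset <= s->usersize &&
                n <= s->useroffset - offset + s->usersize)
                    return; /* copy stays inside the usercopy region */

            usercopy_abort("slab object", s->name, false, offset, n);
    }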
Diffstat (limited to 'fs/cifs/cifsfs.c')
-rw-r--r--  fs/cifs/cifsfs.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 31b7565b1617..29f4b0290fbd 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1231,9 +1231,11 @@ cifs_init_request_bufs(void)
 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
 		 CIFSMaxBufSize, CIFSMaxBufSize);
 */
-	cifs_req_cachep = kmem_cache_create("cifs_request",
-					    CIFSMaxBufSize + max_hdr_size, 0,
-					    SLAB_HWCACHE_ALIGN, NULL);
+	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
+					    CIFSMaxBufSize + max_hdr_size, 0,
+					    SLAB_HWCACHE_ALIGN, 0,
+					    CIFSMaxBufSize + max_hdr_size,
+					    NULL);
 	if (cifs_req_cachep == NULL)
 		return -ENOMEM;
 
@@ -1259,9 +1261,9 @@ cifs_init_request_bufs(void)
 	   more SMBs to use small buffer alloc and is still much more
 	   efficient to alloc 1 per page off the slab compared to 17K (5page)
 	   alloc of large cifs buffers even when page debugging is on */
-	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
-			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
-			NULL);
+	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
+			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
+			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
 	if (cifs_sm_req_cachep == NULL) {
 		mempool_destroy(cifs_req_poolp);
 		kmem_cache_destroy(cifs_req_cachep);
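Both caches above whitelist the entire object (useroffset 0, usersize
equal to the full object size), since the whole SMB request buffer is
copied to and from userspace. Caches whose objects are only partly
user-facing can whitelist just the relevant field. A minimal
hypothetical sketch (struct and names invented for illustration, not
from this patch):

    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>

    #define MY_PAYLOAD_SIZE 512

    /* Hypothetical object: only 'payload' is ever copied to/from userspace. */
    struct my_req {
            u32 hdr;                        /* kernel-internal header */
            char payload[MY_PAYLOAD_SIZE];  /* user-copied data */
    };

    static struct kmem_cache *my_req_cachep;

    static int my_caches_init(void)
    {
            my_req_cachep = kmem_cache_create_usercopy("my_req",
                            sizeof(struct my_req), 0, SLAB_HWCACHE_ALIGN,
                            offsetof(struct my_req, payload), /* useroffset */
                            MY_PAYLOAD_SIZE,                  /* usersize */
                            NULL);
            return my_req_cachep ? 0 : -ENOMEM;
    }

With CONFIG_HARDENED_USERCOPY enabled, a copy_to_user() overlapping
'hdr' would then trip the check, while copies confined to 'payload'
pass.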