author    Miklos Szeredi <mszeredi@suse.cz>    2010-07-12 08:41:40 -0400
committer Miklos Szeredi <mszeredi@suse.cz>    2010-07-12 08:41:40 -0400
commit    7909b1c64078087ac153fb47a2f50793fe3ee7d0 (patch)
tree      83367ca6286f3ebfee8b94533152848e7529e9fb /fs/fuse/dev.c
parent    815c4163b6c8ebf8152f42b0a5fd015cfdcedc78 (diff)
fuse: don't use atomic kmap
Don't use atomic kmap for mapping userspace buffers in device read/write/splice.

This is necessary because the next patch (adding store notify) requires that the caller of fuse_copy_page() may sleep between invocations. The simplest way to ensure this is to change the atomic kmaps to non-atomic ones. Thankfully, architectures where kmap() is not a no-op are going out of fashion, so we can ignore the (probably negligible) performance impact of this change.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
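A minimal sketch (not part of the patch; the helper names are illustrative) contrasting the two mapping styles as they existed in this kernel era, to show why the switch matters:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Atomic mapping: preemption is disabled while the page is mapped,
 * so the caller must not sleep between kmap_atomic() and kunmap_atomic().
 */
static void copy_to_page_atomic(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);
	kunmap_atomic(dst, KM_USER0);
}

/*
 * Non-atomic mapping: kmap() may itself sleep, and the caller is free to
 * sleep while the mapping is held -- which is what the store-notify patch
 * mentioned above needs from fuse_copy_page().
 */
static void copy_to_page_sleepable(struct page *page, const void *src, size_t len)
{
	void *dst = kmap(page);

	memcpy(dst, src, len);
	kunmap(page);
}

On architectures without highmem both calls reduce to page_address(), which is why the commit treats the cost of the non-atomic variant as negligible.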
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--  fs/fuse/dev.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 9424796d6634..7eb80d33c4f3 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -535,13 +535,13 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
 		if (!cs->write) {
 			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
 		} else {
-			kunmap_atomic(cs->mapaddr, KM_USER0);
+			kunmap(buf->page);
 			buf->len = PAGE_SIZE - cs->len;
 		}
 		cs->currbuf = NULL;
 		cs->mapaddr = NULL;
 	} else if (cs->mapaddr) {
-		kunmap_atomic(cs->mapaddr, KM_USER0);
+		kunmap(cs->pg);
 		if (cs->write) {
 			flush_dcache_page(cs->pg);
 			set_page_dirty_lock(cs->pg);
@@ -572,7 +572,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
 			BUG_ON(!cs->nr_segs);
 			cs->currbuf = buf;
-			cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
 			cs->len = buf->len;
 			cs->buf = cs->mapaddr + buf->offset;
 			cs->pipebufs++;
@@ -592,7 +592,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 			buf->len = 0;
 
 			cs->currbuf = buf;
-			cs->mapaddr = kmap_atomic(page, KM_USER0);
+			cs->mapaddr = kmap(page);
 			cs->buf = cs->mapaddr;
 			cs->len = PAGE_SIZE;
 			cs->pipebufs++;
@@ -611,7 +611,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 			return err;
 		BUG_ON(err != 1);
 		offset = cs->addr % PAGE_SIZE;
-		cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
+		cs->mapaddr = kmap(cs->pg);
 		cs->buf = cs->mapaddr + offset;
 		cs->len = min(PAGE_SIZE - offset, cs->seglen);
 		cs->seglen -= cs->len;