author	Wu Fengguang <fengguang.wu@intel.com>	2009-12-14 20:58:07 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:11 -0500
commit	f222318e9c3a315723e3524fb9d6566b2430db44 (patch)
tree	c40913c1e5e32eda037b687be2f835e2754b04b8 /drivers/char/mem.c
parent	4ea2f43f28e30050bc99fe3134b6b679f3bf5b22 (diff)
/dev/mem: introduce size_inside_page()
Introduce size_inside_page() to replace duplicate /dev/mem code.
Also apply it to /dev/kmem, whose alignment logic was buggy.
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Mark Brown <broonie@opensource.wolfsonmicro.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/char/mem.c')
-rw-r--r--	drivers/char/mem.c	60
1 file changed, 19 insertions(+), 41 deletions(-)
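As context for the hunks below: size_inside_page(start, size) returns how many of the next size bytes can be handled before crossing the page boundary that follows start. The following is a minimal user-space sketch of that arithmetic, not part of the commit; it assumes a 4 KiB page size and substitutes a plain comparison for the kernel's min_t().

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

/* User-space rendition of the helper added in the first hunk below. */
static unsigned long size_inside_page(unsigned long start, unsigned long size)
{
	unsigned long sz;

	/*
	 * -start & (PAGE_SIZE - 1) is the distance from start to the next
	 * page boundary, or 0 when start is already page-aligned.
	 */
	if (-start & (PAGE_SIZE - 1))
		sz = -start & (PAGE_SIZE - 1);
	else
		sz = PAGE_SIZE;

	return sz < size ? sz : size;	/* min_t(unsigned long, sz, size) */
}

int main(void)
{
	printf("%lu\n", size_inside_page(0x1ff8, 0x100));  /* 8: only 8 bytes left in this page */
	printf("%lu\n", size_inside_page(0x2000, 0x100));  /* 256: aligned start, short request */
	printf("%lu\n", size_inside_page(0x2000, 0x5000)); /* 4096: aligned start, capped at one page */
	return 0;
}

The point, per the commit message, is that every per-iteration chunk stays inside a single page, which the open-coded /dev/mem loops already ensured but the /dev/kmem loops did not.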
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f0a90590cb1a..aaa9c24d4c14 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -34,6 +34,19 @@
 # include <linux/efi.h>
 #endif
 
+static inline unsigned long size_inside_page(unsigned long start,
+					     unsigned long size)
+{
+	unsigned long sz;
+
+	if (-start & (PAGE_SIZE - 1))
+		sz = -start & (PAGE_SIZE - 1);
+	else
+		sz = PAGE_SIZE;
+
+	return min_t(unsigned long, sz, size);
+}
+
 /*
  * Architectures vary in how they handle caching for addresses
  * outside of main memory.
@@ -141,15 +154,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
 #endif
 
 	while (count > 0) {
-		/*
-		 * Handle first page in case it's not aligned
-		 */
-		if (-p & (PAGE_SIZE - 1))
-			sz = -p & (PAGE_SIZE - 1);
-		else
-			sz = PAGE_SIZE;
-
-		sz = min_t(unsigned long, sz, count);
+		sz = size_inside_page(p, count);
 
 		if (!range_is_allowed(p >> PAGE_SHIFT, count))
 			return -EPERM;
@@ -208,15 +213,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 #endif
 
 	while (count > 0) {
-		/*
-		 * Handle first page in case it's not aligned
-		 */
-		if (-p & (PAGE_SIZE - 1))
-			sz = -p & (PAGE_SIZE - 1);
-		else
-			sz = PAGE_SIZE;
-
-		sz = min_t(unsigned long, sz, count);
+		sz = size_inside_page(p, count);
 
 		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
 			return -EPERM;
@@ -429,15 +426,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 		}
 #endif
 		while (low_count > 0) {
-			/*
-			 * Handle first page in case it's not aligned
-			 */
-			if (-p & (PAGE_SIZE - 1))
-				sz = -p & (PAGE_SIZE - 1);
-			else
-				sz = PAGE_SIZE;
-
-			sz = min_t(unsigned long, sz, low_count);
+			sz = size_inside_page(p, low_count);
 
 			/*
 			 * On ia64 if a page has been mapped somewhere as
@@ -461,10 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 		if (!kbuf)
 			return -ENOMEM;
 		while (count > 0) {
-			int len = count;
+			int len = size_inside_page(p, count);
 
-			if (len > PAGE_SIZE)
-				len = PAGE_SIZE;
 			len = vread(kbuf, (char *)p, len);
 			if (!len)
 				break;
@@ -509,15 +496,8 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
 
 	while (count > 0) {
 		char *ptr;
-		/*
-		 * Handle first page in case it's not aligned
-		 */
-		if (-realp & (PAGE_SIZE - 1))
-			sz = -realp & (PAGE_SIZE - 1);
-		else
-			sz = PAGE_SIZE;
 
-		sz = min_t(unsigned long, sz, count);
+		sz = size_inside_page(realp, count);
 
 		/*
 		 * On ia64 if a page has been mapped somewhere as
@@ -577,10 +557,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
 		if (!kbuf)
 			return wrote ? wrote : -ENOMEM;
 		while (count > 0) {
-			int len = count;
+			int len = size_inside_page(p, count);
 
-			if (len > PAGE_SIZE)
-				len = PAGE_SIZE;
 			written = copy_from_user(kbuf, buf, len);
 			if (written) {
 				if (wrote + virtr)
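To make the "whose alignment logic was buggy" remark concrete: the old read_kmem()/write_kmem() page-buffer loops only clamped each chunk to PAGE_SIZE, so a transfer that starts mid-page produces chunks that straddle page boundaries, whereas with size_inside_page() the first chunk stops at the boundary and every later chunk is naturally page-aligned. The sketch below is not part of the commit; it assumes 4 KiB pages, restates the helper in user-space form, and only shows the difference in chunking for a 0x2000-byte transfer starting at 0x1ff8.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

/* Equivalent restatement of the kernel helper for a standalone demo. */
static unsigned long size_inside_page(unsigned long start, unsigned long size)
{
	unsigned long sz = -start & (PAGE_SIZE - 1);

	if (!sz)
		sz = PAGE_SIZE;
	return sz < size ? sz : size;
}

int main(void)
{
	unsigned long p, count;

	/* old /dev/kmem chunking: clamp each chunk to PAGE_SIZE only */
	for (p = 0x1ff8, count = 0x2000; count > 0; ) {
		unsigned long len = count > PAGE_SIZE ? PAGE_SIZE : count;

		printf("old: %#06lx bytes at %#07lx%s\n", len, p,
		       (p & (PAGE_SIZE - 1)) + len > PAGE_SIZE ?
		       "  (crosses a page boundary)" : "");
		p += len;
		count -= len;
	}

	/* new chunking: each chunk also stops at the next page boundary */
	for (p = 0x1ff8, count = 0x2000; count > 0; ) {
		unsigned long len = size_inside_page(p, count);

		printf("new: %#06lx bytes at %#07lx\n", len, p);
		p += len;
		count -= len;
	}
	return 0;
}

The old loop emits two 0x1000-byte chunks at 0x1ff8 and 0x2ff8, both crossing a page boundary; the new loop emits chunks of 8, 0x1000 and 0xff8 bytes at 0x1ff8, 0x2000 and 0x3000, each confined to a single page.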