diff options
author | Arve Hjønnevåg <arve@android.com> | 2009-04-06 18:12:58 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-04-17 14:06:26 -0400 |
commit | 7af7467efa64affc6505375ceac97d68cfb58e94 (patch) | |
tree | c68b173c0a08f95b633edb936b5a09929ceb7ace | |
parent | 282ca175d4c440ec4d74bc622ee497e5b3530ce5 (diff) |
Staging: binder: Cast to uintptr_t instead of size_t when aligning pointers
Signed-off-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r-- | drivers/staging/android/binder.c | 26 |
1 file changed, 14 insertions, 12 deletions
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 09962e87c60..91a96292e6b 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c | |||
@@ -246,7 +246,7 @@ struct binder_proc { | |||
246 | struct files_struct *files; | 246 | struct files_struct *files; |
247 | struct hlist_node release_files_node; | 247 | struct hlist_node release_files_node; |
248 | void *buffer; | 248 | void *buffer; |
249 | size_t user_buffer_offset; | 249 | ptrdiff_t user_buffer_offset; |
250 | 250 | ||
251 | struct list_head buffers; | 251 | struct list_head buffers; |
252 | struct rb_root free_buffers; | 252 | struct rb_root free_buffers; |
@@ -614,7 +614,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, | |||
614 | proc->pid, page_addr); | 614 | proc->pid, page_addr); |
615 | goto err_map_kernel_failed; | 615 | goto err_map_kernel_failed; |
616 | } | 616 | } |
617 | user_page_addr = (size_t)page_addr + proc->user_buffer_offset; | 617 | user_page_addr = |
618 | (uintptr_t)page_addr + proc->user_buffer_offset; | ||
618 | ret = vm_insert_page(vma, user_page_addr, page[0]); | 619 | ret = vm_insert_page(vma, user_page_addr, page[0]); |
619 | if (ret) { | 620 | if (ret) { |
620 | printk(KERN_ERR "binder: %d: binder_alloc_buf failed " | 621 | printk(KERN_ERR "binder: %d: binder_alloc_buf failed " |
@@ -635,7 +636,7 @@ free_range: | |||
635 | page_addr -= PAGE_SIZE) { | 636 | page_addr -= PAGE_SIZE) { |
636 | page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; | 637 | page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; |
637 | if (vma) | 638 | if (vma) |
638 | zap_page_range(vma, (size_t)page_addr + | 639 | zap_page_range(vma, (uintptr_t)page_addr + |
639 | proc->user_buffer_offset, PAGE_SIZE, NULL); | 640 | proc->user_buffer_offset, PAGE_SIZE, NULL); |
640 | err_vm_insert_page_failed: | 641 | err_vm_insert_page_failed: |
641 | unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); | 642 | unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); |
@@ -716,18 +717,19 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, | |||
716 | "er %p size %zd\n", proc->pid, size, buffer, buffer_size); | 717 | "er %p size %zd\n", proc->pid, size, buffer, buffer_size); |
717 | 718 | ||
718 | has_page_addr = | 719 | has_page_addr = |
719 | (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK); | 720 | (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); |
720 | if (n == NULL) { | 721 | if (n == NULL) { |
721 | if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) | 722 | if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) |
722 | buffer_size = size; /* no room for other buffers */ | 723 | buffer_size = size; /* no room for other buffers */ |
723 | else | 724 | else |
724 | buffer_size = size + sizeof(struct binder_buffer); | 725 | buffer_size = size + sizeof(struct binder_buffer); |
725 | } | 726 | } |
726 | end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size); | 727 | end_page_addr = |
728 | (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); | ||
727 | if (end_page_addr > has_page_addr) | 729 | if (end_page_addr > has_page_addr) |
728 | end_page_addr = has_page_addr; | 730 | end_page_addr = has_page_addr; |
729 | if (binder_update_page_range(proc, 1, | 731 | if (binder_update_page_range(proc, 1, |
730 | (void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL)) | 732 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) |
731 | return NULL; | 733 | return NULL; |
732 | 734 | ||
733 | rb_erase(best_fit, &proc->free_buffers); | 735 | rb_erase(best_fit, &proc->free_buffers); |
@@ -758,12 +760,12 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, | |||
758 | 760 | ||
759 | static void *buffer_start_page(struct binder_buffer *buffer) | 761 | static void *buffer_start_page(struct binder_buffer *buffer) |
760 | { | 762 | { |
761 | return (void *)((size_t)buffer & PAGE_MASK); | 763 | return (void *)((uintptr_t)buffer & PAGE_MASK); |
762 | } | 764 | } |
763 | 765 | ||
764 | static void *buffer_end_page(struct binder_buffer *buffer) | 766 | static void *buffer_end_page(struct binder_buffer *buffer) |
765 | { | 767 | { |
766 | return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK); | 768 | return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); |
767 | } | 769 | } |
768 | 770 | ||
769 | static void binder_delete_free_buffer( | 771 | static void binder_delete_free_buffer( |
@@ -841,8 +843,8 @@ static void binder_free_buf( | |||
841 | } | 843 | } |
842 | 844 | ||
843 | binder_update_page_range(proc, 0, | 845 | binder_update_page_range(proc, 0, |
844 | (void *)PAGE_ALIGN((size_t)buffer->data), | 846 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), |
845 | (void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK), | 847 | (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), |
846 | NULL); | 848 | NULL); |
847 | rb_erase(&buffer->rb_node, &proc->allocated_buffers); | 849 | rb_erase(&buffer->rb_node, &proc->allocated_buffers); |
848 | buffer->free = 1; | 850 | buffer->free = 1; |
@@ -2347,7 +2349,7 @@ retry: | |||
2347 | 2349 | ||
2348 | tr.data_size = t->buffer->data_size; | 2350 | tr.data_size = t->buffer->data_size; |
2349 | tr.offsets_size = t->buffer->offsets_size; | 2351 | tr.offsets_size = t->buffer->offsets_size; |
2350 | tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset); | 2352 | tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset; |
2351 | tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); | 2353 | tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); |
2352 | 2354 | ||
2353 | if (put_user(cmd, (uint32_t __user *)ptr)) | 2355 | if (put_user(cmd, (uint32_t __user *)ptr)) |
@@ -2753,7 +2755,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) | |||
2753 | goto err_get_vm_area_failed; | 2755 | goto err_get_vm_area_failed; |
2754 | } | 2756 | } |
2755 | proc->buffer = area->addr; | 2757 | proc->buffer = area->addr; |
2756 | proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer; | 2758 | proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; |
2757 | 2759 | ||
2758 | #ifdef CONFIG_CPU_CACHE_VIPT | 2760 | #ifdef CONFIG_CPU_CACHE_VIPT |
2759 | if (cache_is_vipt_aliasing()) { | 2761 | if (cache_is_vipt_aliasing()) { |