author     John Stultz <john.stultz@linaro.org>        2011-12-20 19:49:54 -0500
committer  Greg Kroah-Hartman <gregkh@suse.de>         2011-12-21 16:40:25 -0500
commit     1efb34394a694b458d66f25072318c375e22afe2 (patch)
tree       71c58a14a06c12e0d4cb75173ab860c37d136a36
parent     33e8fc463eeec29227282e4bd2082f5928d629a5 (diff)
ashmem: Whitespace cleanups
Fixes checkpatch warnings in the ashmem.c file
CC: Brian Swetland <swetland@google.com>
CC: Colin Cross <ccross@android.com>
CC: Arve Hjønnevåg <arve@android.com>
CC: Dima Zavin <dima@android.com>
CC: Robert Love <rlove@google.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--   drivers/staging/android/ashmem.c   46
1 files changed, 21 insertions, 25 deletions
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a78ba21e02b5..99052bfd3a2d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -41,11 +41,11 @@
  * Big Note: Mappings do NOT pin this structure; it dies on close()
  */
 struct ashmem_area {
-        char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */
+        char name[ASHMEM_FULL_NAME_LEN];        /* optional name in /proc/pid/maps */
         struct list_head unpinned_list;         /* list of all ashmem areas */
         struct file *file;                      /* the shmem-based backing file */
         size_t size;                            /* size of the mapping, in bytes */
         unsigned long prot_mask;                /* allowed prot bits, as vm_flags */
 };
 
 /*
@@ -79,26 +79,26 @@ static struct kmem_cache *ashmem_area_cachep __read_mostly;
 static struct kmem_cache *ashmem_range_cachep __read_mostly;
 
 #define range_size(range) \
         ((range)->pgend - (range)->pgstart + 1)
 
 #define range_on_lru(range) \
         ((range)->purged == ASHMEM_NOT_PURGED)
 
 #define page_range_subsumes_range(range, start, end) \
         (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
 
 #define page_range_subsumed_by_range(range, start, end) \
         (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
 
 #define page_in_range(range, page) \
         (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
 
 #define page_range_in_range(range, start, end) \
         (page_in_range(range, start) || page_in_range(range, end) || \
                 page_range_subsumes_range(range, start, end))
 
 #define range_before_page(range, page) \
         ((range)->pgend < (page))
 
 #define PROT_MASK       (PROT_EXEC | PROT_READ | PROT_WRITE)
 
@@ -220,9 +220,8 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
         mutex_lock(&ashmem_mutex);
 
         /* If size is not set, or set to 0, always return EOF. */
-        if (asma->size == 0) {
+        if (asma->size == 0)
                 goto out;
-        }
 
         if (!asma->file) {
                 ret = -EBADF;
@@ -230,9 +229,8 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
         }
 
         ret = asma->file->f_op->read(asma->file, buf, len, pos);
-        if (ret < 0) {
+        if (ret < 0)
                 goto out;
-        }
 
         /** Update backing file pos, since f_ops->read() doesn't */
         asma->file->f_pos = *pos;
@@ -260,9 +258,8 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
         }
 
         ret = asma->file->f_op->llseek(asma->file, offset, origin);
-        if (ret < 0) {
+        if (ret < 0)
                 goto out;
-        }
 
         /** Copy f_pos from backing file, since f_ops->llseek() sets it */
         file->f_pos = asma->file->f_pos;
@@ -272,10 +269,9 @@ out:
         return ret;
 }
 
-static inline unsigned long
-calc_vm_may_flags(unsigned long prot)
+static inline unsigned long calc_vm_may_flags(unsigned long prot)
 {
-        return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD ) |
+        return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
                _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
                _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
 }
@@ -295,7 +291,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 
         /* requested protection bits must match our allowed protection mask */
         if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
                      calc_vm_prot_bits(PROT_MASK))) {
                 ret = -EPERM;
                 goto out;
         }
@@ -688,8 +684,8 @@ static struct file_operations ashmem_fops = {
         .owner = THIS_MODULE,
         .open = ashmem_open,
         .release = ashmem_release,
         .read = ashmem_read,
         .llseek = ashmem_llseek,
         .mmap = ashmem_mmap,
         .unlocked_ioctl = ashmem_ioctl,
         .compat_ioctl = ashmem_ioctl,
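
The hunks above apply a handful of kernel coding-style conventions of the kind checkpatch.pl reports: drop braces around single-statement branches, keep a function signature on one line, avoid a stray space before a closing parenthesis, and align trailing comments. Below is a minimal, self-contained C sketch of that style; the names (demo_area, demo_check) are invented for illustration and do not appear in ashmem.c.

#include <stdio.h>
#include <stddef.h>

/*
 * Hypothetical structure (not from ashmem.c) written in the style the
 * cleanup converges on: trailing comments aligned after each member.
 */
struct demo_area {
        char name[64];          /* optional human-readable name */
        size_t size;            /* size of the mapping, in bytes */
        unsigned long flags;    /* allowed bits */
};

/* Signature kept on one line; single-statement branches carry no braces. */
static int demo_check(const struct demo_area *area)
{
        if (area->size == 0)
                return -1;      /* an unset size is treated as an error here */

        return 0;
}

int main(void)
{
        struct demo_area a = { .name = "demo", .size = 0, .flags = 0 };

        if (demo_check(&a) < 0)
                printf("size not set\n");

        return 0;
}

The sketch only makes the pattern behind the whitespace and brace changes easier to read; it is ordinary userspace C, not kernel code.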