author     Al Viro <viro@zeniv.linux.org.uk>   2016-10-07 20:09:18 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>   2016-10-07 20:09:18 -0400
commit     bf02f5d2c080cf61b770e09add602ca360d538fb (patch)
tree       1899f41554f8778a4cc1860adb53c95deaedaafb
parent     4b899da50dcf1a7850715650281b5d76af8a5eb4 (diff)
parent     2c563880ea8fdc900693ae372fa07b3894f8ff63 (diff)
Merge commit '2c563880ea' into work.xattr
pick xattr_handler conversion from lustre tree
33 files changed, 296 insertions, 530 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 16288e777ec3..562af94bec35 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -59,7 +59,6 @@ static struct dentry *binder_debugfs_dir_entry_proc; | |||
59 | static struct binder_node *binder_context_mgr_node; | 59 | static struct binder_node *binder_context_mgr_node; |
60 | static kuid_t binder_context_mgr_uid = INVALID_UID; | 60 | static kuid_t binder_context_mgr_uid = INVALID_UID; |
61 | static int binder_last_id; | 61 | static int binder_last_id; |
62 | static struct workqueue_struct *binder_deferred_workqueue; | ||
63 | 62 | ||
64 | #define BINDER_DEBUG_ENTRY(name) \ | 63 | #define BINDER_DEBUG_ENTRY(name) \ |
65 | static int binder_##name##_open(struct inode *inode, struct file *file) \ | 64 | static int binder_##name##_open(struct inode *inode, struct file *file) \ |
@@ -3227,7 +3226,7 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) | |||
3227 | if (hlist_unhashed(&proc->deferred_work_node)) { | 3226 | if (hlist_unhashed(&proc->deferred_work_node)) { |
3228 | hlist_add_head(&proc->deferred_work_node, | 3227 | hlist_add_head(&proc->deferred_work_node, |
3229 | &binder_deferred_list); | 3228 | &binder_deferred_list); |
3230 | queue_work(binder_deferred_workqueue, &binder_deferred_work); | 3229 | schedule_work(&binder_deferred_work); |
3231 | } | 3230 | } |
3232 | mutex_unlock(&binder_deferred_lock); | 3231 | mutex_unlock(&binder_deferred_lock); |
3233 | } | 3232 | } |
@@ -3679,10 +3678,6 @@ static int __init binder_init(void) | |||
3679 | { | 3678 | { |
3680 | int ret; | 3679 | int ret; |
3681 | 3680 | ||
3682 | binder_deferred_workqueue = create_singlethread_workqueue("binder"); | ||
3683 | if (!binder_deferred_workqueue) | ||
3684 | return -ENOMEM; | ||
3685 | |||
3686 | binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); | 3681 | binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); |
3687 | if (binder_debugfs_dir_entry_root) | 3682 | if (binder_debugfs_dir_entry_root) |
3688 | binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", | 3683 | binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", |
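The binder hunks above drop the driver's private single-threaded workqueue: the deferred work item is queued on the shared system workqueue with schedule_work(), so the create_singlethread_workqueue() call and its -ENOMEM failure path disappear from binder_init(). A minimal sketch of that pattern, with hypothetical names rather than binder's own:

        #include <linux/workqueue.h>

        static void example_deferred_func(struct work_struct *work)
        {
                /* drain whatever was deferred */
        }
        static DECLARE_WORK(example_deferred_work, example_deferred_func);

        static void example_defer(void)
        {
                /*
                 * No private workqueue to allocate at init time and no
                 * queue_work() on it; the system workqueue is always there.
                 */
                schedule_work(&example_deferred_work);
        }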
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 25bcfa0b474f..2585821b24ab 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -17,4 +17,17 @@ config SYNC_FILE | |||
17 | Files fds, to the DRM driver for example. More details at | 17 | Files fds, to the DRM driver for example. More details at |
18 | Documentation/sync_file.txt. | 18 | Documentation/sync_file.txt. |
19 | 19 | ||
20 | config SW_SYNC | ||
21 | bool "Sync File Validation Framework" | ||
22 | default n | ||
23 | depends on SYNC_FILE | ||
24 | depends on DEBUG_FS | ||
25 | ---help--- | ||
26 | A sync object driver that uses a 32bit counter to coordinate | ||
27 | synchronization. Useful when there is no hardware primitive backing | ||
28 | the synchronization. | ||
29 | |||
30 | WARNING: improper use of this can result in deadlocking kernel | ||
31 | drivers from userspace. Intended for test and debug only. | ||
32 | |||
20 | endmenu | 33 | endmenu |
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index f353db213a81..210a10bfad2b 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,3 @@ | |||
1 | obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o | 1 | obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o |
2 | obj-$(CONFIG_SYNC_FILE) += sync_file.o | 2 | obj-$(CONFIG_SYNC_FILE) += sync_file.o |
3 | obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o | ||
diff --git a/drivers/staging/android/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 115c9174705f..62e8e6dc7953 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/dma-buf/sw_sync.c | 2 | * Sync File validation framework |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Google, Inc. | 4 | * Copyright (C) 2012 Google, Inc. |
5 | * | 5 | * |
@@ -23,8 +23,38 @@ | |||
23 | #include "sync_debug.h" | 23 | #include "sync_debug.h" |
24 | 24 | ||
25 | #define CREATE_TRACE_POINTS | 25 | #define CREATE_TRACE_POINTS |
26 | #include "trace/sync.h" | 26 | #include "sync_trace.h" |
27 | 27 | ||
28 | /* | ||
29 | * SW SYNC validation framework | ||
30 | * | ||
31 | * A sync object driver that uses a 32bit counter to coordinate | ||
32 | * synchronization. Useful when there is no hardware primitive backing | ||
33 | * the synchronization. | ||
34 | * | ||
35 | * To start the framework just open: | ||
36 | * | ||
37 | * <debugfs>/sync/sw_sync | ||
38 | * | ||
39 | * That will create a sync timeline, all fences created under this timeline | ||
40 | * file descriptor will belong to this timeline. | ||
41 | * | ||
42 | * The 'sw_sync' file can be opened many times to create different | ||
43 | * timelines. | ||
44 | * | ||
45 | * Fences can be created with SW_SYNC_IOC_CREATE_FENCE ioctl with struct | ||
46 | * sw_sync_ioctl_create_fence as parameter. | ||
47 | * | ||
48 | * To increment the timeline counter, SW_SYNC_IOC_INC ioctl should be used | ||
49 | * with the increment as u32. This will update the last signaled value | ||
50 | * from the timeline and signal any fence that has a seqno smaller or equal | ||
51 | * to it. | ||
52 | * | ||
53 | * struct sw_sync_ioctl_create_fence | ||
54 | * @value: the seqno to initialise the fence with | ||
55 | * @name: the name of the new sync point | ||
56 | * @fence: return the fd of the new sync_file with the created fence | ||
57 | */ | ||
28 | struct sw_sync_create_fence_data { | 58 | struct sw_sync_create_fence_data { |
29 | __u32 value; | 59 | __u32 value; |
30 | char name[32]; | 60 | char name[32]; |
@@ -35,6 +65,7 @@ struct sw_sync_create_fence_data { | |||
35 | 65 | ||
36 | #define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\ | 66 | #define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\ |
37 | struct sw_sync_create_fence_data) | 67 | struct sw_sync_create_fence_data) |
68 | |||
38 | #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) | 69 | #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) |
39 | 70 | ||
40 | static const struct fence_ops timeline_fence_ops; | 71 | static const struct fence_ops timeline_fence_ops; |
@@ -176,7 +207,7 @@ static void timeline_fence_release(struct fence *fence) | |||
176 | 207 | ||
177 | spin_lock_irqsave(fence->lock, flags); | 208 | spin_lock_irqsave(fence->lock, flags); |
178 | list_del(&pt->child_list); | 209 | list_del(&pt->child_list); |
179 | if (WARN_ON_ONCE(!list_empty(&pt->active_list))) | 210 | if (!list_empty(&pt->active_list)) |
180 | list_del(&pt->active_list); | 211 | list_del(&pt->active_list); |
181 | spin_unlock_irqrestore(fence->lock, flags); | 212 | spin_unlock_irqrestore(fence->lock, flags); |
182 | 213 | ||
diff --git a/drivers/staging/android/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 4c5a85595a85..fab95204cf74 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/base/sync.c | 2 | * Sync File validation framework and debug information |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Google, Inc. | 4 | * Copyright (C) 2012 Google, Inc. |
5 | * | 5 | * |
diff --git a/drivers/staging/android/sync_debug.h b/drivers/dma-buf/sync_debug.h
index fab66396d421..d269aa6783aa 100644
--- a/drivers/staging/android/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * include/linux/sync.h | 2 | * Sync File validation framework and debug information |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Google, Inc. | 4 | * Copyright (C) 2012 Google, Inc. |
5 | * | 5 | * |
diff --git a/drivers/staging/android/trace/sync.h b/drivers/dma-buf/sync_trace.h
index 6b5ce9640ddd..d13d59ff1b85 100644
--- a/drivers/staging/android/trace/sync.h
+++ b/drivers/dma-buf/sync_trace.h
@@ -1,11 +1,11 @@ | |||
1 | #undef TRACE_SYSTEM | 1 | #undef TRACE_SYSTEM |
2 | #define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace | 2 | #define TRACE_INCLUDE_PATH ../../drivers/dma-buf |
3 | #define TRACE_SYSTEM sync | 3 | #define TRACE_SYSTEM sync_trace |
4 | 4 | ||
5 | #if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) | 5 | #if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) |
6 | #define _TRACE_SYNC_H | 6 | #define _TRACE_SYNC_H |
7 | 7 | ||
8 | #include "../sync_debug.h" | 8 | #include "sync_debug.h" |
9 | #include <linux/tracepoint.h> | 9 | #include <linux/tracepoint.h> |
10 | 10 | ||
11 | TRACE_EVENT(sync_timeline, | 11 | TRACE_EVENT(sync_timeline, |
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 06e41d24ec62..6c00d6f765c6 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -24,19 +24,6 @@ config ANDROID_LOW_MEMORY_KILLER | |||
24 | scripts (/init.rc), and it defines priority values with minimum free memory size | 24 | scripts (/init.rc), and it defines priority values with minimum free memory size |
25 | for each priority. | 25 | for each priority. |
26 | 26 | ||
27 | config SW_SYNC | ||
28 | bool "Software synchronization framework" | ||
29 | default n | ||
30 | depends on SYNC_FILE | ||
31 | depends on DEBUG_FS | ||
32 | ---help--- | ||
33 | A sync object driver that uses a 32bit counter to coordinate | ||
34 | synchronization. Useful when there is no hardware primitive backing | ||
35 | the synchronization. | ||
36 | |||
37 | WARNING: improper use of this can result in deadlocking kernel | ||
38 | drivers from userspace. Intended for test and debug only. | ||
39 | |||
40 | source "drivers/staging/android/ion/Kconfig" | 27 | source "drivers/staging/android/ion/Kconfig" |
41 | 28 | ||
42 | endif # if ANDROID | 29 | endif # if ANDROID |
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 7ca61b77a8d4..7ed1be798909 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -4,4 +4,3 @@ obj-y += ion/ | |||
4 | 4 | ||
5 | obj-$(CONFIG_ASHMEM) += ashmem.o | 5 | obj-$(CONFIG_ASHMEM) += ashmem.o |
6 | obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o | 6 | obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o |
7 | obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o | ||
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a2cf93b59016..88dd17e8b19a 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -205,19 +205,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, | |||
205 | goto err2; | 205 | goto err2; |
206 | } | 206 | } |
207 | 207 | ||
208 | buffer->dev = dev; | 208 | if (buffer->sg_table == NULL) { |
209 | buffer->size = len; | 209 | WARN_ONCE(1, "This heap needs to set the sgtable"); |
210 | |||
211 | table = heap->ops->map_dma(heap, buffer); | ||
212 | if (WARN_ONCE(table == NULL, | ||
213 | "heap->ops->map_dma should return ERR_PTR on error")) | ||
214 | table = ERR_PTR(-EINVAL); | ||
215 | if (IS_ERR(table)) { | ||
216 | ret = -EINVAL; | 210 | ret = -EINVAL; |
217 | goto err1; | 211 | goto err1; |
218 | } | 212 | } |
219 | 213 | ||
220 | buffer->sg_table = table; | 214 | table = buffer->sg_table; |
215 | buffer->dev = dev; | ||
216 | buffer->size = len; | ||
217 | |||
221 | if (ion_buffer_fault_user_mappings(buffer)) { | 218 | if (ion_buffer_fault_user_mappings(buffer)) { |
222 | int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; | 219 | int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; |
223 | struct scatterlist *sg; | 220 | struct scatterlist *sg; |
@@ -226,7 +223,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, | |||
226 | buffer->pages = vmalloc(sizeof(struct page *) * num_pages); | 223 | buffer->pages = vmalloc(sizeof(struct page *) * num_pages); |
227 | if (!buffer->pages) { | 224 | if (!buffer->pages) { |
228 | ret = -ENOMEM; | 225 | ret = -ENOMEM; |
229 | goto err; | 226 | goto err1; |
230 | } | 227 | } |
231 | 228 | ||
232 | for_each_sg(table->sgl, sg, table->nents, i) { | 229 | for_each_sg(table->sgl, sg, table->nents, i) { |
@@ -260,8 +257,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, | |||
260 | mutex_unlock(&dev->buffer_lock); | 257 | mutex_unlock(&dev->buffer_lock); |
261 | return buffer; | 258 | return buffer; |
262 | 259 | ||
263 | err: | ||
264 | heap->ops->unmap_dma(heap, buffer); | ||
265 | err1: | 260 | err1: |
266 | heap->ops->free(buffer); | 261 | heap->ops->free(buffer); |
267 | err2: | 262 | err2: |
@@ -273,7 +268,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer) | |||
273 | { | 268 | { |
274 | if (WARN_ON(buffer->kmap_cnt > 0)) | 269 | if (WARN_ON(buffer->kmap_cnt > 0)) |
275 | buffer->heap->ops->unmap_kernel(buffer->heap, buffer); | 270 | buffer->heap->ops->unmap_kernel(buffer->heap, buffer); |
276 | buffer->heap->ops->unmap_dma(buffer->heap, buffer); | ||
277 | buffer->heap->ops->free(buffer); | 271 | buffer->heap->ops->free(buffer); |
278 | vfree(buffer->pages); | 272 | vfree(buffer->pages); |
279 | kfree(buffer); | 273 | kfree(buffer); |
@@ -551,7 +545,8 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len, | |||
551 | } | 545 | } |
552 | EXPORT_SYMBOL(ion_alloc); | 546 | EXPORT_SYMBOL(ion_alloc); |
553 | 547 | ||
554 | static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle) | 548 | static void ion_free_nolock(struct ion_client *client, |
549 | struct ion_handle *handle) | ||
555 | { | 550 | { |
556 | bool valid_handle; | 551 | bool valid_handle; |
557 | 552 | ||
@@ -576,32 +571,6 @@ void ion_free(struct ion_client *client, struct ion_handle *handle) | |||
576 | } | 571 | } |
577 | EXPORT_SYMBOL(ion_free); | 572 | EXPORT_SYMBOL(ion_free); |
578 | 573 | ||
579 | int ion_phys(struct ion_client *client, struct ion_handle *handle, | ||
580 | ion_phys_addr_t *addr, size_t *len) | ||
581 | { | ||
582 | struct ion_buffer *buffer; | ||
583 | int ret; | ||
584 | |||
585 | mutex_lock(&client->lock); | ||
586 | if (!ion_handle_validate(client, handle)) { | ||
587 | mutex_unlock(&client->lock); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | |||
591 | buffer = handle->buffer; | ||
592 | |||
593 | if (!buffer->heap->ops->phys) { | ||
594 | pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n", | ||
595 | __func__, buffer->heap->name, buffer->heap->type); | ||
596 | mutex_unlock(&client->lock); | ||
597 | return -ENODEV; | ||
598 | } | ||
599 | mutex_unlock(&client->lock); | ||
600 | ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); | ||
601 | return ret; | ||
602 | } | ||
603 | EXPORT_SYMBOL(ion_phys); | ||
604 | |||
605 | static void *ion_buffer_kmap_get(struct ion_buffer *buffer) | 574 | static void *ion_buffer_kmap_get(struct ion_buffer *buffer) |
606 | { | 575 | { |
607 | void *vaddr; | 576 | void *vaddr; |
@@ -917,26 +886,6 @@ void ion_client_destroy(struct ion_client *client) | |||
917 | } | 886 | } |
918 | EXPORT_SYMBOL(ion_client_destroy); | 887 | EXPORT_SYMBOL(ion_client_destroy); |
919 | 888 | ||
920 | struct sg_table *ion_sg_table(struct ion_client *client, | ||
921 | struct ion_handle *handle) | ||
922 | { | ||
923 | struct ion_buffer *buffer; | ||
924 | struct sg_table *table; | ||
925 | |||
926 | mutex_lock(&client->lock); | ||
927 | if (!ion_handle_validate(client, handle)) { | ||
928 | pr_err("%s: invalid handle passed to map_dma.\n", | ||
929 | __func__); | ||
930 | mutex_unlock(&client->lock); | ||
931 | return ERR_PTR(-EINVAL); | ||
932 | } | ||
933 | buffer = handle->buffer; | ||
934 | table = buffer->sg_table; | ||
935 | mutex_unlock(&client->lock); | ||
936 | return table; | ||
937 | } | ||
938 | EXPORT_SYMBOL(ion_sg_table); | ||
939 | |||
940 | static void ion_buffer_sync_for_device(struct ion_buffer *buffer, | 889 | static void ion_buffer_sync_for_device(struct ion_buffer *buffer, |
941 | struct device *dev, | 890 | struct device *dev, |
942 | enum dma_data_direction direction); | 891 | enum dma_data_direction direction); |
@@ -1358,7 +1307,8 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
1358 | struct ion_handle *handle; | 1307 | struct ion_handle *handle; |
1359 | 1308 | ||
1360 | mutex_lock(&client->lock); | 1309 | mutex_lock(&client->lock); |
1361 | handle = ion_handle_get_by_id_nolock(client, data.handle.handle); | 1310 | handle = ion_handle_get_by_id_nolock(client, |
1311 | data.handle.handle); | ||
1362 | if (IS_ERR(handle)) { | 1312 | if (IS_ERR(handle)) { |
1363 | mutex_unlock(&client->lock); | 1313 | mutex_unlock(&client->lock); |
1364 | return PTR_ERR(handle); | 1314 | return PTR_ERR(handle); |
@@ -1588,8 +1538,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) | |||
1588 | { | 1538 | { |
1589 | struct dentry *debug_file; | 1539 | struct dentry *debug_file; |
1590 | 1540 | ||
1591 | if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || | 1541 | if (!heap->ops->allocate || !heap->ops->free) |
1592 | !heap->ops->unmap_dma) | ||
1593 | pr_err("%s: can not add heap with invalid ops struct.\n", | 1542 | pr_err("%s: can not add heap with invalid ops struct.\n", |
1594 | __func__); | 1543 | __func__); |
1595 | 1544 | ||
@@ -1703,37 +1652,3 @@ void ion_device_destroy(struct ion_device *dev) | |||
1703 | } | 1652 | } |
1704 | EXPORT_SYMBOL(ion_device_destroy); | 1653 | EXPORT_SYMBOL(ion_device_destroy); |
1705 | 1654 | ||
1706 | void __init ion_reserve(struct ion_platform_data *data) | ||
1707 | { | ||
1708 | int i; | ||
1709 | |||
1710 | for (i = 0; i < data->nr; i++) { | ||
1711 | if (data->heaps[i].size == 0) | ||
1712 | continue; | ||
1713 | |||
1714 | if (data->heaps[i].base == 0) { | ||
1715 | phys_addr_t paddr; | ||
1716 | |||
1717 | paddr = memblock_alloc_base(data->heaps[i].size, | ||
1718 | data->heaps[i].align, | ||
1719 | MEMBLOCK_ALLOC_ANYWHERE); | ||
1720 | if (!paddr) { | ||
1721 | pr_err("%s: error allocating memblock for heap %d\n", | ||
1722 | __func__, i); | ||
1723 | continue; | ||
1724 | } | ||
1725 | data->heaps[i].base = paddr; | ||
1726 | } else { | ||
1727 | int ret = memblock_reserve(data->heaps[i].base, | ||
1728 | data->heaps[i].size); | ||
1729 | if (ret) | ||
1730 | pr_err("memblock reserve of %zx@%lx failed\n", | ||
1731 | data->heaps[i].size, | ||
1732 | data->heaps[i].base); | ||
1733 | } | ||
1734 | pr_info("%s: %s reserved base %lx size %zu\n", __func__, | ||
1735 | data->heaps[i].name, | ||
1736 | data->heaps[i].base, | ||
1737 | data->heaps[i].size); | ||
1738 | } | ||
1739 | } | ||
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index a1331fc169a1..93dafb4586e4 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -73,17 +73,6 @@ struct ion_platform_data { | |||
73 | }; | 73 | }; |
74 | 74 | ||
75 | /** | 75 | /** |
76 | * ion_reserve() - reserve memory for ion heaps if applicable | ||
77 | * @data: platform data specifying starting physical address and | ||
78 | * size | ||
79 | * | ||
80 | * Calls memblock reserve to set aside memory for heaps that are | ||
81 | * located at specific memory addresses or of specific sizes not | ||
82 | * managed by the kernel | ||
83 | */ | ||
84 | void ion_reserve(struct ion_platform_data *data); | ||
85 | |||
86 | /** | ||
87 | * ion_client_create() - allocate a client and returns it | 76 | * ion_client_create() - allocate a client and returns it |
88 | * @dev: the global ion device | 77 | * @dev: the global ion device |
89 | * @name: used for debugging | 78 | * @name: used for debugging |
@@ -130,36 +119,6 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len, | |||
130 | void ion_free(struct ion_client *client, struct ion_handle *handle); | 119 | void ion_free(struct ion_client *client, struct ion_handle *handle); |
131 | 120 | ||
132 | /** | 121 | /** |
133 | * ion_phys - returns the physical address and len of a handle | ||
134 | * @client: the client | ||
135 | * @handle: the handle | ||
136 | * @addr: a pointer to put the address in | ||
137 | * @len: a pointer to put the length in | ||
138 | * | ||
139 | * This function queries the heap for a particular handle to get the | ||
140 | * handle's physical address. It't output is only correct if | ||
141 | * a heap returns physically contiguous memory -- in other cases | ||
142 | * this api should not be implemented -- ion_sg_table should be used | ||
143 | * instead. Returns -EINVAL if the handle is invalid. This has | ||
144 | * no implications on the reference counting of the handle -- | ||
145 | * the returned value may not be valid if the caller is not | ||
146 | * holding a reference. | ||
147 | */ | ||
148 | int ion_phys(struct ion_client *client, struct ion_handle *handle, | ||
149 | ion_phys_addr_t *addr, size_t *len); | ||
150 | |||
151 | /** | ||
152 | * ion_map_dma - return an sg_table describing a handle | ||
153 | * @client: the client | ||
154 | * @handle: the handle | ||
155 | * | ||
156 | * This function returns the sg_table describing | ||
157 | * a particular ion handle. | ||
158 | */ | ||
159 | struct sg_table *ion_sg_table(struct ion_client *client, | ||
160 | struct ion_handle *handle); | ||
161 | |||
162 | /** | ||
163 | * ion_map_kernel - create mapping for the given handle | 122 | * ion_map_kernel - create mapping for the given handle |
164 | * @client: the client | 123 | * @client: the client |
165 | * @handle: handle to map | 124 | * @handle: handle to map |
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 1fb0d81556da..c4f0795fb62e 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -25,6 +25,8 @@ | |||
25 | #include "ion.h" | 25 | #include "ion.h" |
26 | #include "ion_priv.h" | 26 | #include "ion_priv.h" |
27 | 27 | ||
28 | #define ION_CARVEOUT_ALLOCATE_FAIL -1 | ||
29 | |||
28 | struct ion_carveout_heap { | 30 | struct ion_carveout_heap { |
29 | struct ion_heap heap; | 31 | struct ion_heap heap; |
30 | struct gen_pool *pool; | 32 | struct gen_pool *pool; |
@@ -56,19 +58,6 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, | |||
56 | gen_pool_free(carveout_heap->pool, addr, size); | 58 | gen_pool_free(carveout_heap->pool, addr, size); |
57 | } | 59 | } |
58 | 60 | ||
59 | static int ion_carveout_heap_phys(struct ion_heap *heap, | ||
60 | struct ion_buffer *buffer, | ||
61 | ion_phys_addr_t *addr, size_t *len) | ||
62 | { | ||
63 | struct sg_table *table = buffer->priv_virt; | ||
64 | struct page *page = sg_page(table->sgl); | ||
65 | ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); | ||
66 | |||
67 | *addr = paddr; | ||
68 | *len = buffer->size; | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int ion_carveout_heap_allocate(struct ion_heap *heap, | 61 | static int ion_carveout_heap_allocate(struct ion_heap *heap, |
73 | struct ion_buffer *buffer, | 62 | struct ion_buffer *buffer, |
74 | unsigned long size, unsigned long align, | 63 | unsigned long size, unsigned long align, |
@@ -95,7 +84,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, | |||
95 | } | 84 | } |
96 | 85 | ||
97 | sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); | 86 | sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); |
98 | buffer->priv_virt = table; | 87 | buffer->sg_table = table; |
99 | 88 | ||
100 | return 0; | 89 | return 0; |
101 | 90 | ||
@@ -109,7 +98,7 @@ err_free: | |||
109 | static void ion_carveout_heap_free(struct ion_buffer *buffer) | 98 | static void ion_carveout_heap_free(struct ion_buffer *buffer) |
110 | { | 99 | { |
111 | struct ion_heap *heap = buffer->heap; | 100 | struct ion_heap *heap = buffer->heap; |
112 | struct sg_table *table = buffer->priv_virt; | 101 | struct sg_table *table = buffer->sg_table; |
113 | struct page *page = sg_page(table->sgl); | 102 | struct page *page = sg_page(table->sgl); |
114 | ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); | 103 | ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); |
115 | 104 | ||
@@ -124,23 +113,9 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer) | |||
124 | kfree(table); | 113 | kfree(table); |
125 | } | 114 | } |
126 | 115 | ||
127 | static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap, | ||
128 | struct ion_buffer *buffer) | ||
129 | { | ||
130 | return buffer->priv_virt; | ||
131 | } | ||
132 | |||
133 | static void ion_carveout_heap_unmap_dma(struct ion_heap *heap, | ||
134 | struct ion_buffer *buffer) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | static struct ion_heap_ops carveout_heap_ops = { | 116 | static struct ion_heap_ops carveout_heap_ops = { |
139 | .allocate = ion_carveout_heap_allocate, | 117 | .allocate = ion_carveout_heap_allocate, |
140 | .free = ion_carveout_heap_free, | 118 | .free = ion_carveout_heap_free, |
141 | .phys = ion_carveout_heap_phys, | ||
142 | .map_dma = ion_carveout_heap_map_dma, | ||
143 | .unmap_dma = ion_carveout_heap_unmap_dma, | ||
144 | .map_user = ion_heap_map_user, | 119 | .map_user = ion_heap_map_user, |
145 | .map_kernel = ion_heap_map_kernel, | 120 | .map_kernel = ion_heap_map_kernel, |
146 | .unmap_kernel = ion_heap_unmap_kernel, | 121 | .unmap_kernel = ion_heap_unmap_kernel, |
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index e0553fee9b8a..560cf907aed5 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -75,7 +75,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap, | |||
75 | sg = sg_next(sg); | 75 | sg = sg_next(sg); |
76 | } | 76 | } |
77 | 77 | ||
78 | buffer->priv_virt = table; | 78 | buffer->sg_table = table; |
79 | chunk_heap->allocated += allocated_size; | 79 | chunk_heap->allocated += allocated_size; |
80 | return 0; | 80 | return 0; |
81 | err: | 81 | err: |
@@ -95,7 +95,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer) | |||
95 | struct ion_heap *heap = buffer->heap; | 95 | struct ion_heap *heap = buffer->heap; |
96 | struct ion_chunk_heap *chunk_heap = | 96 | struct ion_chunk_heap *chunk_heap = |
97 | container_of(heap, struct ion_chunk_heap, heap); | 97 | container_of(heap, struct ion_chunk_heap, heap); |
98 | struct sg_table *table = buffer->priv_virt; | 98 | struct sg_table *table = buffer->sg_table; |
99 | struct scatterlist *sg; | 99 | struct scatterlist *sg; |
100 | int i; | 100 | int i; |
101 | unsigned long allocated_size; | 101 | unsigned long allocated_size; |
@@ -117,22 +117,9 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer) | |||
117 | kfree(table); | 117 | kfree(table); |
118 | } | 118 | } |
119 | 119 | ||
120 | static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, | ||
121 | struct ion_buffer *buffer) | ||
122 | { | ||
123 | return buffer->priv_virt; | ||
124 | } | ||
125 | |||
126 | static void ion_chunk_heap_unmap_dma(struct ion_heap *heap, | ||
127 | struct ion_buffer *buffer) | ||
128 | { | ||
129 | } | ||
130 | |||
131 | static struct ion_heap_ops chunk_heap_ops = { | 120 | static struct ion_heap_ops chunk_heap_ops = { |
132 | .allocate = ion_chunk_heap_allocate, | 121 | .allocate = ion_chunk_heap_allocate, |
133 | .free = ion_chunk_heap_free, | 122 | .free = ion_chunk_heap_free, |
134 | .map_dma = ion_chunk_heap_map_dma, | ||
135 | .unmap_dma = ion_chunk_heap_unmap_dma, | ||
136 | .map_user = ion_heap_map_user, | 123 | .map_user = ion_heap_map_user, |
137 | .map_kernel = ion_heap_map_kernel, | 124 | .map_kernel = ion_heap_map_kernel, |
138 | .unmap_kernel = ion_heap_unmap_kernel, | 125 | .unmap_kernel = ion_heap_unmap_kernel, |
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index a3446da4fdc2..d5ff3a282dfd 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -78,6 +78,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, | |||
78 | goto free_table; | 78 | goto free_table; |
79 | /* keep this for memory release */ | 79 | /* keep this for memory release */ |
80 | buffer->priv_virt = info; | 80 | buffer->priv_virt = info; |
81 | buffer->sg_table = info->table; | ||
81 | dev_dbg(dev, "Allocate buffer %p\n", buffer); | 82 | dev_dbg(dev, "Allocate buffer %p\n", buffer); |
82 | return 0; | 83 | return 0; |
83 | 84 | ||
@@ -105,36 +106,6 @@ static void ion_cma_free(struct ion_buffer *buffer) | |||
105 | kfree(info); | 106 | kfree(info); |
106 | } | 107 | } |
107 | 108 | ||
108 | /* return physical address in addr */ | ||
109 | static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, | ||
110 | ion_phys_addr_t *addr, size_t *len) | ||
111 | { | ||
112 | struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); | ||
113 | struct device *dev = cma_heap->dev; | ||
114 | struct ion_cma_buffer_info *info = buffer->priv_virt; | ||
115 | |||
116 | dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer, | ||
117 | &info->handle); | ||
118 | |||
119 | *addr = info->handle; | ||
120 | *len = buffer->size; | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap, | ||
126 | struct ion_buffer *buffer) | ||
127 | { | ||
128 | struct ion_cma_buffer_info *info = buffer->priv_virt; | ||
129 | |||
130 | return info->table; | ||
131 | } | ||
132 | |||
133 | static void ion_cma_heap_unmap_dma(struct ion_heap *heap, | ||
134 | struct ion_buffer *buffer) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, | 109 | static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, |
139 | struct vm_area_struct *vma) | 110 | struct vm_area_struct *vma) |
140 | { | 111 | { |
@@ -162,9 +133,6 @@ static void ion_cma_unmap_kernel(struct ion_heap *heap, | |||
162 | static struct ion_heap_ops ion_cma_ops = { | 133 | static struct ion_heap_ops ion_cma_ops = { |
163 | .allocate = ion_cma_allocate, | 134 | .allocate = ion_cma_allocate, |
164 | .free = ion_cma_free, | 135 | .free = ion_cma_free, |
165 | .map_dma = ion_cma_heap_map_dma, | ||
166 | .unmap_dma = ion_cma_heap_unmap_dma, | ||
167 | .phys = ion_cma_phys, | ||
168 | .map_user = ion_cma_mmap, | 136 | .map_user = ion_cma_mmap, |
169 | .map_kernel = ion_cma_map_kernel, | 137 | .map_kernel = ion_cma_map_kernel, |
170 | .unmap_kernel = ion_cma_unmap_kernel, | 138 | .unmap_kernel = ion_cma_unmap_kernel, |
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 0239883bffb7..25e4bb2dac87 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -42,8 +42,6 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); | |||
42 | * @size: size of the buffer | 42 | * @size: size of the buffer |
43 | * @priv_virt: private data to the buffer representable as | 43 | * @priv_virt: private data to the buffer representable as |
44 | * a void * | 44 | * a void * |
45 | * @priv_phys: private data to the buffer representable as | ||
46 | * an ion_phys_addr_t (and someday a phys_addr_t) | ||
47 | * @lock: protects the buffers cnt fields | 45 | * @lock: protects the buffers cnt fields |
48 | * @kmap_cnt: number of times the buffer is mapped to the kernel | 46 | * @kmap_cnt: number of times the buffer is mapped to the kernel |
49 | * @vaddr: the kernel mapping if kmap_cnt is not zero | 47 | * @vaddr: the kernel mapping if kmap_cnt is not zero |
@@ -69,10 +67,7 @@ struct ion_buffer { | |||
69 | unsigned long flags; | 67 | unsigned long flags; |
70 | unsigned long private_flags; | 68 | unsigned long private_flags; |
71 | size_t size; | 69 | size_t size; |
72 | union { | 70 | void *priv_virt; |
73 | void *priv_virt; | ||
74 | ion_phys_addr_t priv_phys; | ||
75 | }; | ||
76 | struct mutex lock; | 71 | struct mutex lock; |
77 | int kmap_cnt; | 72 | int kmap_cnt; |
78 | void *vaddr; | 73 | void *vaddr; |
@@ -91,10 +86,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer); | |||
91 | * struct ion_heap_ops - ops to operate on a given heap | 86 | * struct ion_heap_ops - ops to operate on a given heap |
92 | * @allocate: allocate memory | 87 | * @allocate: allocate memory |
93 | * @free: free memory | 88 | * @free: free memory |
94 | * @phys get physical address of a buffer (only define on | ||
95 | * physically contiguous heaps) | ||
96 | * @map_dma map the memory for dma to a scatterlist | ||
97 | * @unmap_dma unmap the memory for dma | ||
98 | * @map_kernel map memory to the kernel | 89 | * @map_kernel map memory to the kernel |
99 | * @unmap_kernel unmap memory to the kernel | 90 | * @unmap_kernel unmap memory to the kernel |
100 | * @map_user map memory to userspace | 91 | * @map_user map memory to userspace |
@@ -111,11 +102,6 @@ struct ion_heap_ops { | |||
111 | struct ion_buffer *buffer, unsigned long len, | 102 | struct ion_buffer *buffer, unsigned long len, |
112 | unsigned long align, unsigned long flags); | 103 | unsigned long align, unsigned long flags); |
113 | void (*free)(struct ion_buffer *buffer); | 104 | void (*free)(struct ion_buffer *buffer); |
114 | int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer, | ||
115 | ion_phys_addr_t *addr, size_t *len); | ||
116 | struct sg_table * (*map_dma)(struct ion_heap *heap, | ||
117 | struct ion_buffer *buffer); | ||
118 | void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer); | ||
119 | void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); | 105 | void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); |
120 | void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); | 106 | void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); |
121 | int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, | 107 | int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, |
@@ -328,20 +314,6 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *); | |||
328 | void ion_cma_heap_destroy(struct ion_heap *); | 314 | void ion_cma_heap_destroy(struct ion_heap *); |
329 | 315 | ||
330 | /** | 316 | /** |
331 | * kernel api to allocate/free from carveout -- used when carveout is | ||
332 | * used to back an architecture specific custom heap | ||
333 | */ | ||
334 | ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, | ||
335 | unsigned long align); | ||
336 | void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, | ||
337 | unsigned long size); | ||
338 | /** | ||
339 | * The carveout heap returns physical addresses, since 0 may be a valid | ||
340 | * physical address, this is used to indicate allocation failed | ||
341 | */ | ||
342 | #define ION_CARVEOUT_ALLOCATE_FAIL -1 | ||
343 | |||
344 | /** | ||
345 | * functions for creating and destroying a heap pool -- allows you | 317 | * functions for creating and destroying a heap pool -- allows you |
346 | * to keep a pool of pre allocated memory to use from your heap. Keeping | 318 | * to keep a pool of pre allocated memory to use from your heap. Keeping |
347 | * a pool of memory that is ready for dma, ie any cached mapping have been | 319 | * a pool of memory that is ready for dma, ie any cached mapping have been |
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index b69dfc706440..b697c6da0c3a 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -164,7 +164,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap, | |||
164 | list_del(&page->lru); | 164 | list_del(&page->lru); |
165 | } | 165 | } |
166 | 166 | ||
167 | buffer->priv_virt = table; | 167 | buffer->sg_table = table; |
168 | return 0; | 168 | return 0; |
169 | 169 | ||
170 | free_table: | 170 | free_table: |
@@ -199,17 +199,6 @@ static void ion_system_heap_free(struct ion_buffer *buffer) | |||
199 | kfree(table); | 199 | kfree(table); |
200 | } | 200 | } |
201 | 201 | ||
202 | static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, | ||
203 | struct ion_buffer *buffer) | ||
204 | { | ||
205 | return buffer->priv_virt; | ||
206 | } | ||
207 | |||
208 | static void ion_system_heap_unmap_dma(struct ion_heap *heap, | ||
209 | struct ion_buffer *buffer) | ||
210 | { | ||
211 | } | ||
212 | |||
213 | static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, | 202 | static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, |
214 | int nr_to_scan) | 203 | int nr_to_scan) |
215 | { | 204 | { |
@@ -243,8 +232,6 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, | |||
243 | static struct ion_heap_ops system_heap_ops = { | 232 | static struct ion_heap_ops system_heap_ops = { |
244 | .allocate = ion_system_heap_allocate, | 233 | .allocate = ion_system_heap_allocate, |
245 | .free = ion_system_heap_free, | 234 | .free = ion_system_heap_free, |
246 | .map_dma = ion_system_heap_map_dma, | ||
247 | .unmap_dma = ion_system_heap_unmap_dma, | ||
248 | .map_kernel = ion_heap_map_kernel, | 235 | .map_kernel = ion_heap_map_kernel, |
249 | .unmap_kernel = ion_heap_unmap_kernel, | 236 | .unmap_kernel = ion_heap_unmap_kernel, |
250 | .map_user = ion_heap_map_user, | 237 | .map_user = ion_heap_map_user, |
@@ -358,7 +345,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, | |||
358 | 345 | ||
359 | sg_set_page(table->sgl, page, len, 0); | 346 | sg_set_page(table->sgl, page, len, 0); |
360 | 347 | ||
361 | buffer->priv_virt = table; | 348 | buffer->sg_table = table; |
362 | 349 | ||
363 | ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); | 350 | ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); |
364 | 351 | ||
@@ -375,7 +362,7 @@ free_pages: | |||
375 | 362 | ||
376 | static void ion_system_contig_heap_free(struct ion_buffer *buffer) | 363 | static void ion_system_contig_heap_free(struct ion_buffer *buffer) |
377 | { | 364 | { |
378 | struct sg_table *table = buffer->priv_virt; | 365 | struct sg_table *table = buffer->sg_table; |
379 | struct page *page = sg_page(table->sgl); | 366 | struct page *page = sg_page(table->sgl); |
380 | unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; | 367 | unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; |
381 | unsigned long i; | 368 | unsigned long i; |
@@ -386,34 +373,9 @@ static void ion_system_contig_heap_free(struct ion_buffer *buffer) | |||
386 | kfree(table); | 373 | kfree(table); |
387 | } | 374 | } |
388 | 375 | ||
389 | static int ion_system_contig_heap_phys(struct ion_heap *heap, | ||
390 | struct ion_buffer *buffer, | ||
391 | ion_phys_addr_t *addr, size_t *len) | ||
392 | { | ||
393 | struct sg_table *table = buffer->priv_virt; | ||
394 | struct page *page = sg_page(table->sgl); | ||
395 | *addr = page_to_phys(page); | ||
396 | *len = buffer->size; | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, | ||
401 | struct ion_buffer *buffer) | ||
402 | { | ||
403 | return buffer->priv_virt; | ||
404 | } | ||
405 | |||
406 | static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap, | ||
407 | struct ion_buffer *buffer) | ||
408 | { | ||
409 | } | ||
410 | |||
411 | static struct ion_heap_ops kmalloc_ops = { | 376 | static struct ion_heap_ops kmalloc_ops = { |
412 | .allocate = ion_system_contig_heap_allocate, | 377 | .allocate = ion_system_contig_heap_allocate, |
413 | .free = ion_system_contig_heap_free, | 378 | .free = ion_system_contig_heap_free, |
414 | .phys = ion_system_contig_heap_phys, | ||
415 | .map_dma = ion_system_contig_heap_map_dma, | ||
416 | .unmap_dma = ion_system_contig_heap_unmap_dma, | ||
417 | .map_kernel = ion_heap_map_kernel, | 379 | .map_kernel = ion_heap_map_kernel, |
418 | .unmap_kernel = ion_heap_unmap_kernel, | 380 | .unmap_kernel = ion_heap_unmap_kernel, |
419 | .map_user = ion_heap_map_user, | 381 | .map_user = ion_heap_map_user, |
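Taken together, the ion hunks above move scatterlist handling into the core: a heap's allocate() is now expected to fill buffer->sg_table itself (ion_buffer_create() warns and fails with -EINVAL if it does not), so the phys/map_dma/unmap_dma boilerplate disappears from every ion_heap_ops table. A hedged sketch of the resulting minimal heap, with hypothetical names and only the ops the checks shown in ion.c actually require:

        #include <linux/scatterlist.h>
        #include <linux/slab.h>
        #include "ion.h"
        #include "ion_priv.h"

        static int example_heap_allocate(struct ion_heap *heap,
                                         struct ion_buffer *buffer,
                                         unsigned long len, unsigned long align,
                                         unsigned long flags)
        {
                struct sg_table *table;

                table = kzalloc(sizeof(*table), GFP_KERNEL);
                if (!table)
                        return -ENOMEM;
                if (sg_alloc_table(table, 1, GFP_KERNEL)) {
                        kfree(table);
                        return -ENOMEM;
                }
                /* ... obtain backing pages and sg_set_page() them here ... */

                /* the core now reads the table straight from the buffer */
                buffer->sg_table = table;
                return 0;
        }

        static void example_heap_free(struct ion_buffer *buffer)
        {
                struct sg_table *table = buffer->sg_table;

                /* ... release the backing pages ... */
                sg_free_table(table);
                kfree(table);
        }

        static struct ion_heap_ops example_heap_ops = {
                .allocate     = example_heap_allocate,
                .free         = example_heap_free,
                /* no .phys/.map_dma/.unmap_dma callbacks any more */
                .map_kernel   = ion_heap_map_kernel,
                .unmap_kernel = ion_heap_unmap_kernel,
                .map_user     = ion_heap_map_user,
        };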
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 10f94ec34536..608403c7586b 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -946,10 +946,8 @@ static int usbduxfast_auto_attach(struct comedi_device *dev, | |||
946 | } | 946 | } |
947 | 947 | ||
948 | devpriv->urb = usb_alloc_urb(0, GFP_KERNEL); | 948 | devpriv->urb = usb_alloc_urb(0, GFP_KERNEL); |
949 | if (!devpriv->urb) { | 949 | if (!devpriv->urb) |
950 | dev_err(dev->class_dev, "Could not alloc. urb\n"); | ||
951 | return -ENOMEM; | 950 | return -ENOMEM; |
952 | } | ||
953 | 951 | ||
954 | devpriv->inbuf = kmalloc(SIZEINBUF, GFP_KERNEL); | 952 | devpriv->inbuf = kmalloc(SIZEINBUF, GFP_KERNEL); |
955 | if (!devpriv->inbuf) | 953 | if (!devpriv->inbuf) |
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index b7337fd813d5..47b69cbdb45b 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -297,11 +297,10 @@ static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p, | |||
297 | static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer, | 297 | static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer, |
298 | unsigned long size) | 298 | unsigned long size) |
299 | { | 299 | { |
300 | int rc, retval; | 300 | int retval; |
301 | unsigned char rw_data; | 301 | unsigned char rw_data; |
302 | struct hostif_hdr *hdr; | 302 | struct hostif_hdr *hdr; |
303 | hdr = (struct hostif_hdr *)buffer; | 303 | hdr = (struct hostif_hdr *)buffer; |
304 | rc = 0; | ||
305 | 304 | ||
306 | DPRINTK(4, "size=%d\n", hdr->size); | 305 | DPRINTK(4, "size=%d\n", hdr->size); |
307 | if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) { | 306 | if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) { |
@@ -711,7 +710,6 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index) | |||
711 | int rc = 0; | 710 | int rc = 0; |
712 | int retval; | 711 | int retval; |
713 | unsigned char *data_buf; | 712 | unsigned char *data_buf; |
714 | data_buf = NULL; | ||
715 | 713 | ||
716 | data_buf = kmalloc(sizeof(u32), GFP_KERNEL); | 714 | data_buf = kmalloc(sizeof(u32), GFP_KERNEL); |
717 | if (!data_buf) { | 715 | if (!data_buf) { |
@@ -732,8 +730,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index) | |||
732 | goto error_out; | 730 | goto error_out; |
733 | } | 731 | } |
734 | error_out: | 732 | error_out: |
735 | if (data_buf) | 733 | kfree(data_buf); |
736 | kfree(data_buf); | ||
737 | return rc; | 734 | return rc; |
738 | } | 735 | } |
739 | 736 | ||
@@ -744,7 +741,7 @@ static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address, | |||
744 | int rc = 0; | 741 | int rc = 0; |
745 | int retval; | 742 | int retval; |
746 | unsigned char *read_buf; | 743 | unsigned char *read_buf; |
747 | read_buf = NULL; | 744 | |
748 | read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL); | 745 | read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL); |
749 | if (!read_buf) { | 746 | if (!read_buf) { |
750 | rc = 1; | 747 | rc = 1; |
@@ -763,8 +760,7 @@ static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address, | |||
763 | goto error_out; | 760 | goto error_out; |
764 | } | 761 | } |
765 | error_out: | 762 | error_out: |
766 | if (read_buf) | 763 | kfree(read_buf); |
767 | kfree(read_buf); | ||
768 | return rc; | 764 | return rc; |
769 | } | 765 | } |
770 | 766 | ||
@@ -778,8 +774,6 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv, | |||
778 | int length; | 774 | int length; |
779 | const struct firmware *fw_entry = NULL; | 775 | const struct firmware *fw_entry = NULL; |
780 | 776 | ||
781 | rom_buf = NULL; | ||
782 | |||
783 | /* buffer allocate */ | 777 | /* buffer allocate */ |
784 | rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL); | 778 | rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL); |
785 | if (!rom_buf) { | 779 | if (!rom_buf) { |
@@ -879,8 +873,7 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv, | |||
879 | release_firmware(fw_entry); | 873 | release_firmware(fw_entry); |
880 | error_out0: | 874 | error_out0: |
881 | sdio_release_host(card->func); | 875 | sdio_release_host(card->func); |
882 | if (rom_buf) | 876 | kfree(rom_buf); |
883 | kfree(rom_buf); | ||
884 | return rc; | 877 | return rc; |
885 | } | 878 | } |
886 | 879 | ||
@@ -1141,7 +1134,6 @@ static void ks7010_sdio_remove(struct sdio_func *func) | |||
1141 | int ret; | 1134 | int ret; |
1142 | struct ks_sdio_card *card; | 1135 | struct ks_sdio_card *card; |
1143 | struct ks_wlan_private *priv; | 1136 | struct ks_wlan_private *priv; |
1144 | struct net_device *netdev; | ||
1145 | DPRINTK(1, "ks7010_sdio_remove()\n"); | 1137 | DPRINTK(1, "ks7010_sdio_remove()\n"); |
1146 | 1138 | ||
1147 | card = sdio_get_drvdata(func); | 1139 | card = sdio_get_drvdata(func); |
@@ -1151,8 +1143,9 @@ static void ks7010_sdio_remove(struct sdio_func *func) | |||
1151 | 1143 | ||
1152 | DPRINTK(1, "priv = card->priv\n"); | 1144 | DPRINTK(1, "priv = card->priv\n"); |
1153 | priv = card->priv; | 1145 | priv = card->priv; |
1154 | netdev = priv->net_dev; | ||
1155 | if (priv) { | 1146 | if (priv) { |
1147 | struct net_device *netdev = priv->net_dev; | ||
1148 | |||
1156 | ks_wlan_net_stop(netdev); | 1149 | ks_wlan_net_stop(netdev); |
1157 | DPRINTK(1, "ks_wlan_net_stop\n"); | 1150 | DPRINTK(1, "ks_wlan_net_stop\n"); |
1158 | 1151 | ||
@@ -1199,9 +1192,7 @@ static void ks7010_sdio_remove(struct sdio_func *func) | |||
1199 | unregister_netdev(netdev); | 1192 | unregister_netdev(netdev); |
1200 | 1193 | ||
1201 | trx_device_exit(priv); | 1194 | trx_device_exit(priv); |
1202 | if (priv->ks_wlan_hw.read_buf) { | 1195 | kfree(priv->ks_wlan_hw.read_buf); |
1203 | kfree(priv->ks_wlan_hw.read_buf); | ||
1204 | } | ||
1205 | free_netdev(priv->net_dev); | 1196 | free_netdev(priv->net_dev); |
1206 | card->priv = NULL; | 1197 | card->priv = NULL; |
1207 | } | 1198 | } |
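The ks7010 cleanups above rely on kfree(NULL) being a no-op: the "if (buf)" guards around kfree() and the "buf = NULL;" pre-initialisations that only existed to keep those guards safe are both redundant. For illustration only, with hypothetical names, the resulting single-exit shape looks like:

        #include <linux/slab.h>
        #include <linux/string.h>
        #include <linux/types.h>

        static int example_update_index(u32 index, size_t size)
        {
                unsigned char *data_buf;
                int rc = 0;

                data_buf = kmalloc(size, GFP_KERNEL);
                if (!data_buf) {
                        rc = -ENOMEM;
                        goto error_out;
                }

                memcpy(data_buf, &index, sizeof(index));
                /* ... write data_buf to the device, set rc on failure ... */

        error_out:
                kfree(data_buf);        /* no "if (data_buf)" guard needed */
                return rc;
        }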
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
index e14c109b3cab..d332678781d2 100644
--- a/drivers/staging/ks7010/michael_mic.c
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -20,15 +20,21 @@ | |||
20 | #define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24) | 20 | #define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24) |
21 | 21 | ||
22 | // Convert from UInt32 to Byte[] in a portable way | 22 | // Convert from UInt32 to Byte[] in a portable way |
23 | #define putUInt32( A, B, C ) A[B+0] = (uint8_t) (C & 0xff); \ | 23 | #define putUInt32(A, B, C) \ |
24 | A[B+1] = (uint8_t) ((C>>8) & 0xff); \ | 24 | do { \ |
25 | A[B+2] = (uint8_t) ((C>>16) & 0xff); \ | 25 | A[B + 0] = (uint8_t)(C & 0xff); \ |
26 | A[B+3] = (uint8_t) ((C>>24) & 0xff) | 26 | A[B + 1] = (uint8_t)((C >> 8) & 0xff); \ |
27 | A[B + 2] = (uint8_t)((C >> 16) & 0xff); \ | ||
28 | A[B + 3] = (uint8_t)((C >> 24) & 0xff); \ | ||
29 | } while (0) | ||
27 | 30 | ||
28 | // Reset the state to the empty message. | 31 | // Reset the state to the empty message. |
29 | #define MichaelClear( A ) A->L = A->K0; \ | 32 | #define MichaelClear(A) \ |
30 | A->R = A->K1; \ | 33 | do { \ |
31 | A->nBytesInM = 0; | 34 | A->L = A->K0; \ |
35 | A->R = A->K1; \ | ||
36 | A->nBytesInM = 0; \ | ||
37 | } while (0) | ||
32 | 38 | ||
33 | static | 39 | static |
34 | void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t * key) | 40 | void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t * key) |
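The michael_mic change rewrites putUInt32() and MichaelClear() as do { ... } while (0) blocks, the usual way to make a multi-statement macro behave like a single statement under if/else. A small illustration of what goes wrong without the wrapper (not the driver's actual macros):

        /* Illustrative only -- hypothetical state and macros. */
        struct state { unsigned int L, R, K0, K1, nBytesInM; };

        #define CLEAR_BAD(s)  (s)->L = (s)->K0; (s)->R = (s)->K1
        #define CLEAR_GOOD(s) do { (s)->L = (s)->K0; (s)->R = (s)->K1; } while (0)

        static void reset_if_needed(struct state *s, int reset)
        {
                if (reset)
                        CLEAR_BAD(s);   /* only (s)->L is guarded by the if;
                                         * (s)->R is overwritten unconditionally */

                if (reset)
                        CLEAR_GOOD(s);  /* both assignments stay inside the if,
                                         * and the trailing ';' parses cleanly */
        }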
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 07ec540946cd..cbc9a9c5385f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -1468,11 +1468,6 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error) | |||
1468 | 1468 | ||
1469 | conn->ksnc_route = NULL; | 1469 | conn->ksnc_route = NULL; |
1470 | 1470 | ||
1471 | #if 0 /* irrelevant with only eager routes */ | ||
1472 | /* make route least favourite */ | ||
1473 | list_del(&route->ksnr_list); | ||
1474 | list_add_tail(&route->ksnr_list, &peer->ksnp_routes); | ||
1475 | #endif | ||
1476 | ksocknal_route_decref(route); /* drop conn's ref on route */ | 1471 | ksocknal_route_decref(route); /* drop conn's ref on route */ |
1477 | } | 1472 | } |
1478 | 1473 | ||
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 303576d815c6..d53da552bd61 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -2008,13 +2008,6 @@ ksocknal_connect(struct ksock_route *route) | |||
2008 | list_splice_init(&peer->ksnp_tx_queue, &zombies); | 2008 | list_splice_init(&peer->ksnp_tx_queue, &zombies); |
2009 | } | 2009 | } |
2010 | 2010 | ||
2011 | #if 0 /* irrelevant with only eager routes */ | ||
2012 | if (!route->ksnr_deleted) { | ||
2013 | /* make this route least-favourite for re-selection */ | ||
2014 | list_del(&route->ksnr_list); | ||
2015 | list_add_tail(&route->ksnr_list, &peer->ksnp_routes); | ||
2016 | } | ||
2017 | #endif | ||
2018 | write_unlock_bh(&ksocknal_data.ksnd_global_lock); | 2011 | write_unlock_bh(&ksocknal_data.ksnd_global_lock); |
2019 | 2012 | ||
2020 | ksocknal_peer_failed(peer); | 2013 | ksocknal_peer_failed(peer); |
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 910e106e221d..0897e588bd54 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -449,23 +449,7 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) | |||
449 | 449 | ||
450 | if (!msg) | 450 | if (!msg) |
451 | return; | 451 | return; |
452 | #if 0 | 452 | |
453 | CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n", | ||
454 | lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target), | ||
455 | msg->msg_target_is_router ? "t" : "", | ||
456 | msg->msg_routing ? "X" : "", | ||
457 | msg->msg_ack ? "A" : "", | ||
458 | msg->msg_sending ? "S" : "", | ||
459 | msg->msg_receiving ? "R" : "", | ||
460 | msg->msg_delayed ? "d" : "", | ||
461 | msg->msg_txcredit ? "C" : "", | ||
462 | msg->msg_peertxcredit ? "c" : "", | ||
463 | msg->msg_rtrcredit ? "F" : "", | ||
464 | msg->msg_peerrtrcredit ? "f" : "", | ||
465 | msg->msg_onactivelist ? "!" : "", | ||
466 | !msg->msg_txpeer ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid), | ||
467 | !msg->msg_rxpeer ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid)); | ||
468 | #endif | ||
469 | msg->msg_ev.status = status; | 453 | msg->msg_ev.status = status; |
470 | 454 | ||
471 | if (msg->msg_md) { | 455 | if (msg->msg_md) { |
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 57281b9e31ff..58a7401046e0 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -3213,10 +3213,10 @@ const struct inode_operations ll_file_inode_operations = { | |||
3213 | .setattr = ll_setattr, | 3213 | .setattr = ll_setattr, |
3214 | .getattr = ll_getattr, | 3214 | .getattr = ll_getattr, |
3215 | .permission = ll_inode_permission, | 3215 | .permission = ll_inode_permission, |
3216 | .setxattr = ll_setxattr, | 3216 | .setxattr = generic_setxattr, |
3217 | .getxattr = ll_getxattr, | 3217 | .getxattr = generic_getxattr, |
3218 | .listxattr = ll_listxattr, | 3218 | .listxattr = ll_listxattr, |
3219 | .removexattr = ll_removexattr, | 3219 | .removexattr = generic_removexattr, |
3220 | .fiemap = ll_fiemap, | 3220 | .fiemap = ll_fiemap, |
3221 | .get_acl = ll_get_acl, | 3221 | .get_acl = ll_get_acl, |
3222 | }; | 3222 | }; |
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 4d6d589a1677..27d3f7706d44 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -42,6 +42,7 @@ | |||
42 | #include "../include/lustre_mdc.h" | 42 | #include "../include/lustre_mdc.h" |
43 | #include "../include/lustre_intent.h" | 43 | #include "../include/lustre_intent.h" |
44 | #include <linux/compat.h> | 44 | #include <linux/compat.h> |
45 | #include <linux/xattr.h> | ||
45 | #include <linux/posix_acl_xattr.h> | 46 | #include <linux/posix_acl_xattr.h> |
46 | #include "vvp_internal.h" | 47 | #include "vvp_internal.h" |
47 | 48 | ||
@@ -933,12 +934,9 @@ static inline __u64 ll_file_maxbytes(struct inode *inode) | |||
933 | } | 934 | } |
934 | 935 | ||
935 | /* llite/xattr.c */ | 936 | /* llite/xattr.c */ |
936 | int ll_setxattr(struct dentry *dentry, struct inode *inode, | 937 | extern const struct xattr_handler *ll_xattr_handlers[]; |
937 | const char *name, const void *value, size_t size, int flags); | 938 | |
938 | ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode, | ||
939 | const char *name, void *buffer, size_t size); | ||
940 | ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); | 939 | ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); |
941 | int ll_removexattr(struct dentry *dentry, const char *name); | ||
942 | 940 | ||
943 | /** | 941 | /** |
944 | * Common IO arguments for various VFS I/O interfaces. | 942 | * Common IO arguments for various VFS I/O interfaces. |
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 546063e728db..75d568f5bb90 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -418,6 +418,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, | |||
418 | CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid)); | 418 | CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid)); |
419 | 419 | ||
420 | sb->s_op = &lustre_super_operations; | 420 | sb->s_op = &lustre_super_operations; |
421 | sb->s_xattr = ll_xattr_handlers; | ||
421 | #if THREAD_SIZE >= 8192 /*b=17630*/ | 422 | #if THREAD_SIZE >= 8192 /*b=17630*/ |
422 | sb->s_export_op = &lustre_export_operations; | 423 | sb->s_export_op = &lustre_export_operations; |
423 | #endif | 424 | #endif |
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 2c4dc69731e8..09e180170de2 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -1106,10 +1106,10 @@ const struct inode_operations ll_dir_inode_operations = { | |||
1106 | .setattr = ll_setattr, | 1106 | .setattr = ll_setattr, |
1107 | .getattr = ll_getattr, | 1107 | .getattr = ll_getattr, |
1108 | .permission = ll_inode_permission, | 1108 | .permission = ll_inode_permission, |
1109 | .setxattr = ll_setxattr, | 1109 | .setxattr = generic_setxattr, |
1110 | .getxattr = ll_getxattr, | 1110 | .getxattr = generic_getxattr, |
1111 | .listxattr = ll_listxattr, | 1111 | .listxattr = ll_listxattr, |
1112 | .removexattr = ll_removexattr, | 1112 | .removexattr = generic_removexattr, |
1113 | .get_acl = ll_get_acl, | 1113 | .get_acl = ll_get_acl, |
1114 | }; | 1114 | }; |
1115 | 1115 | ||
@@ -1117,9 +1117,9 @@ const struct inode_operations ll_special_inode_operations = { | |||
1117 | .setattr = ll_setattr, | 1117 | .setattr = ll_setattr, |
1118 | .getattr = ll_getattr, | 1118 | .getattr = ll_getattr, |
1119 | .permission = ll_inode_permission, | 1119 | .permission = ll_inode_permission, |
1120 | .setxattr = ll_setxattr, | 1120 | .setxattr = generic_setxattr, |
1121 | .getxattr = ll_getxattr, | 1121 | .getxattr = generic_getxattr, |
1122 | .listxattr = ll_listxattr, | 1122 | .listxattr = ll_listxattr, |
1123 | .removexattr = ll_removexattr, | 1123 | .removexattr = generic_removexattr, |
1124 | .get_acl = ll_get_acl, | 1124 | .get_acl = ll_get_acl, |
1125 | }; | 1125 | }; |
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c index 8c8bdfe1ad71..4601be94dd22 100644 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ b/drivers/staging/lustre/lustre/llite/symlink.c | |||
@@ -155,8 +155,8 @@ const struct inode_operations ll_fast_symlink_inode_operations = { | |||
155 | .get_link = ll_get_link, | 155 | .get_link = ll_get_link, |
156 | .getattr = ll_getattr, | 156 | .getattr = ll_getattr, |
157 | .permission = ll_inode_permission, | 157 | .permission = ll_inode_permission, |
158 | .setxattr = ll_setxattr, | 158 | .setxattr = generic_setxattr, |
159 | .getxattr = ll_getxattr, | 159 | .getxattr = generic_getxattr, |
160 | .listxattr = ll_listxattr, | 160 | .listxattr = ll_listxattr, |
161 | .removexattr = ll_removexattr, | 161 | .removexattr = generic_removexattr, |
162 | }; | 162 | }; |
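All three inode_operations tables above now use the generic VFS xattr entry points, which walk the handler array registered through sb->s_xattr earlier in this series of hunks and dispatch on the attribute name prefix; only listxattr stays Lustre-specific, since the name list comes from the server. Roughly, that dispatch looks like the sketch below. This is a simplified illustration rather than the fs/xattr.c implementation, and find_handler() is a made-up name, not a kernel API.

	/* Simplified illustration of prefix-based handler lookup. */
	static const struct xattr_handler *
	find_handler(struct super_block *sb, const char **name)
	{
		const struct xattr_handler **p;

		for (p = sb->s_xattr; *p; p++) {
			size_t len = strlen((*p)->prefix);

			if (!strncmp(*name, (*p)->prefix, len)) {
				*name += len;	/* callback sees only the suffix */
				return *p;
			}
		}
		return NULL;	/* the real code reports -EOPNOTSUPP here */
	}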
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index 98303cf85815..a02b80269b5c 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c | |||
@@ -99,46 +99,57 @@ int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type) | |||
99 | return 0; | 99 | return 0; |
100 | } | 100 | } |
101 | 101 | ||
102 | static | 102 | static int |
103 | int ll_setxattr_common(struct inode *inode, const char *name, | 103 | ll_xattr_set_common(const struct xattr_handler *handler, |
104 | const void *value, size_t size, | 104 | struct dentry *dentry, struct inode *inode, |
105 | int flags, __u64 valid) | 105 | const char *name, const void *value, size_t size, |
106 | int flags) | ||
106 | { | 107 | { |
108 | char fullname[strlen(handler->prefix) + strlen(name) + 1]; | ||
107 | struct ll_sb_info *sbi = ll_i2sbi(inode); | 109 | struct ll_sb_info *sbi = ll_i2sbi(inode); |
108 | struct ptlrpc_request *req = NULL; | 110 | struct ptlrpc_request *req = NULL; |
109 | int xattr_type, rc; | ||
110 | const char *pv = value; | 111 | const char *pv = value; |
112 | __u64 valid; | ||
113 | int rc; | ||
114 | |||
115 | if (flags == XATTR_REPLACE) { | ||
116 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1); | ||
117 | valid = OBD_MD_FLXATTRRM; | ||
118 | } else { | ||
119 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1); | ||
120 | valid = OBD_MD_FLXATTR; | ||
121 | } | ||
111 | 122 | ||
112 | xattr_type = get_xattr_type(name); | 123 | rc = xattr_type_filter(sbi, handler->flags); |
113 | rc = xattr_type_filter(sbi, xattr_type); | ||
114 | if (rc) | 124 | if (rc) |
115 | return rc; | 125 | return rc; |
116 | 126 | ||
117 | if ((xattr_type == XATTR_ACL_ACCESS_T || | 127 | if ((handler->flags == XATTR_ACL_ACCESS_T || |
118 | xattr_type == XATTR_ACL_DEFAULT_T) && | 128 | handler->flags == XATTR_ACL_DEFAULT_T) && |
119 | !inode_owner_or_capable(inode)) | 129 | !inode_owner_or_capable(inode)) |
120 | return -EPERM; | 130 | return -EPERM; |
121 | 131 | ||
122 | /* b10667: ignore lustre special xattr for now */ | 132 | /* b10667: ignore lustre special xattr for now */ |
123 | if ((xattr_type == XATTR_TRUSTED_T && strcmp(name, "trusted.lov") == 0) || | 133 | if ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) || |
124 | (xattr_type == XATTR_LUSTRE_T && strcmp(name, "lustre.lov") == 0)) | 134 | (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov"))) |
125 | return 0; | 135 | return 0; |
126 | 136 | ||
127 | /* b15587: ignore security.capability xattr for now */ | 137 | /* b15587: ignore security.capability xattr for now */ |
128 | if ((xattr_type == XATTR_SECURITY_T && | 138 | if ((handler->flags == XATTR_SECURITY_T && |
129 | strcmp(name, "security.capability") == 0)) | 139 | !strcmp(name, "capability"))) |
130 | return 0; | 140 | return 0; |
131 | 141 | ||
132 | /* LU-549: Disable security.selinux when selinux is disabled */ | 142 | /* LU-549: Disable security.selinux when selinux is disabled */ |
133 | if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() && | 143 | if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() && |
134 | strcmp(name, "security.selinux") == 0) | 144 | strcmp(name, "selinux") == 0) |
135 | return -EOPNOTSUPP; | 145 | return -EOPNOTSUPP; |
136 | 146 | ||
147 | sprintf(fullname, "%s%s", handler->prefix, name); | ||
137 | rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), | 148 | rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), |
138 | valid, name, pv, size, 0, flags, | 149 | valid, fullname, pv, size, 0, flags, |
139 | ll_i2suppgid(inode), &req); | 150 | ll_i2suppgid(inode), &req); |
140 | if (rc) { | 151 | if (rc) { |
141 | if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) { | 152 | if (rc == -EOPNOTSUPP && handler->flags == XATTR_USER_T) { |
142 | LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n"); | 153 | LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n"); |
143 | sbi->ll_flags &= ~LL_SBI_USER_XATTR; | 154 | sbi->ll_flags &= ~LL_SBI_USER_XATTR; |
144 | } | 155 | } |
@@ -149,8 +160,10 @@ int ll_setxattr_common(struct inode *inode, const char *name, | |||
149 | return 0; | 160 | return 0; |
150 | } | 161 | } |
151 | 162 | ||
152 | int ll_setxattr(struct dentry *dentry, struct inode *inode, | 163 | static int ll_xattr_set(const struct xattr_handler *handler, |
153 | const char *name, const void *value, size_t size, int flags) | 164 | struct dentry *dentry, struct inode *inode, |
165 | const char *name, const void *value, size_t size, | ||
166 | int flags) | ||
154 | { | 167 | { |
155 | LASSERT(inode); | 168 | LASSERT(inode); |
156 | LASSERT(name); | 169 | LASSERT(name); |
@@ -158,20 +171,24 @@ int ll_setxattr(struct dentry *dentry, struct inode *inode, | |||
158 | CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n", | 171 | CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n", |
159 | PFID(ll_inode2fid(inode)), inode, name); | 172 | PFID(ll_inode2fid(inode)), inode, name); |
160 | 173 | ||
161 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1); | 174 | if (!strcmp(name, "lov")) { |
162 | |||
163 | if ((strncmp(name, XATTR_TRUSTED_PREFIX, | ||
164 | sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 && | ||
165 | strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) || | ||
166 | (strncmp(name, XATTR_LUSTRE_PREFIX, | ||
167 | sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 && | ||
168 | strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) { | ||
169 | struct lov_user_md *lump = (struct lov_user_md *)value; | 175 | struct lov_user_md *lump = (struct lov_user_md *)value; |
176 | int op_type = flags == XATTR_REPLACE ? LPROC_LL_REMOVEXATTR : | ||
177 | LPROC_LL_SETXATTR; | ||
170 | int rc = 0; | 178 | int rc = 0; |
171 | 179 | ||
180 | ll_stats_ops_tally(ll_i2sbi(inode), op_type, 1); | ||
181 | |||
172 | if (size != 0 && size < sizeof(struct lov_user_md)) | 182 | if (size != 0 && size < sizeof(struct lov_user_md)) |
173 | return -EINVAL; | 183 | return -EINVAL; |
174 | 184 | ||
185 | /* | ||
186 | * It is possible to set an xattr to a "" value of zero size. | ||
187 | * For this case we are going to treat it as a removal. | ||
188 | */ | ||
189 | if (!size && lump) | ||
190 | lump = NULL; | ||
191 | |||
175 | /* Attributes that are saved via getxattr will always have | 192 | /* Attributes that are saved via getxattr will always have |
176 | * the stripe_offset as 0. Instead, the MDS should be | 193 | * the stripe_offset as 0. Instead, the MDS should be |
177 | * allowed to pick the starting OST index. b=17846 | 194 | * allowed to pick the starting OST index. b=17846 |
@@ -194,92 +211,27 @@ int ll_setxattr(struct dentry *dentry, struct inode *inode, | |||
194 | 211 | ||
195 | return rc; | 212 | return rc; |
196 | 213 | ||
197 | } else if (strcmp(name, XATTR_NAME_LMA) == 0 || | 214 | } else if (!strcmp(name, "lma") || !strcmp(name, "link")) { |
198 | strcmp(name, XATTR_NAME_LINK) == 0) | 215 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1); |
199 | return 0; | 216 | return 0; |
217 | } | ||
200 | 218 | ||
201 | return ll_setxattr_common(inode, name, value, size, flags, | 219 | return ll_xattr_set_common(handler, dentry, inode, name, value, size, |
202 | OBD_MD_FLXATTR); | 220 | flags); |
203 | } | ||
204 | |||
205 | int ll_removexattr(struct dentry *dentry, const char *name) | ||
206 | { | ||
207 | struct inode *inode = d_inode(dentry); | ||
208 | |||
209 | LASSERT(inode); | ||
210 | LASSERT(name); | ||
211 | |||
212 | CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n", | ||
213 | PFID(ll_inode2fid(inode)), inode, name); | ||
214 | |||
215 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1); | ||
216 | return ll_setxattr_common(inode, name, NULL, 0, 0, | ||
217 | OBD_MD_FLXATTRRM); | ||
218 | } | 221 | } |
219 | 222 | ||
220 | static | 223 | static int |
221 | int ll_getxattr_common(struct inode *inode, const char *name, | 224 | ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer, |
222 | void *buffer, size_t size, __u64 valid) | 225 | size_t size, __u64 valid) |
223 | { | 226 | { |
227 | struct ll_inode_info *lli = ll_i2info(inode); | ||
224 | struct ll_sb_info *sbi = ll_i2sbi(inode); | 228 | struct ll_sb_info *sbi = ll_i2sbi(inode); |
225 | struct ptlrpc_request *req = NULL; | 229 | struct ptlrpc_request *req = NULL; |
226 | struct mdt_body *body; | 230 | struct mdt_body *body; |
227 | int xattr_type, rc; | ||
228 | void *xdata; | 231 | void *xdata; |
229 | struct ll_inode_info *lli = ll_i2info(inode); | 232 | int rc; |
230 | |||
231 | CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n", | ||
232 | PFID(ll_inode2fid(inode)), inode); | ||
233 | |||
234 | /* listxattr have slightly different behavior from of ext3: | ||
235 | * without 'user_xattr' ext3 will list all xattr names but | ||
236 | * filtered out "^user..*"; we list them all for simplicity. | ||
237 | */ | ||
238 | if (!name) { | ||
239 | xattr_type = XATTR_OTHER_T; | ||
240 | goto do_getxattr; | ||
241 | } | ||
242 | |||
243 | xattr_type = get_xattr_type(name); | ||
244 | rc = xattr_type_filter(sbi, xattr_type); | ||
245 | if (rc) | ||
246 | return rc; | ||
247 | |||
248 | /* b15587: ignore security.capability xattr for now */ | ||
249 | if ((xattr_type == XATTR_SECURITY_T && | ||
250 | strcmp(name, "security.capability") == 0)) | ||
251 | return -ENODATA; | ||
252 | |||
253 | /* LU-549: Disable security.selinux when selinux is disabled */ | ||
254 | if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() && | ||
255 | strcmp(name, "security.selinux") == 0) | ||
256 | return -EOPNOTSUPP; | ||
257 | |||
258 | #ifdef CONFIG_FS_POSIX_ACL | ||
259 | /* posix acl is under protection of LOOKUP lock. when calling to this, | ||
260 | * we just have path resolution to the target inode, so we have great | ||
261 | * chance that cached ACL is uptodate. | ||
262 | */ | ||
263 | if (xattr_type == XATTR_ACL_ACCESS_T) { | ||
264 | struct posix_acl *acl; | ||
265 | |||
266 | spin_lock(&lli->lli_lock); | ||
267 | acl = posix_acl_dup(lli->lli_posix_acl); | ||
268 | spin_unlock(&lli->lli_lock); | ||
269 | |||
270 | if (!acl) | ||
271 | return -ENODATA; | ||
272 | 233 | ||
273 | rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); | 234 | if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T) { |
274 | posix_acl_release(acl); | ||
275 | return rc; | ||
276 | } | ||
277 | if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode)) | ||
278 | return -ENODATA; | ||
279 | #endif | ||
280 | |||
281 | do_getxattr: | ||
282 | if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) { | ||
283 | rc = ll_xattr_cache_get(inode, name, buffer, size, valid); | 235 | rc = ll_xattr_cache_get(inode, name, buffer, size, valid); |
284 | if (rc == -EAGAIN) | 236 | if (rc == -EAGAIN) |
285 | goto getxattr_nocache; | 237 | goto getxattr_nocache; |
@@ -340,7 +292,7 @@ getxattr_nocache: | |||
340 | } | 292 | } |
341 | 293 | ||
342 | out_xattr: | 294 | out_xattr: |
343 | if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) { | 295 | if (rc == -EOPNOTSUPP && type == XATTR_USER_T) { |
344 | LCONSOLE_INFO( | 296 | LCONSOLE_INFO( |
345 | "%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n", | 297 | "%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n", |
346 | ll_get_fsname(inode->i_sb, NULL, 0), rc); | 298 | ll_get_fsname(inode->i_sb, NULL, 0), rc); |
@@ -351,8 +303,63 @@ out: | |||
351 | return rc; | 303 | return rc; |
352 | } | 304 | } |
353 | 305 | ||
354 | ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode, | 306 | static int ll_xattr_get_common(const struct xattr_handler *handler, |
355 | const char *name, void *buffer, size_t size) | 307 | struct dentry *dentry, struct inode *inode, |
308 | const char *name, void *buffer, size_t size) | ||
309 | { | ||
310 | char fullname[strlen(handler->prefix) + strlen(name) + 1]; | ||
311 | struct ll_sb_info *sbi = ll_i2sbi(inode); | ||
312 | struct ll_inode_info *lli = ll_i2info(inode); | ||
313 | int rc; | ||
314 | |||
315 | CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n", | ||
316 | PFID(ll_inode2fid(inode)), inode); | ||
317 | |||
318 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); | ||
319 | |||
320 | rc = xattr_type_filter(sbi, handler->flags); | ||
321 | if (rc) | ||
322 | return rc; | ||
323 | |||
324 | /* b15587: ignore security.capability xattr for now */ | ||
325 | if ((handler->flags == XATTR_SECURITY_T && !strcmp(name, "capability"))) | ||
326 | return -ENODATA; | ||
327 | |||
328 | /* LU-549: Disable security.selinux when selinux is disabled */ | ||
329 | if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() && | ||
330 | !strcmp(name, "selinux")) | ||
331 | return -EOPNOTSUPP; | ||
332 | |||
333 | #ifdef CONFIG_FS_POSIX_ACL | ||
334 | /* The POSIX ACL is protected by the LOOKUP lock. By the time we get | ||
335 | * here we have only done path resolution to the target inode, so there | ||
336 | * is a good chance the cached ACL is up to date. | ||
337 | */ | ||
338 | if (handler->flags == XATTR_ACL_ACCESS_T) { | ||
339 | struct posix_acl *acl; | ||
340 | |||
341 | spin_lock(&lli->lli_lock); | ||
342 | acl = posix_acl_dup(lli->lli_posix_acl); | ||
343 | spin_unlock(&lli->lli_lock); | ||
344 | |||
345 | if (!acl) | ||
346 | return -ENODATA; | ||
347 | |||
348 | rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); | ||
349 | posix_acl_release(acl); | ||
350 | return rc; | ||
351 | } | ||
352 | if (handler->flags == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode)) | ||
353 | return -ENODATA; | ||
354 | #endif | ||
355 | sprintf(fullname, "%s%s", handler->prefix, name); | ||
356 | return ll_xattr_list(inode, fullname, handler->flags, buffer, size, | ||
357 | OBD_MD_FLXATTR); | ||
358 | } | ||
359 | |||
360 | static int ll_xattr_get(const struct xattr_handler *handler, | ||
361 | struct dentry *dentry, struct inode *inode, | ||
362 | const char *name, void *buffer, size_t size) | ||
356 | { | 363 | { |
357 | LASSERT(inode); | 364 | LASSERT(inode); |
358 | LASSERT(name); | 365 | LASSERT(name); |
@@ -360,20 +367,15 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode, | |||
360 | CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n", | 367 | CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n", |
361 | PFID(ll_inode2fid(inode)), inode, name); | 368 | PFID(ll_inode2fid(inode)), inode, name); |
362 | 369 | ||
363 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); | 370 | if (!strcmp(name, "lov")) { |
364 | |||
365 | if ((strncmp(name, XATTR_TRUSTED_PREFIX, | ||
366 | sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 && | ||
367 | strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) || | ||
368 | (strncmp(name, XATTR_LUSTRE_PREFIX, | ||
369 | sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 && | ||
370 | strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) { | ||
371 | struct lov_stripe_md *lsm; | 371 | struct lov_stripe_md *lsm; |
372 | struct lov_user_md *lump; | 372 | struct lov_user_md *lump; |
373 | struct lov_mds_md *lmm = NULL; | 373 | struct lov_mds_md *lmm = NULL; |
374 | struct ptlrpc_request *request = NULL; | 374 | struct ptlrpc_request *request = NULL; |
375 | int rc = 0, lmmsize = 0; | 375 | int rc = 0, lmmsize = 0; |
376 | 376 | ||
377 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); | ||
378 | |||
377 | if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) | 379 | if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) |
378 | return -ENODATA; | 380 | return -ENODATA; |
379 | 381 | ||
@@ -439,7 +441,7 @@ out: | |||
439 | return rc; | 441 | return rc; |
440 | } | 442 | } |
441 | 443 | ||
442 | return ll_getxattr_common(inode, name, buffer, size, OBD_MD_FLXATTR); | 444 | return ll_xattr_get_common(handler, dentry, inode, name, buffer, size); |
443 | } | 445 | } |
444 | 446 | ||
445 | ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) | 447 | ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) |
@@ -457,7 +459,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) | |||
457 | 459 | ||
458 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1); | 460 | ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1); |
459 | 461 | ||
460 | rc = ll_getxattr_common(inode, NULL, buffer, size, OBD_MD_FLXATTRLS); | 462 | rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size, |
463 | OBD_MD_FLXATTRLS); | ||
461 | if (rc < 0) | 464 | if (rc < 0) |
462 | goto out; | 465 | goto out; |
463 | 466 | ||
@@ -518,3 +521,57 @@ out: | |||
518 | 521 | ||
519 | return rc; | 522 | return rc; |
520 | } | 523 | } |
524 | |||
525 | static const struct xattr_handler ll_user_xattr_handler = { | ||
526 | .prefix = XATTR_USER_PREFIX, | ||
527 | .flags = XATTR_USER_T, | ||
528 | .get = ll_xattr_get_common, | ||
529 | .set = ll_xattr_set_common, | ||
530 | }; | ||
531 | |||
532 | static const struct xattr_handler ll_trusted_xattr_handler = { | ||
533 | .prefix = XATTR_TRUSTED_PREFIX, | ||
534 | .flags = XATTR_TRUSTED_T, | ||
535 | .get = ll_xattr_get, | ||
536 | .set = ll_xattr_set, | ||
537 | }; | ||
538 | |||
539 | static const struct xattr_handler ll_security_xattr_handler = { | ||
540 | .prefix = XATTR_SECURITY_PREFIX, | ||
541 | .flags = XATTR_SECURITY_T, | ||
542 | .get = ll_xattr_get_common, | ||
543 | .set = ll_xattr_set_common, | ||
544 | }; | ||
545 | |||
546 | static const struct xattr_handler ll_acl_access_xattr_handler = { | ||
547 | .prefix = XATTR_NAME_POSIX_ACL_ACCESS, | ||
548 | .flags = XATTR_ACL_ACCESS_T, | ||
549 | .get = ll_xattr_get_common, | ||
550 | .set = ll_xattr_set_common, | ||
551 | }; | ||
552 | |||
553 | static const struct xattr_handler ll_acl_default_xattr_handler = { | ||
554 | .prefix = XATTR_NAME_POSIX_ACL_DEFAULT, | ||
555 | .flags = XATTR_ACL_DEFAULT_T, | ||
556 | .get = ll_xattr_get_common, | ||
557 | .set = ll_xattr_set_common, | ||
558 | }; | ||
559 | |||
560 | static const struct xattr_handler ll_lustre_xattr_handler = { | ||
561 | .prefix = XATTR_LUSTRE_PREFIX, | ||
562 | .flags = XATTR_LUSTRE_T, | ||
563 | .get = ll_xattr_get, | ||
564 | .set = ll_xattr_set, | ||
565 | }; | ||
566 | |||
567 | const struct xattr_handler *ll_xattr_handlers[] = { | ||
568 | &ll_user_xattr_handler, | ||
569 | &ll_trusted_xattr_handler, | ||
570 | &ll_security_xattr_handler, | ||
571 | #ifdef CONFIG_FS_POSIX_ACL | ||
572 | &ll_acl_access_xattr_handler, | ||
573 | &ll_acl_default_xattr_handler, | ||
574 | #endif | ||
575 | &ll_lustre_xattr_handler, | ||
576 | NULL, | ||
577 | }; | ||
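One practical effect of the conversion, visible throughout the xattr.c hunk above, is that the .get and .set callbacks receive the attribute name with its prefix already stripped, which is why the new code compares against "lov", "capability" or "selinux" and rebuilds the full name with sprintf() before handing it to the MDS. A minimal sketch of that round trip, using "trusted.lov" purely as an example:

	/* Illustration only: "trusted.lov" as seen by ll_xattr_set(). */
	const char *prefix = XATTR_TRUSTED_PREFIX;	/* "trusted." */
	const char *name   = "lov";			/* prefix already stripped by the VFS */
	char fullname[strlen(prefix) + strlen(name) + 1];

	sprintf(fullname, "%s%s", prefix, name);	/* "trusted.lov" again, for the MDS */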
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c index ff1926ca1f96..a183e68ec320 100644 --- a/drivers/staging/media/lirc/lirc_imon.c +++ b/drivers/staging/media/lirc/lirc_imon.c | |||
@@ -797,16 +797,11 @@ static int imon_probe(struct usb_interface *interface, | |||
797 | goto free_rbuf; | 797 | goto free_rbuf; |
798 | } | 798 | } |
799 | rx_urb = usb_alloc_urb(0, GFP_KERNEL); | 799 | rx_urb = usb_alloc_urb(0, GFP_KERNEL); |
800 | if (!rx_urb) { | 800 | if (!rx_urb) |
801 | dev_err(dev, "%s: usb_alloc_urb failed for IR urb\n", __func__); | ||
802 | goto free_lirc_buf; | 801 | goto free_lirc_buf; |
803 | } | ||
804 | tx_urb = usb_alloc_urb(0, GFP_KERNEL); | 802 | tx_urb = usb_alloc_urb(0, GFP_KERNEL); |
805 | if (!tx_urb) { | 803 | if (!tx_urb) |
806 | dev_err(dev, "%s: usb_alloc_urb failed for display urb\n", | ||
807 | __func__); | ||
808 | goto free_rx_urb; | 804 | goto free_rx_urb; |
809 | } | ||
810 | 805 | ||
811 | mutex_init(&context->ctx_lock); | 806 | mutex_init(&context->ctx_lock); |
812 | context->vfd_proto_6p = vfd_proto_6p; | 807 | context->vfd_proto_6p = vfd_proto_6p; |
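This and the remaining staging USB hunks below all perform the same cleanup: the dev_err()/RT_TRACE() messages after a failed usb_alloc_urb() are dropped because the memory allocator already warns on allocation failure, so the extra text only bloats the drivers. The surviving pattern, taken from the lirc_imon hunk above, is simply:

	rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!rx_urb)
		goto free_lirc_buf;	/* the allocator already logged the failure */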
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c index 2218d0042030..b080fde6d740 100644 --- a/drivers/staging/media/lirc/lirc_sasem.c +++ b/drivers/staging/media/lirc/lirc_sasem.c | |||
@@ -758,17 +758,12 @@ static int sasem_probe(struct usb_interface *interface, | |||
758 | } | 758 | } |
759 | rx_urb = usb_alloc_urb(0, GFP_KERNEL); | 759 | rx_urb = usb_alloc_urb(0, GFP_KERNEL); |
760 | if (!rx_urb) { | 760 | if (!rx_urb) { |
761 | dev_err(&interface->dev, | ||
762 | "%s: usb_alloc_urb failed for IR urb\n", __func__); | ||
763 | alloc_status = 5; | 761 | alloc_status = 5; |
764 | goto alloc_status_switch; | 762 | goto alloc_status_switch; |
765 | } | 763 | } |
766 | if (vfd_ep_found) { | 764 | if (vfd_ep_found) { |
767 | tx_urb = usb_alloc_urb(0, GFP_KERNEL); | 765 | tx_urb = usb_alloc_urb(0, GFP_KERNEL); |
768 | if (!tx_urb) { | 766 | if (!tx_urb) { |
769 | dev_err(&interface->dev, | ||
770 | "%s: usb_alloc_urb failed for VFD urb", | ||
771 | __func__); | ||
772 | alloc_status = 6; | 767 | alloc_status = 6; |
773 | goto alloc_status_switch; | 768 | goto alloc_status_switch; |
774 | } | 769 | } |
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c index aeae071f2823..9ec29788c047 100644 --- a/drivers/staging/most/hdm-usb/hdm_usb.c +++ b/drivers/staging/most/hdm-usb/hdm_usb.c | |||
@@ -650,10 +650,8 @@ static int hdm_enqueue(struct most_interface *iface, int channel, | |||
650 | return -ENODEV; | 650 | return -ENODEV; |
651 | 651 | ||
652 | urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC); | 652 | urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC); |
653 | if (!urb) { | 653 | if (!urb) |
654 | dev_err(dev, "Failed to allocate URB\n"); | ||
655 | return -ENOMEM; | 654 | return -ENOMEM; |
656 | } | ||
657 | 655 | ||
658 | anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC); | 656 | anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC); |
659 | if (!anchor) { | 657 | if (!anchor) { |
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index dd0970facdf5..7af1af8c6616 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c | |||
@@ -1702,11 +1702,8 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) | |||
1702 | } | 1702 | } |
1703 | if (bSend0Byte) { | 1703 | if (bSend0Byte) { |
1704 | tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC); | 1704 | tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC); |
1705 | if (!tx_urb_zero) { | 1705 | if (!tx_urb_zero) |
1706 | RT_TRACE(COMP_ERR, | ||
1707 | "can't alloc urb for zero byte\n"); | ||
1708 | return -ENOMEM; | 1706 | return -ENOMEM; |
1709 | } | ||
1710 | usb_fill_bulk_urb(tx_urb_zero, udev, | 1707 | usb_fill_bulk_urb(tx_urb_zero, udev, |
1711 | usb_sndbulkpipe(udev, idx_pipe), | 1708 | usb_sndbulkpipe(udev, idx_pipe), |
1712 | &zero, 0, tx_zero_isr, dev); | 1709 | &zero, 0, tx_zero_isr, dev); |
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index ac4fecb30d0e..0594828bdabf 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c | |||
@@ -440,10 +440,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) | |||
440 | 440 | ||
441 | /* allocate URBs */ | 441 | /* allocate URBs */ |
442 | tx_context->urb = usb_alloc_urb(0, GFP_KERNEL); | 442 | tx_context->urb = usb_alloc_urb(0, GFP_KERNEL); |
443 | if (!tx_context->urb) { | 443 | if (!tx_context->urb) |
444 | dev_err(&priv->usb->dev, "alloc tx urb failed\n"); | ||
445 | goto free_tx; | 444 | goto free_tx; |
446 | } | ||
447 | 445 | ||
448 | tx_context->in_use = false; | 446 | tx_context->in_use = false; |
449 | } | 447 | } |
@@ -462,10 +460,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) | |||
462 | 460 | ||
463 | /* allocate URBs */ | 461 | /* allocate URBs */ |
464 | rcb->urb = usb_alloc_urb(0, GFP_KERNEL); | 462 | rcb->urb = usb_alloc_urb(0, GFP_KERNEL); |
465 | if (!rcb->urb) { | 463 | if (!rcb->urb) |
466 | dev_err(&priv->usb->dev, "Failed to alloc rx urb\n"); | ||
467 | goto free_rx_tx; | 464 | goto free_rx_tx; |
468 | } | ||
469 | 465 | ||
470 | rcb->skb = dev_alloc_skb(priv->rx_buf_sz); | 466 | rcb->skb = dev_alloc_skb(priv->rx_buf_sz); |
471 | if (!rcb->skb) | 467 | if (!rcb->skb) |
@@ -479,10 +475,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) | |||
479 | } | 475 | } |
480 | 476 | ||
481 | priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL); | 477 | priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL); |
482 | if (!priv->interrupt_urb) { | 478 | if (!priv->interrupt_urb) |
483 | dev_err(&priv->usb->dev, "Failed to alloc int urb\n"); | ||
484 | goto free_rx_tx; | 479 | goto free_rx_tx; |
485 | } | ||
486 | 480 | ||
487 | priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL); | 481 | priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL); |
488 | if (!priv->int_buf.data_buf) { | 482 | if (!priv->int_buf.data_buf) { |