author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2012-04-09 14:51:18 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2012-04-17 16:27:37 -0400
commit     0b6c4857f7684f6d3f59e0506f62953575346978
tree       c266c9232022a962f1cb868d0f3664074efa2134 /drivers
parent     fe2af11c220c7bb3a67f7aec0594811e5c59e019
firewire: core: fix DMA mapping direction
Seen with recent libdc1394: If a client mmap()s the buffer of an
isochronous reception context with PROT_READ|PROT_WRITE instead of just
PROT_READ, firewire-core sets the wrong DMA mapping direction during
buffer initialization.
The fix is to split fw_iso_buffer_init() into an allocation step and a DMA
mapping step, and to perform the latter only once both the buffer and the DMA
context have been allocated. Buffer allocation and context creation may happen
in either order, but the context type (reception or transmission) is needed
to set the DMA direction of the buffer.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
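
[Editorial note: the triggering sequence described above can be sketched from
userspace roughly as follows. This is an illustration, not part of the patch;
it is written against the character-device ABI in <linux/firewire-cdev.h>, the
device path, channel, and buffer size are arbitrary, and error handling as
well as the usual FW_CDEV_IOC_GET_INFO handshake are omitted. The point is
that the buffer is mapped writable before the kernel can know the context
type; with this patch, the DMA direction is taken from the context type once
the context exists, not from the mmap() protection bits.]

/* Hypothetical reproducer sketch -- not part of the patch. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>
#include <linux/firewire-constants.h>

int main(void)
{
        size_t size = 16 * 4096;                /* 16 pages of iso buffer (arbitrary) */
        int fd = open("/dev/fw1", O_RDWR);      /* device path is arbitrary */

        /* Writable mapping: before the fix, this alone selected DMA_TO_DEVICE. */
        void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        /* Only here does the kernel learn that reception is intended; with the
         * fix, the buffer is DMA-mapped at this point as DMA_FROM_DEVICE. */
        struct fw_cdev_create_iso_context create = {
                .type        = FW_CDEV_ISO_CONTEXT_RECEIVE,
                .header_size = 4,
                .channel     = 1,
                .speed       = SCODE_400,
        };
        ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create);

        munmap(buf, size);
        close(fd);
        return 0;
}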
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/firewire/core-cdev.c  51
-rw-r--r--  drivers/firewire/core-iso.c   80
-rw-r--r--  drivers/firewire/core.h        7
3 files changed, 99 insertions, 39 deletions
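
[Editorial note: before the full diff, here is the shape of the split,
condensed from the hunks below. Buffer allocation no longer takes a card or a
direction, the DMA mapping becomes a separate step, and a small helper in
core-cdev.c derives the direction from the context type rather than from the
mmap() flags.]

/* Condensed from the diff below (declarations from drivers/firewire/core.h). */
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
                          enum dma_data_direction direction);
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
                          struct vm_area_struct *vma);

/* core-cdev.c: the direction now follows the context type. */
static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
        if (context->type == FW_ISO_CONTEXT_TRANSMIT)
                return DMA_TO_DEVICE;
        else
                return DMA_FROM_DEVICE;
}

The exported fw_iso_buffer_init() keeps its old signature and now simply
chains the two steps, so callers that have both card and direction at hand
are unaffected.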
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 2e6b24547e2a..2783f69dada6 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,6 +22,7 @@
 #include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/firewire.h>
 #include <linux/firewire-cdev.h>
@@ -70,6 +71,7 @@ struct client {
         u64 iso_closure;
         struct fw_iso_buffer buffer;
         unsigned long vm_start;
+        bool buffer_is_mapped;
 
         struct list_head phy_receiver_link;
         u64 phy_receiver_closure;
@@ -959,11 +961,20 @@ static void iso_mc_callback(struct fw_iso_context *context,
                     sizeof(e->interrupt), NULL, 0);
 }
 
+static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
+{
+        if (context->type == FW_ISO_CONTEXT_TRANSMIT)
+                return DMA_TO_DEVICE;
+        else
+                return DMA_FROM_DEVICE;
+}
+
 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 {
         struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
         struct fw_iso_context *context;
         fw_iso_callback_t cb;
+        int ret;
 
         BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                      FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
@@ -1004,8 +1015,21 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
         if (client->iso_context != NULL) {
                 spin_unlock_irq(&client->lock);
                 fw_iso_context_destroy(context);
+
                 return -EBUSY;
         }
+        if (!client->buffer_is_mapped) {
+                ret = fw_iso_buffer_map_dma(&client->buffer,
+                                            client->device->card,
+                                            iso_dma_direction(context));
+                if (ret < 0) {
+                        spin_unlock_irq(&client->lock);
+                        fw_iso_context_destroy(context);
+
+                        return ret;
+                }
+                client->buffer_is_mapped = true;
+        }
         client->iso_closure = a->closure;
         client->iso_context = context;
         spin_unlock_irq(&client->lock);
@@ -1651,7 +1675,6 @@ static long fw_device_op_compat_ioctl(struct file *file,
 static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 {
         struct client *client = file->private_data;
-        enum dma_data_direction direction;
         unsigned long size;
         int page_count, ret;
 
@@ -1674,20 +1697,28 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
         if (size & ~PAGE_MASK)
                 return -EINVAL;
 
-        if (vma->vm_flags & VM_WRITE)
-                direction = DMA_TO_DEVICE;
-        else
-                direction = DMA_FROM_DEVICE;
-
-        ret = fw_iso_buffer_init(&client->buffer, client->device->card,
-                                 page_count, direction);
+        ret = fw_iso_buffer_alloc(&client->buffer, page_count);
         if (ret < 0)
                 return ret;
 
-        ret = fw_iso_buffer_map(&client->buffer, vma);
+        spin_lock_irq(&client->lock);
+        if (client->iso_context) {
+                ret = fw_iso_buffer_map_dma(&client->buffer,
+                                            client->device->card,
+                                            iso_dma_direction(client->iso_context));
+                client->buffer_is_mapped = (ret == 0);
+        }
+        spin_unlock_irq(&client->lock);
         if (ret < 0)
-                fw_iso_buffer_destroy(&client->buffer, client->device->card);
+                goto fail;
 
-        return ret;
+        ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+        if (ret < 0)
+                goto fail;
+
+        return 0;
+ fail:
+        fw_iso_buffer_destroy(&client->buffer, client->device->card);
+        return ret;
 }
 
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index d1565828ae2c..8382e27e9a27 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -39,52 +39,73 @@
  * Isochronous DMA context management
  */
 
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-                       int page_count, enum dma_data_direction direction)
+int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
 {
-        int i, j;
-        dma_addr_t address;
-
-        buffer->page_count = page_count;
-        buffer->direction = direction;
+        int i;
 
+        buffer->page_count = 0;
+        buffer->page_count_mapped = 0;
         buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
                                 GFP_KERNEL);
         if (buffer->pages == NULL)
-                goto out;
+                return -ENOMEM;
 
-        for (i = 0; i < buffer->page_count; i++) {
+        for (i = 0; i < page_count; i++) {
                 buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
                 if (buffer->pages[i] == NULL)
-                        goto out_pages;
+                        break;
+        }
+        buffer->page_count = i;
+        if (i < page_count) {
+                fw_iso_buffer_destroy(buffer, NULL);
+                return -ENOMEM;
+        }
 
+        return 0;
+}
+
+int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
+                          enum dma_data_direction direction)
+{
+        dma_addr_t address;
+        int i;
+
+        buffer->direction = direction;
+
+        for (i = 0; i < buffer->page_count; i++) {
                 address = dma_map_page(card->device, buffer->pages[i],
                                        0, PAGE_SIZE, direction);
-                if (dma_mapping_error(card->device, address)) {
-                        __free_page(buffer->pages[i]);
-                        goto out_pages;
-                }
+                if (dma_mapping_error(card->device, address))
+                        break;
+
                 set_page_private(buffer->pages[i], address);
         }
+        buffer->page_count_mapped = i;
+        if (i < buffer->page_count)
+                return -ENOMEM;
 
         return 0;
+}
 
-out_pages:
-        for (j = 0; j < i; j++) {
-                address = page_private(buffer->pages[j]);
-                dma_unmap_page(card->device, address,
-                               PAGE_SIZE, direction);
-                __free_page(buffer->pages[j]);
-        }
-        kfree(buffer->pages);
-out:
-        buffer->pages = NULL;
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+                       int page_count, enum dma_data_direction direction)
+{
+        int ret;
+
+        ret = fw_iso_buffer_alloc(buffer, page_count);
+        if (ret < 0)
+                return ret;
+
+        ret = fw_iso_buffer_map_dma(buffer, card, direction);
+        if (ret < 0)
+                fw_iso_buffer_destroy(buffer, card);
 
-        return -ENOMEM;
+        return ret;
 }
 EXPORT_SYMBOL(fw_iso_buffer_init);
 
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
+                          struct vm_area_struct *vma)
 {
         unsigned long uaddr;
         int i, err;
@@ -107,15 +128,18 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
         int i;
         dma_addr_t address;
 
-        for (i = 0; i < buffer->page_count; i++) {
+        for (i = 0; i < buffer->page_count_mapped; i++) {
                 address = page_private(buffer->pages[i]);
                 dma_unmap_page(card->device, address,
                                PAGE_SIZE, buffer->direction);
-                __free_page(buffer->pages[i]);
         }
+        for (i = 0; i < buffer->page_count; i++)
+                __free_page(buffer->pages[i]);
 
         kfree(buffer->pages);
         buffer->pages = NULL;
+        buffer->page_count = 0;
+        buffer->page_count_mapped = 0;
 }
 EXPORT_SYMBOL(fw_iso_buffer_destroy);
 
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 9047f5547d98..94257aecd054 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -3,6 +3,7 @@
 
 #include <linux/compiler.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/idr.h>
@@ -169,7 +170,11 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
 
 /* -iso */
 
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
+int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
+                          enum dma_data_direction direction);
+int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
+                          struct vm_area_struct *vma);
 
 
 /* -topology */