author		Alan Cox <alan@redhat.com>	2008-10-16 01:02:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-16 14:21:38 -0400
commit		9d793b0bcbbbc37d80241862dfa5257963d5415e
tree		b842e5e92825d85fae63afadf4fcb4c1a681c28c
parent		673c0c00382ed807f09d94e806f3519ddeeb4f70
i2o: Fix 32/64bit DMA locking
The I2O ioctls assume 32-bit addresses. In itself that is fine, as these
are old cards and nobody uses them with 64-bit DMA. However, it was noted
on LKML that the same assumption is also made for allocated memory, which
is unsafe on 64-bit systems.

Fixing this is a mess. It turns out there is a pile of code buried in a
header file that does racy 32/64-bit filtering on the DMA masks. So we:

- Verify that all callers of the racy code (i2o_dma_[re]alloc) can sleep
- Move the code into a new drivers/message/i2o/memory.c file
- Remove the gfp_mask argument so nobody can try to misuse the functions
- Wrap a mutex around the problem area (a single mutex is easy to do, and
  none of this is performance-relevant); see the sketch below
- Switch the remaining problematic kmalloc() holdout to i2o_dma_alloc()
Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: Vasily Averin <vvs@sw.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
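
For orientation, here is the serialized mask-flip pattern the patch introduces,
condensed from the new memory.c below (error reporting and the memset are
trimmed; this is a sketch, not the verbatim code):

	/* Protects the temporary 32-bit mask flip on 64-bit systems */
	static DEFINE_MUTEX(mem_lock);

	int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
	{
		struct pci_dev *pdev = to_pci_dev(dev);
		int dma_64 = 0;

		mutex_lock(&mem_lock);
		/* Drop to a 32-bit mask so the buffer is reachable by the card */
		if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
			dma_64 = 1;
			if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
				mutex_unlock(&mem_lock);
				return -ENOMEM;
			}
		}

		/* GFP_KERNEL is hardwired now, hence "verify all callers can sleep" */
		addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);

		/*
		 * Restore the 64-bit mask under the same lock. Without the
		 * mutex, a concurrent allocator could run while the 64-bit
		 * mask was restored and hand a 32-bit-only card a buffer
		 * above 4GiB.
		 */
		if (dma_64)
			pci_set_dma_mask(pdev, DMA_64BIT_MASK);
		mutex_unlock(&mem_lock);

		return addr->virt ? 0 : -ENOMEM;
	}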
-rw-r--r--  drivers/message/i2o/Makefile      |   2
-rw-r--r--  drivers/message/i2o/device.c      |   2
-rw-r--r--  drivers/message/i2o/exec-osm.c    |   4
-rw-r--r--  drivers/message/i2o/i2o_config.c  |  31
-rw-r--r--  drivers/message/i2o/iop.c         |   2
-rw-r--r--  drivers/message/i2o/memory.c      | 313
-rw-r--r--  drivers/message/i2o/pci.c         |  16
-rw-r--r--  include/linux/i2o.h               | 292
8 files changed, 351 insertions(+), 311 deletions(-)
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
index 2c2e39aa1efa..b0982dacfd0a 100644
--- a/drivers/message/i2o/Makefile
+++ b/drivers/message/i2o/Makefile
@@ -5,7 +5,7 @@
 # In the future, some of these should be built conditionally.
 #
 
-i2o_core-y		+= iop.o driver.o device.o debug.o pci.o exec-osm.o
+i2o_core-y		+= iop.o driver.o device.o debug.o pci.o exec-osm.o memory.o
 i2o_bus-y		+= bus-osm.o
 i2o_config-y		+= config-osm.o
 obj-$(CONFIG_I2O)	+= i2o_core.o
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8774c670e668..54c2e9ae23e5 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -467,7 +467,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
 
 	res.virt = NULL;
 
-	if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
+	if (i2o_dma_alloc(dev, &res, reslen))
 		return -ENOMEM;
 
 	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 6cbcc21de518..56faef1a1d55 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -388,8 +388,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
 
 	dev = &c->pdev->dev;
 
-	if (i2o_dma_realloc
-	    (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
+	if (i2o_dma_realloc(dev, &c->dlct,
+			    le32_to_cpu(sb->expected_lct_size)))
 		return -ENOMEM;
 
 	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 4238de98d4a6..a3fabdbe6ca6 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -260,7 +260,7 @@ static int i2o_cfg_swdl(unsigned long arg)
 	if (IS_ERR(msg))
 		return PTR_ERR(msg);
 
-	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
+	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
 		i2o_msg_nop(c, msg);
 		return -ENOMEM;
 	}
@@ -339,7 +339,7 @@ static int i2o_cfg_swul(unsigned long arg)
 	if (IS_ERR(msg))
 		return PTR_ERR(msg);
 
-	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
+	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
 		i2o_msg_nop(c, msg);
 		return -ENOMEM;
 	}
@@ -634,9 +634,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 			sg_size = sg[i].flag_count & 0xffffff;
 			p = &(sg_list[sg_index]);
 			/* Allocate memory for the transfer */
-			if (i2o_dma_alloc
-			    (&c->pdev->dev, p, sg_size,
-			     PCI_DMA_BIDIRECTIONAL)) {
+			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
 				printk(KERN_DEBUG
 				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				       c->name, sg_size, i, sg_count);
@@ -780,12 +778,11 @@ static int i2o_cfg_passthru(unsigned long arg)
 	u32 size = 0;
 	u32 reply_size = 0;
 	u32 rcode = 0;
-	void *sg_list[SG_TABLESIZE];
+	struct i2o_dma sg_list[SG_TABLESIZE];
 	u32 sg_offset = 0;
 	u32 sg_count = 0;
 	int sg_index = 0;
 	u32 i = 0;
-	void *p = NULL;
 	i2o_status_block *sb;
 	struct i2o_message *msg;
 	unsigned int iop;
@@ -842,6 +839,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
 	if (sg_offset) {
 		struct sg_simple_element *sg;
+		struct i2o_dma *p;
 
 		if (sg_offset * 4 >= size) {
 			rcode = -EFAULT;
@@ -871,22 +869,22 @@ static int i2o_cfg_passthru(unsigned long arg)
 				goto sg_list_cleanup;
 			}
 			sg_size = sg[i].flag_count & 0xffffff;
+			p = &(sg_list[sg_index]);
+			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
 			/* Allocate memory for the transfer */
-			p = kmalloc(sg_size, GFP_KERNEL);
-			if (!p) {
 				printk(KERN_DEBUG
 				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				       c->name, sg_size, i, sg_count);
 				rcode = -ENOMEM;
 				goto sg_list_cleanup;
 			}
-			sg_list[sg_index++] = p;	// sglist indexed with input frame, not our internal frame.
+			sg_index++;
 			/* Copy in the user's SG buffer if necessary */
 			if (sg[i].
 			    flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
 				// TODO 64bit fix
 				if (copy_from_user
-				    (p, (void __user *)sg[i].addr_bus,
+				    (p->virt, (void __user *)sg[i].addr_bus,
 				     sg_size)) {
 					printk(KERN_DEBUG
 					       "%s: Could not copy SG buf %d FROM user\n",
@@ -895,8 +893,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 					goto sg_list_cleanup;
 				}
 			}
-			//TODO 64bit fix
-			sg[i].addr_bus = virt_to_bus(p);
+			sg[i].addr_bus = p->phys;
 		}
 	}
 
@@ -908,7 +905,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 	}
 
 	if (sg_offset) {
-		u32 rmsg[128];
+		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
 		// TODO 64bit fix
@@ -942,11 +939,11 @@ static int i2o_cfg_passthru(unsigned long arg)
 			sg_size = sg[j].flag_count & 0xffffff;
 			// TODO 64bit fix
 			if (copy_to_user
-			    ((void __user *)sg[j].addr_bus, sg_list[j],
+			    ((void __user *)sg[j].addr_bus, sg_list[j].virt,
 			     sg_size)) {
 				printk(KERN_WARNING
 				       "%s: Could not copy %p TO user %x\n",
-				       c->name, sg_list[j],
+				       c->name, sg_list[j].virt,
 				       sg[j].addr_bus);
 				rcode = -EFAULT;
 				goto sg_list_cleanup;
@@ -973,7 +970,7 @@ sg_list_cleanup:
 	}
 
 	for (i = 0; i < sg_index; i++)
-		kfree(sg_list[i]);
+		i2o_dma_free(&c->pdev->dev, &sg_list[i]);
 
 cleanup:
 	kfree(reply);
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index da715e11c1b2..be2b5926d26c 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -1004,7 +1004,7 @@ static int i2o_hrt_get(struct i2o_controller *c)
 
 	size = hrt->num_entries * hrt->entry_len << 2;
 	if (size > c->hrt.len) {
-		if (i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL))
+		if (i2o_dma_realloc(dev, &c->hrt, size))
 			return -ENOMEM;
 		else
 			hrt = c->hrt.virt;
diff --git a/drivers/message/i2o/memory.c b/drivers/message/i2o/memory.c
new file mode 100644
index 000000000000..f5cc95c564e2
--- /dev/null
+++ b/drivers/message/i2o/memory.c
@@ -0,0 +1,313 @@
+/*
+ * Functions to handle I2O memory
+ *
+ * Pulled from the inlines in i2o headers and uninlined
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "core.h"
+
+/* Protects our 32/64bit mask switching */
+static DEFINE_MUTEX(mem_lock);
+
+/**
+ * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
+ * @c: I2O controller for which the calculation should be done
+ * @body_size: maximum body size used for message in 32-bit words.
+ *
+ * Return the maximum number of SG elements in a SG list.
+ */
+u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
+{
+	i2o_status_block *sb = c->status_block.virt;
+	u16 sg_count =
+	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
+	    body_size;
+
+	if (c->pae_support) {
+		/*
+		 * for 64-bit a SG attribute element must be added and each
+		 * SG element needs 12 bytes instead of 8.
+		 */
+		sg_count -= 2;
+		sg_count /= 3;
+	} else
+		sg_count /= 2;
+
+	if (c->short_req && (sg_count > 8))
+		sg_count = 8;
+
+	return sg_count;
+}
+EXPORT_SYMBOL_GPL(i2o_sg_tablesize);
+
+
+/**
+ * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
+ * @c: I2O controller
+ * @ptr: pointer to the data which should be mapped
+ * @size: size of data in bytes
+ * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
+ * @sg_ptr: pointer to the SG list inside the I2O message
+ *
+ * This function does all necessary DMA handling and also writes the I2O
+ * SGL elements into the I2O message. For details on DMA handling see also
+ * dma_map_single(). The pointer sg_ptr will only be set to the end of the
+ * SG list if the allocation was successful.
+ *
+ * Returns DMA address which must be checked for failures using
+ * dma_mapping_error().
+ */
+dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
+			      size_t size,
+			      enum dma_data_direction direction,
+			      u32 ** sg_ptr)
+{
+	u32 sg_flags;
+	u32 *mptr = *sg_ptr;
+	dma_addr_t dma_addr;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		sg_flags = 0xd4000000;
+		break;
+	case DMA_FROM_DEVICE:
+		sg_flags = 0xd0000000;
+		break;
+	default:
+		return 0;
+	}
+
+	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
+	if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
+			*mptr++ = cpu_to_le32(0x7C020002);
+			*mptr++ = cpu_to_le32(PAGE_SIZE);
+		}
+#endif
+
+		*mptr++ = cpu_to_le32(sg_flags | size);
+		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
+			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
+#endif
+		*sg_ptr = mptr;
+	}
+	return dma_addr;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_map_single);
+
+/**
+ * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
+ * @c: I2O controller
+ * @sg: SG list to be mapped
+ * @sg_count: number of elements in the SG list
+ * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
+ * @sg_ptr: pointer to the SG list inside the I2O message
+ *
+ * This function does all necessary DMA handling and also writes the I2O
+ * SGL elements into the I2O message. For details on DMA handling see also
+ * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
+ * list if the allocation was successful.
+ *
+ * Returns 0 on failure or 1 on success.
+ */
+int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg,
+		   int sg_count, enum dma_data_direction direction, u32 ** sg_ptr)
+{
+	u32 sg_flags;
+	u32 *mptr = *sg_ptr;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		sg_flags = 0x14000000;
+		break;
+	case DMA_FROM_DEVICE:
+		sg_flags = 0x10000000;
+		break;
+	default:
+		return 0;
+	}
+
+	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
+	if (!sg_count)
+		return 0;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
+		*mptr++ = cpu_to_le32(0x7C020002);
+		*mptr++ = cpu_to_le32(PAGE_SIZE);
+	}
+#endif
+
+	while (sg_count-- > 0) {
+		if (!sg_count)
+			sg_flags |= 0xC0000000;
+		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
+		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
+			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
+#endif
+		sg = sg_next(sg);
+	}
+	*sg_ptr = mptr;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_map_sg);
+
+/**
+ * i2o_dma_alloc - Allocate DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which should get the DMA buffer
+ * @len: length of the new DMA memory
+ *
+ * Allocate a coherent DMA memory and write the pointers into addr.
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int dma_64 = 0;
+
+	mutex_lock(&mem_lock);
+	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
+		dma_64 = 1;
+		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+			mutex_unlock(&mem_lock);
+			return -ENOMEM;
+		}
+	}
+
+	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);
+
+	if ((sizeof(dma_addr_t) > 4) && dma_64)
+		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+			printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
+	mutex_unlock(&mem_lock);
+
+	if (!addr->virt)
+		return -ENOMEM;
+
+	memset(addr->virt, 0, len);
+	addr->len = len;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_alloc);
+
+
+/**
+ * i2o_dma_free - Free DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which contains the DMA buffer
+ *
+ * Free a coherent DMA memory and set virtual address of addr to NULL.
+ */
+void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
+{
+	if (addr->virt) {
+		if (addr->phys)
+			dma_free_coherent(dev, addr->len, addr->virt,
+					  addr->phys);
+		else
+			kfree(addr->virt);
+		addr->virt = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(i2o_dma_free);
+
+
+/**
+ * i2o_dma_realloc - Realloc DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: pointer to a i2o_dma struct DMA buffer
+ * @len: new length of memory
+ *
+ * If there was something allocated in the addr, free it first. If len > 0
+ * than try to allocate it and write the addresses back to the addr
+ * structure. If len == 0 set the virtual address to NULL.
+ *
+ * Returns the 0 on success or negative error code on failure.
+ */
+int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len)
+{
+	i2o_dma_free(dev, addr);
+
+	if (len)
+		return i2o_dma_alloc(dev, addr, len);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_realloc);
+
+/*
+ * i2o_pool_alloc - Allocate an slab cache and mempool
+ * @mempool: pointer to struct i2o_pool to write data into.
+ * @name: name which is used to identify cache
+ * @size: size of each object
+ * @min_nr: minimum number of objects
+ *
+ * First allocates a slab cache with name and size. Then allocates a
+ * mempool which uses the slab cache for allocation and freeing.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+		   size_t size, int min_nr)
+{
+	pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!pool->name)
+		goto exit;
+	strcpy(pool->name, name);
+
+	pool->slab =
+	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!pool->slab)
+		goto free_name;
+
+	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
+	if (!pool->mempool)
+		goto free_slab;
+
+	return 0;
+
+free_slab:
+	kmem_cache_destroy(pool->slab);
+
+free_name:
+	kfree(pool->name);
+
+exit:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(i2o_pool_alloc);
+
+/*
+ * i2o_pool_free - Free slab cache and mempool again
+ * @mempool: pointer to struct i2o_pool which should be freed
+ *
+ * Note that you have to return all objects to the mempool again before
+ * calling i2o_pool_free().
+ */
+void i2o_pool_free(struct i2o_pool *pool)
+{
+	mempool_destroy(pool->mempool);
+	kmem_cache_destroy(pool->slab);
+	kfree(pool->name);
+};
+EXPORT_SYMBOL_GPL(i2o_pool_free);
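
One behavioral note on the code above, worth keeping in mind when reading the
callers: i2o_dma_realloc() frees before it allocates, so unlike krealloc() it
does not preserve the old contents. That is fine for the two in-tree users
(exec-osm.c and iop.c, earlier in this diff), which re-fetch the whole table
from the IOP after growing the buffer; a hypothetical caller that wanted its
data kept would have to copy it out first:

	/* Hypothetical caller: grow the buffer to the size the IOP reported.
	 * Whatever was in c->dlct before this call is gone afterwards. */
	if (i2o_dma_realloc(dev, &c->dlct, le32_to_cpu(sb->expected_lct_size)))
		return -ENOMEM;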
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 685a89547a51..610ef1204e68 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -186,31 +186,29 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
 		}
 	}
 
-	if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->status, 8)) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
 
-	if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt), GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt))) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
 
-	if (i2o_dma_alloc(dev, &c->dlct, 8192, GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->dlct, 8192)) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
 
-	if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block),
-			  GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block))) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
 
-	if (i2o_dma_alloc
-	    (dev, &c->out_queue,
-	     I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE *
-	     sizeof(u32), GFP_KERNEL)) {
+	if (i2o_dma_alloc(dev, &c->out_queue,
+			  I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE *
+			  sizeof(u32))) {
 		i2o_pci_free(c);
 		return -ENOMEM;
 	}
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 75ae6d8aba4f..4c4e57d1f19d 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -570,7 +570,6 @@ struct i2o_controller {
 #endif
 	spinlock_t lock;	/* lock for controller
				   configuration */
-
 	void *driver_data[I2O_MAX_DRIVERS];	/* storage for drivers */
 };
 
@@ -691,289 +690,22 @@ static inline u32 i2o_dma_high(dma_addr_t dma_addr)
 };
 #endif
 
-/**
- * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
- * @c: I2O controller for which the calculation should be done
- * @body_size: maximum body size used for message in 32-bit words.
- *
- * Return the maximum number of SG elements in a SG list.
- */
-static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
-{
-	i2o_status_block *sb = c->status_block.virt;
-	u16 sg_count =
-	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
-	    body_size;
-
-	if (c->pae_support) {
-		/*
-		 * for 64-bit a SG attribute element must be added and each
-		 * SG element needs 12 bytes instead of 8.
-		 */
-		sg_count -= 2;
-		sg_count /= 3;
-	} else
-		sg_count /= 2;
-
-	if (c->short_req && (sg_count > 8))
-		sg_count = 8;
-
-	return sg_count;
-};
-
-/**
- * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
- * @c: I2O controller
- * @ptr: pointer to the data which should be mapped
- * @size: size of data in bytes
- * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
- * @sg_ptr: pointer to the SG list inside the I2O message
- *
- * This function does all necessary DMA handling and also writes the I2O
- * SGL elements into the I2O message. For details on DMA handling see also
- * dma_map_single(). The pointer sg_ptr will only be set to the end of the
- * SG list if the allocation was successful.
- *
- * Returns DMA address which must be checked for failures using
- * dma_mapping_error().
- */
-static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
-					    size_t size,
-					    enum dma_data_direction direction,
-					    u32 ** sg_ptr)
-{
-	u32 sg_flags;
-	u32 *mptr = *sg_ptr;
-	dma_addr_t dma_addr;
-
-	switch (direction) {
-	case DMA_TO_DEVICE:
-		sg_flags = 0xd4000000;
-		break;
-	case DMA_FROM_DEVICE:
-		sg_flags = 0xd0000000;
-		break;
-	default:
-		return 0;
-	}
-
-	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
-	if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
-		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
-			*mptr++ = cpu_to_le32(0x7C020002);
-			*mptr++ = cpu_to_le32(PAGE_SIZE);
-		}
-#endif
-
-		*mptr++ = cpu_to_le32(sg_flags | size);
-		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
-		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
-			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
-#endif
-		*sg_ptr = mptr;
-	}
-	return dma_addr;
-};
-
-/**
- * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
- * @c: I2O controller
- * @sg: SG list to be mapped
- * @sg_count: number of elements in the SG list
- * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
- * @sg_ptr: pointer to the SG list inside the I2O message
- *
- * This function does all necessary DMA handling and also writes the I2O
- * SGL elements into the I2O message. For details on DMA handling see also
- * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
- * list if the allocation was successful.
- *
- * Returns 0 on failure or 1 on success.
- */
-static inline int i2o_dma_map_sg(struct i2o_controller *c,
-				 struct scatterlist *sg, int sg_count,
-				 enum dma_data_direction direction,
-				 u32 ** sg_ptr)
-{
-	u32 sg_flags;
-	u32 *mptr = *sg_ptr;
-
-	switch (direction) {
-	case DMA_TO_DEVICE:
-		sg_flags = 0x14000000;
-		break;
-	case DMA_FROM_DEVICE:
-		sg_flags = 0x10000000;
-		break;
-	default:
-		return 0;
-	}
-
-	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
-	if (!sg_count)
-		return 0;
-
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
-	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
-		*mptr++ = cpu_to_le32(0x7C020002);
-		*mptr++ = cpu_to_le32(PAGE_SIZE);
-	}
-#endif
-
-	while (sg_count-- > 0) {
-		if (!sg_count)
-			sg_flags |= 0xC0000000;
-		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
-		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
-		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
-			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
-#endif
-		sg = sg_next(sg);
-	}
-	*sg_ptr = mptr;
-
-	return 1;
-};
-
-/**
- * i2o_dma_alloc - Allocate DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which should get the DMA buffer
- * @len: length of the new DMA memory
- * @gfp_mask: GFP mask
- *
- * Allocate a coherent DMA memory and write the pointers into addr.
- *
- * Returns 0 on success or -ENOMEM on failure.
- */
-static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
-				size_t len, gfp_t gfp_mask)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	int dma_64 = 0;
-
-	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
-		dma_64 = 1;
-		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
-			return -ENOMEM;
-	}
-
-	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
-
-	if ((sizeof(dma_addr_t) > 4) && dma_64)
-		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
-			printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
-
-	if (!addr->virt)
-		return -ENOMEM;
-
-	memset(addr->virt, 0, len);
-	addr->len = len;
-
-	return 0;
-};
-
-/**
- * i2o_dma_free - Free DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which contains the DMA buffer
- *
- * Free a coherent DMA memory and set virtual address of addr to NULL.
- */
-static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
-{
-	if (addr->virt) {
-		if (addr->phys)
-			dma_free_coherent(dev, addr->len, addr->virt,
-					  addr->phys);
-		else
-			kfree(addr->virt);
-		addr->virt = NULL;
-	}
-};
-
-/**
- * i2o_dma_realloc - Realloc DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: pointer to a i2o_dma struct DMA buffer
- * @len: new length of memory
- * @gfp_mask: GFP mask
- *
- * If there was something allocated in the addr, free it first. If len > 0
- * than try to allocate it and write the addresses back to the addr
- * structure. If len == 0 set the virtual address to NULL.
- *
- * Returns the 0 on success or negative error code on failure.
- */
-static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
-				  size_t len, gfp_t gfp_mask)
-{
-	i2o_dma_free(dev, addr);
-
-	if (len)
-		return i2o_dma_alloc(dev, addr, len, gfp_mask);
-
-	return 0;
-};
-
-/*
- * i2o_pool_alloc - Allocate an slab cache and mempool
- * @mempool: pointer to struct i2o_pool to write data into.
- * @name: name which is used to identify cache
- * @size: size of each object
- * @min_nr: minimum number of objects
- *
- * First allocates a slab cache with name and size. Then allocates a
- * mempool which uses the slab cache for allocation and freeing.
- *
- * Returns 0 on success or negative error code on failure.
- */
-static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
-				 size_t size, int min_nr)
-{
-	pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
-	if (!pool->name)
-		goto exit;
-	strcpy(pool->name, name);
-
-	pool->slab =
-	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!pool->slab)
-		goto free_name;
-
-	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
-	if (!pool->mempool)
-		goto free_slab;
-
-	return 0;
-
-free_slab:
-	kmem_cache_destroy(pool->slab);
-
-free_name:
-	kfree(pool->name);
-
-exit:
-	return -ENOMEM;
-};
-
-/*
- * i2o_pool_free - Free slab cache and mempool again
- * @mempool: pointer to struct i2o_pool which should be freed
- *
- * Note that you have to return all objects to the mempool again before
- * calling i2o_pool_free().
- */
-static inline void i2o_pool_free(struct i2o_pool *pool)
-{
-	mempool_destroy(pool->mempool);
-	kmem_cache_destroy(pool->slab);
-	kfree(pool->name);
-};
-
+extern u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size);
+extern dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
+				     size_t size,
+				     enum dma_data_direction direction,
+				     u32 ** sg_ptr);
+extern int i2o_dma_map_sg(struct i2o_controller *c,
+			  struct scatterlist *sg, int sg_count,
+			  enum dma_data_direction direction,
+			  u32 ** sg_ptr);
+extern int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len);
+extern void i2o_dma_free(struct device *dev, struct i2o_dma *addr);
+extern int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
+			   size_t len);
+extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+			  size_t size, int min_nr);
+extern void i2o_pool_free(struct i2o_pool *pool);
 /* I2O driver (OSM) functions */
 extern int i2o_driver_register(struct i2o_driver *);
 extern void i2o_driver_unregister(struct i2o_driver *);
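
For out-of-tree OSMs tracking this change, the caller-visible difference is
just the dropped gfp_mask argument; allocation is always GFP_KERNEL under the
internal mutex, so the calls must be made from process context. An
illustrative before/after, modeled on the device.c hunk above:

	/* Before: gfp_mask passed by the caller (inline in i2o.h) */
	if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
		return -ENOMEM;

	/* After: exported from memory.c; may sleep, no mask argument */
	if (i2o_dma_alloc(dev, &res, reslen))
		return -ENOMEM;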