60 files changed, 1113 insertions, 1453 deletions
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index c1e9545c59bd..9f59fcbf5d82 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -13,9 +13,9 @@
 3.6 Constraints
 3.7 Example
 
-4 DRIVER DEVELOPER NOTES
+4 DMAENGINE DRIVER DEVELOPER NOTES
 4.1 Conformance points
-4.2 "My application needs finer control of hardware channels"
+4.2 "My application needs exclusive control of hardware channels"
 
 5 SOURCE
 
@@ -150,6 +150,7 @@ ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
 implementation examples.
 
 4 DRIVER DEVELOPMENT NOTES
+
 4.1 Conformance points:
 There are a few conformance points required in dmaengine drivers to
 accommodate assumptions made by applications using the async_tx API:
@@ -158,58 +159,49 @@ accommodate assumptions made by applications using the async_tx API:
 3/ Use async_tx_run_dependencies() in the descriptor clean up path to
    handle submission of dependent operations
 
-4.2 "My application needs finer control of hardware channels"
-This requirement seems to arise from cases where a DMA engine driver is
-trying to support device-to-memory DMA. The dmaengine and async_tx
-implementations were designed for offloading memory-to-memory
-operations; however, there are some capabilities of the dmaengine layer
-that can be used for platform-specific channel management.
-Platform-specific constraints can be handled by registering the
-application as a 'dma_client' and implementing a 'dma_event_callback' to
-apply a filter to the available channels in the system. Before showing
-how to implement a custom dma_event callback some background of
-dmaengine's client support is required.
+4.2 "My application needs exclusive control of hardware channels"
+Primarily this requirement arises from cases where a DMA engine driver
+is being used to support device-to-memory operations. A channel that is
+performing these operations cannot, for many platform specific reasons,
+be shared. For these cases the dma_request_channel() interface is
+provided.
+
+The interface is:
+struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
+                                     dma_filter_fn filter_fn,
+                                     void *filter_param);
 
-The following routines in dmaengine support multiple clients requesting
-use of a channel:
-- dma_async_client_register(struct dma_client *client)
-- dma_async_client_chan_request(struct dma_client *client)
-
-dma_async_client_register takes a pointer to an initialized dma_client
-structure. It expects that the 'event_callback' and 'cap_mask' fields
-are already initialized.
-
-dma_async_client_chan_request triggers dmaengine to notify the client of
-all channels that satisfy the capability mask. It is up to the client's
-event_callback routine to track how many channels the client needs and
-how many it is currently using. The dma_event_callback routine returns a
-dma_state_client code to let dmaengine know the status of the
-allocation.
-
-Below is the example of how to extend this functionality for
-platform-specific filtering of the available channels beyond the
-standard capability mask:
+Where dma_filter_fn is defined as:
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+When the optional 'filter_fn' parameter is set to NULL
+dma_request_channel simply returns the first channel that satisfies the
+capability mask. Otherwise, when the mask parameter is insufficient for
+specifying the necessary channel, the filter_fn routine can be used to
+disposition the available channels in the system. The filter_fn routine
+is called once for each free channel in the system. Upon seeing a
+suitable channel filter_fn returns DMA_ACK which flags that channel to
+be the return value from dma_request_channel. A channel allocated via
+this interface is exclusive to the caller, until dma_release_channel()
+is called.
+
+The DMA_PRIVATE capability flag is used to tag dma devices that should
+not be used by the general-purpose allocator. It can be set at
+initialization time if it is known that a channel will always be
+private. Alternatively, it is set when dma_request_channel() finds an
+unused "public" channel.
 
-static enum dma_state_client
-my_dma_client_callback(struct dma_client *client,
-		struct dma_chan *chan, enum dma_state state)
-{
-	struct dma_device *dma_dev;
-	struct my_platform_specific_dma *plat_dma_dev;
-
-	dma_dev = chan->device;
-	plat_dma_dev = container_of(dma_dev,
-				    struct my_platform_specific_dma,
-				    dma_dev);
-
-	if (!plat_dma_dev->platform_specific_capability)
-		return DMA_DUP;
-
-	. . .
-}
+A couple caveats to note when implementing a driver and consumer:
+1/ Once a channel has been privately allocated it will no longer be
+   considered by the general-purpose allocator even after a call to
+   dma_release_channel().
+2/ Since capabilities are specified at the device level a dma_device
+   with multiple channels will either have all channels public, or all
+   channels private.
 
 5 SOURCE
-include/linux/dmaengine.h: core header file for DMA drivers and clients
+
+include/linux/dmaengine.h: core header file for DMA drivers and api users
 drivers/dma/dmaengine.c: offload engine channel management routines
 drivers/dma/: location for offload engine drivers
 include/linux/async_tx.h: core header file for the async_tx api
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
new file mode 100644
index 000000000000..0c1c2f63c0a9
--- /dev/null
+++ b/Documentation/dmaengine.txt
@@ -0,0 +1 @@
+See Documentation/crypto/async-tx-api.txt
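For reference, a minimal consumer sketch of the dma_request_channel() interface documented in the async-tx-api.txt hunk above. Only dma_cap_zero(), dma_cap_set(), dma_request_channel() and dma_release_channel() are taken from that documentation; the names my_filter/my_grab_channel, the match logic and the use of a struct device pointer as filter_param are illustrative assumptions, and the filter returns a bool as the dma_filter_fn typedef shown above specifies.

#include <linux/dmaengine.h>

/*
 * Illustrative filter: accept only a channel provided by the DMA device
 * we were pointed at (filter_param is assumed to be that struct device *).
 */
static bool my_filter(struct dma_chan *chan, void *filter_param)
{
	struct device *wanted = filter_param;

	return chan->device->dev == wanted;
}

static struct dma_chan *my_grab_channel(struct device *dma_dev)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The channel is exclusive to this caller until dma_release_channel(chan). */
	chan = dma_request_channel(mask, my_filter, dma_dev);
	if (!chan)
		printk(KERN_ERR "no suitable DMA channel available\n");

	return chan;
}

A caller that obtains a channel this way holds it privately, per the caveats above, until it hands it back with dma_release_channel().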
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index ea7bc1e8562b..3fbfd1e32a9e 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1305,7 +1305,7 @@ struct platform_device *__init | |||
1305 | at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | 1305 | at32_add_device_mci(unsigned int id, struct mci_platform_data *data) |
1306 | { | 1306 | { |
1307 | struct platform_device *pdev; | 1307 | struct platform_device *pdev; |
1308 | struct dw_dma_slave *dws; | 1308 | struct dw_dma_slave *dws = &data->dma_slave; |
1309 | u32 pioa_mask; | 1309 | u32 pioa_mask; |
1310 | u32 piob_mask; | 1310 | u32 piob_mask; |
1311 | 1311 | ||
@@ -1324,22 +1324,13 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | |||
1324 | ARRAY_SIZE(atmel_mci0_resource))) | 1324 | ARRAY_SIZE(atmel_mci0_resource))) |
1325 | goto fail; | 1325 | goto fail; |
1326 | 1326 | ||
1327 | if (data->dma_slave) | 1327 | dws->dma_dev = &dw_dmac0_device.dev; |
1328 | dws = kmemdup(to_dw_dma_slave(data->dma_slave), | 1328 | dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; |
1329 | sizeof(struct dw_dma_slave), GFP_KERNEL); | ||
1330 | else | ||
1331 | dws = kzalloc(sizeof(struct dw_dma_slave), GFP_KERNEL); | ||
1332 | |||
1333 | dws->slave.dev = &pdev->dev; | ||
1334 | dws->slave.dma_dev = &dw_dmac0_device.dev; | ||
1335 | dws->slave.reg_width = DMA_SLAVE_WIDTH_32BIT; | ||
1336 | dws->cfg_hi = (DWC_CFGH_SRC_PER(0) | 1329 | dws->cfg_hi = (DWC_CFGH_SRC_PER(0) |
1337 | | DWC_CFGH_DST_PER(1)); | 1330 | | DWC_CFGH_DST_PER(1)); |
1338 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | 1331 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL |
1339 | | DWC_CFGL_HS_SRC_POL); | 1332 | | DWC_CFGL_HS_SRC_POL); |
1340 | 1333 | ||
1341 | data->dma_slave = &dws->slave; | ||
1342 | |||
1343 | if (platform_device_add_data(pdev, data, | 1334 | if (platform_device_add_data(pdev, data, |
1344 | sizeof(struct mci_platform_data))) | 1335 | sizeof(struct mci_platform_data))) |
1345 | goto fail; | 1336 | goto fail; |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 5ddad7bd60ac..0d428278356d 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -77,7 +77,7 @@ libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` | |||
77 | 77 | ||
78 | drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ | 78 | drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ |
79 | 79 | ||
80 | PALO := $(shell if which palo; then : ; \ | 80 | PALO := $(shell if (which palo 2>&1); then : ; \ |
81 | elif [ -x /sbin/palo ]; then echo /sbin/palo; \ | 81 | elif [ -x /sbin/palo ]; then echo /sbin/palo; \ |
82 | fi) | 82 | fi) |
83 | 83 | ||
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h
index db148313de5d..83095c5bb379 100644
--- a/arch/parisc/include/asm/byteorder.h
+++ b/arch/parisc/include/asm/byteorder.h
@@ -4,9 +4,10 @@ | |||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
6 | 6 | ||
7 | #ifdef __GNUC__ | 7 | #define __BIG_ENDIAN |
8 | #define __SWAB_64_THRU_32__ | ||
8 | 9 | ||
9 | static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) | 10 | static inline __attribute_const__ __u16 __arch_swab16(__u16 x) |
10 | { | 11 | { |
11 | __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */ | 12 | __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */ |
12 | "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */ | 13 | "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */ |
@@ -14,8 +15,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) | |||
14 | : "0" (x)); | 15 | : "0" (x)); |
15 | return x; | 16 | return x; |
16 | } | 17 | } |
18 | #define __arch_swab16 __arch_swab16 | ||
17 | 19 | ||
18 | static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x) | 20 | static inline __attribute_const__ __u32 __arch_swab24(__u32 x) |
19 | { | 21 | { |
20 | __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */ | 22 | __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */ |
21 | "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */ | 23 | "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */ |
@@ -25,7 +27,7 @@ static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x) | |||
25 | return x; | 27 | return x; |
26 | } | 28 | } |
27 | 29 | ||
28 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | 30 | static inline __attribute_const__ __u32 __arch_swab32(__u32 x) |
29 | { | 31 | { |
30 | unsigned int temp; | 32 | unsigned int temp; |
31 | __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */ | 33 | __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */ |
@@ -35,7 +37,7 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | |||
35 | : "0" (x)); | 37 | : "0" (x)); |
36 | return x; | 38 | return x; |
37 | } | 39 | } |
38 | 40 | #define __arch_swab32 __arch_swab32 | |
39 | 41 | ||
40 | #if BITS_PER_LONG > 32 | 42 | #if BITS_PER_LONG > 32 |
41 | /* | 43 | /* |
@@ -48,7 +50,8 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | |||
48 | ** HSHR 67452301 -> *6*4*2*0 into %0 | 50 | ** HSHR 67452301 -> *6*4*2*0 into %0 |
49 | ** OR %0 | %1 -> 76543210 into %0 (all done!) | 51 | ** OR %0 | %1 -> 76543210 into %0 (all done!) |
50 | */ | 52 | */ |
51 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) { | 53 | static inline __attribute_const__ __u64 __arch_swab64(__u64 x) |
54 | { | ||
52 | __u64 temp; | 55 | __u64 temp; |
53 | __asm__("permh,3210 %0, %0\n\t" | 56 | __asm__("permh,3210 %0, %0\n\t" |
54 | "hshl %0, 8, %1\n\t" | 57 | "hshl %0, 8, %1\n\t" |
@@ -58,25 +61,9 @@ static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) { | |||
58 | : "0" (x)); | 61 | : "0" (x)); |
59 | return x; | 62 | return x; |
60 | } | 63 | } |
61 | #define __arch__swab64(x) ___arch__swab64(x) | 64 | #define __arch_swab64 __arch_swab64 |
62 | #define __BYTEORDER_HAS_U64__ | 65 | #endif /* BITS_PER_LONG > 32 */ |
63 | #elif !defined(__STRICT_ANSI__) | ||
64 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) | ||
65 | { | ||
66 | __u32 t1 = ___arch__swab32((__u32) x); | ||
67 | __u32 t2 = ___arch__swab32((__u32) (x >> 32)); | ||
68 | return (((__u64) t1 << 32) | t2); | ||
69 | } | ||
70 | #define __arch__swab64(x) ___arch__swab64(x) | ||
71 | #define __BYTEORDER_HAS_U64__ | ||
72 | #endif | ||
73 | |||
74 | #define __arch__swab16(x) ___arch__swab16(x) | ||
75 | #define __arch__swab24(x) ___arch__swab24(x) | ||
76 | #define __arch__swab32(x) ___arch__swab32(x) | ||
77 | |||
78 | #endif /* __GNUC__ */ | ||
79 | 66 | ||
80 | #include <linux/byteorder/big_endian.h> | 67 | #include <linux/byteorder.h> |
81 | 68 | ||
82 | #endif /* _PARISC_BYTEORDER_H */ | 69 | #endif /* _PARISC_BYTEORDER_H */ |
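As a cross-check on the byteorder.h conversion above: the shd/dep assembly in __arch_swab32() implements an ordinary 32-bit byte swap. A plain-C reference, not part of the patch and shown only to make the expected result explicit:

#include <stdint.h>

/* Reference 32-bit byte swap: swab32_ref(0x12345678) == 0x78563412. */
static inline uint32_t swab32_ref(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}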
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index e9639ccc3fce..c84b2fcb18a9 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -182,7 +182,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | |||
182 | #endif | 182 | #endif |
183 | : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len) | 183 | : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len) |
184 | : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto) | 184 | : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto) |
185 | : "r19", "r20", "r21", "r22"); | 185 | : "r19", "r20", "r21", "r22", "memory"); |
186 | return csum_fold(sum); | 186 | return csum_fold(sum); |
187 | } | 187 | } |
188 | 188 | ||
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 55ddb1842107..d3031d1f9d03 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -4,12 +4,6 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/pgtable.h> | 5 | #include <asm/pgtable.h> |
6 | 6 | ||
7 | extern unsigned long parisc_vmerge_boundary; | ||
8 | extern unsigned long parisc_vmerge_max_size; | ||
9 | |||
10 | #define BIO_VMERGE_BOUNDARY parisc_vmerge_boundary | ||
11 | #define BIO_VMERGE_MAX_SIZE parisc_vmerge_max_size | ||
12 | |||
13 | #define virt_to_phys(a) ((unsigned long)__pa(a)) | 7 | #define virt_to_phys(a) ((unsigned long)__pa(a)) |
14 | #define phys_to_virt(a) __va(a) | 8 | #define phys_to_virt(a) __va(a) |
15 | #define virt_to_bus virt_to_phys | 9 | #define virt_to_bus virt_to_phys |
@@ -182,9 +176,9 @@ static inline void __raw_writeq(unsigned long long b, volatile void __iomem *add | |||
182 | 176 | ||
183 | /* readb can never be const, so use __fswab instead of le*_to_cpu */ | 177 | /* readb can never be const, so use __fswab instead of le*_to_cpu */ |
184 | #define readb(addr) __raw_readb(addr) | 178 | #define readb(addr) __raw_readb(addr) |
185 | #define readw(addr) __fswab16(__raw_readw(addr)) | 179 | #define readw(addr) le16_to_cpu(__raw_readw(addr)) |
186 | #define readl(addr) __fswab32(__raw_readl(addr)) | 180 | #define readl(addr) le32_to_cpu(__raw_readl(addr)) |
187 | #define readq(addr) __fswab64(__raw_readq(addr)) | 181 | #define readq(addr) le64_to_cpu(__raw_readq(addr)) |
188 | #define writeb(b, addr) __raw_writeb(b, addr) | 182 | #define writeb(b, addr) __raw_writeb(b, addr) |
189 | #define writew(b, addr) __raw_writew(cpu_to_le16(b), addr) | 183 | #define writew(b, addr) __raw_writew(cpu_to_le16(b), addr) |
190 | #define writel(b, addr) __raw_writel(cpu_to_le32(b), addr) | 184 | #define writel(b, addr) __raw_writel(cpu_to_le32(b), addr) |
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 85856c74ad1d..354b2aca990e 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -34,16 +34,21 @@ destroy_context(struct mm_struct *mm) | |||
34 | mm->context = 0; | 34 | mm->context = 0; |
35 | } | 35 | } |
36 | 36 | ||
37 | static inline void load_context(mm_context_t context) | 37 | static inline unsigned long __space_to_prot(mm_context_t context) |
38 | { | 38 | { |
39 | mtsp(context, 3); | ||
40 | #if SPACEID_SHIFT == 0 | 39 | #if SPACEID_SHIFT == 0 |
41 | mtctl(context << 1,8); | 40 | return context << 1; |
42 | #else | 41 | #else |
43 | mtctl(context >> (SPACEID_SHIFT - 1),8); | 42 | return context >> (SPACEID_SHIFT - 1); |
44 | #endif | 43 | #endif |
45 | } | 44 | } |
46 | 45 | ||
46 | static inline void load_context(mm_context_t context) | ||
47 | { | ||
48 | mtsp(context, 3); | ||
49 | mtctl(__space_to_prot(context), 8); | ||
50 | } | ||
51 | |||
47 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) | 52 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) |
48 | { | 53 | { |
49 | 54 | ||
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 3c9d34844c83..9d64df8754ba 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
18 | #include <asm/types.h> | 18 | #include <asm/types.h> |
19 | #include <asm/system.h> | 19 | #include <asm/system.h> |
20 | #include <asm/percpu.h> | ||
20 | #endif /* __ASSEMBLY__ */ | 21 | #endif /* __ASSEMBLY__ */ |
21 | 22 | ||
22 | #define KERNEL_STACK_SIZE (4*PAGE_SIZE) | 23 | #define KERNEL_STACK_SIZE (4*PAGE_SIZE) |
@@ -109,8 +110,7 @@ struct cpuinfo_parisc { | |||
109 | }; | 110 | }; |
110 | 111 | ||
111 | extern struct system_cpuinfo_parisc boot_cpu_data; | 112 | extern struct system_cpuinfo_parisc boot_cpu_data; |
112 | extern struct cpuinfo_parisc cpu_data[NR_CPUS]; | 113 | DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data); |
113 | #define current_cpu_data cpu_data[smp_processor_id()] | ||
114 | 114 | ||
115 | #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) | 115 | #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) |
116 | 116 | ||
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4878b9501f24..1c6dbb6f6e56 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -241,4 +241,6 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo | |||
241 | #define __copy_to_user_inatomic __copy_to_user | 241 | #define __copy_to_user_inatomic __copy_to_user |
242 | #define __copy_from_user_inatomic __copy_from_user | 242 | #define __copy_from_user_inatomic __copy_from_user |
243 | 243 | ||
244 | int fixup_exception(struct pt_regs *regs); | ||
245 | |||
244 | #endif /* __PARISC_UACCESS_H */ | 246 | #endif /* __PARISC_UACCESS_H */ |
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 884b7ce16a3b..994bcd980909 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -549,6 +549,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv) | |||
549 | return match_device(to_parisc_driver(drv), to_parisc_device(dev)); | 549 | return match_device(to_parisc_driver(drv), to_parisc_device(dev)); |
550 | } | 550 | } |
551 | 551 | ||
552 | static ssize_t make_modalias(struct device *dev, char *buf) | ||
553 | { | ||
554 | const struct parisc_device *padev = to_parisc_device(dev); | ||
555 | const struct parisc_device_id *id = &padev->id; | ||
556 | |||
557 | return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n", | ||
558 | (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev, | ||
559 | (u32)id->sversion); | ||
560 | } | ||
561 | |||
562 | static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
563 | { | ||
564 | const struct parisc_device *padev; | ||
565 | char modalias[40]; | ||
566 | |||
567 | if (!dev) | ||
568 | return -ENODEV; | ||
569 | |||
570 | padev = to_parisc_device(dev); | ||
571 | if (!padev) | ||
572 | return -ENODEV; | ||
573 | |||
574 | if (add_uevent_var(env, "PARISC_NAME=%s", padev->name)) | ||
575 | return -ENOMEM; | ||
576 | |||
577 | make_modalias(dev, modalias); | ||
578 | if (add_uevent_var(env, "MODALIAS=%s", modalias)) | ||
579 | return -ENOMEM; | ||
580 | |||
581 | return 0; | ||
582 | } | ||
583 | |||
552 | #define pa_dev_attr(name, field, format_string) \ | 584 | #define pa_dev_attr(name, field, format_string) \ |
553 | static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \ | 585 | static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \ |
554 | { \ | 586 | { \ |
@@ -566,12 +598,7 @@ pa_dev_attr_id(sversion, "0x%05x\n"); | |||
566 | 598 | ||
567 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) | 599 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) |
568 | { | 600 | { |
569 | struct parisc_device *padev = to_parisc_device(dev); | 601 | return make_modalias(dev, buf); |
570 | struct parisc_device_id *id = &padev->id; | ||
571 | |||
572 | return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n", | ||
573 | (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev, | ||
574 | (u32)id->sversion); | ||
575 | } | 602 | } |
576 | 603 | ||
577 | static struct device_attribute parisc_device_attrs[] = { | 604 | static struct device_attribute parisc_device_attrs[] = { |
@@ -587,6 +614,7 @@ static struct device_attribute parisc_device_attrs[] = { | |||
587 | struct bus_type parisc_bus_type = { | 614 | struct bus_type parisc_bus_type = { |
588 | .name = "parisc", | 615 | .name = "parisc", |
589 | .match = parisc_generic_match, | 616 | .match = parisc_generic_match, |
617 | .uevent = parisc_uevent, | ||
590 | .dev_attrs = parisc_device_attrs, | 618 | .dev_attrs = parisc_device_attrs, |
591 | .probe = parisc_driver_probe, | 619 | .probe = parisc_driver_probe, |
592 | .remove = parisc_driver_remove, | 620 | .remove = parisc_driver_remove, |
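To make the new uevent support in drivers.c concrete: make_modalias() emits a string in the form parisc:t%02Xhv%04Xrev%02Xsv%08X, which is what module aliases are matched against. A standalone sketch using that same format string with hypothetical id values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical parisc_device_id values, for illustration only. */
	unsigned int hw_type = 0x00, hversion = 0x0479;
	unsigned int hversion_rev = 0x00, sversion = 0x00000081;

	/* Same format as make_modalias(); prints parisc:t00hv0479rev00sv00000081 */
	printf("parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
	       hw_type, hversion, hversion_rev, sversion);
	return 0;
}

A driver built as a module can then be auto-loaded when udev matches this MODALIAS value against the aliases generated from the driver's parisc device id table.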
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 2cbf13b3ef11..5595a2f31181 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -80,6 +80,7 @@ END(hpmc_pim_data) | |||
80 | 80 | ||
81 | .import intr_save, code | 81 | .import intr_save, code |
82 | ENTRY(os_hpmc) | 82 | ENTRY(os_hpmc) |
83 | .os_hpmc: | ||
83 | 84 | ||
84 | /* | 85 | /* |
85 | * registers modified: | 86 | * registers modified: |
@@ -295,5 +296,10 @@ os_hpmc_6: | |||
295 | b . | 296 | b . |
296 | nop | 297 | nop |
297 | ENDPROC(os_hpmc) | 298 | ENDPROC(os_hpmc) |
298 | ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */ | 299 | .os_hpmc_end: |
299 | nop | 300 | nop |
301 | .data | ||
302 | .align 4 | ||
303 | .export os_hpmc_size | ||
304 | os_hpmc_size: | ||
305 | .word .os_hpmc_end-.os_hpmc | ||
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 4cea935e2f99..ac2c822928c7 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -298,7 +298,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu) | |||
298 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); | 298 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); |
299 | #endif | 299 | #endif |
300 | 300 | ||
301 | return cpu_data[cpu].txn_addr; | 301 | return per_cpu(cpu_data, cpu).txn_addr; |
302 | } | 302 | } |
303 | 303 | ||
304 | 304 | ||
@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq) | |||
309 | next_cpu++; /* assign to "next" CPU we want this bugger on */ | 309 | next_cpu++; /* assign to "next" CPU we want this bugger on */ |
310 | 310 | ||
311 | /* validate entry */ | 311 | /* validate entry */ |
312 | while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr || | 312 | while ((next_cpu < NR_CPUS) && |
313 | !cpu_online(next_cpu))) | 313 | (!per_cpu(cpu_data, next_cpu).txn_addr || |
314 | !cpu_online(next_cpu))) | ||
314 | next_cpu++; | 315 | next_cpu++; |
315 | 316 | ||
316 | if (next_cpu >= NR_CPUS) | 317 | if (next_cpu >= NR_CPUS) |
@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
359 | printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", | 360 | printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", |
360 | irq, smp_processor_id(), cpu); | 361 | irq, smp_processor_id(), cpu); |
361 | gsc_writel(irq + CPU_IRQ_BASE, | 362 | gsc_writel(irq + CPU_IRQ_BASE, |
362 | cpu_data[cpu].hpa); | 363 | per_cpu(cpu_data, cpu).hpa); |
363 | goto set_out; | 364 | goto set_out; |
364 | } | 365 | } |
365 | #endif | 366 | #endif |
@@ -421,5 +422,5 @@ void __init init_IRQ(void) | |||
421 | 422 | ||
422 | void ack_bad_irq(unsigned int irq) | 423 | void ack_bad_irq(unsigned int irq) |
423 | { | 424 | { |
424 | printk("unexpected IRQ %d\n", irq); | 425 | printk(KERN_WARNING "unexpected IRQ %d\n", irq); |
425 | } | 426 | } |
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index ccb68090781e..1ff366cb9685 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -52,7 +52,7 @@ | |||
52 | #include <linux/tty.h> | 52 | #include <linux/tty.h> |
53 | #include <asm/pdc.h> /* for iodc_call() proto and friends */ | 53 | #include <asm/pdc.h> /* for iodc_call() proto and friends */ |
54 | 54 | ||
55 | static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED; | 55 | static DEFINE_SPINLOCK(pdc_console_lock); |
56 | 56 | ||
57 | static void pdc_console_write(struct console *co, const char *s, unsigned count) | 57 | static void pdc_console_write(struct console *co, const char *s, unsigned count) |
58 | { | 58 | { |
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index f696f57faa15..75099efb3bf3 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -541,9 +541,9 @@ static int __init perf_init(void) | |||
541 | spin_lock_init(&perf_lock); | 541 | spin_lock_init(&perf_lock); |
542 | 542 | ||
543 | /* TODO: this only lets us access the first cpu.. what to do for SMP? */ | 543 | /* TODO: this only lets us access the first cpu.. what to do for SMP? */ |
544 | cpu_device = cpu_data[0].dev; | 544 | cpu_device = per_cpu(cpu_data, 0).dev; |
545 | printk("Performance monitoring counters enabled for %s\n", | 545 | printk("Performance monitoring counters enabled for %s\n", |
546 | cpu_data[0].dev->name); | 546 | per_cpu(cpu_data, 0).dev->name); |
547 | 547 | ||
548 | return 0; | 548 | return 0; |
549 | } | 549 | } |
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 370086fb8333..ecb609342feb 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -3,7 +3,7 @@ | |||
3 | * Initial setup-routines for HP 9000 based hardware. | 3 | * Initial setup-routines for HP 9000 based hardware. |
4 | * | 4 | * |
5 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | 5 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds |
6 | * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de> | 6 | * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de> |
7 | * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf) | 7 | * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf) |
8 | * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net> | 8 | * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net> |
9 | * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org> | 9 | * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org> |
@@ -46,7 +46,7 @@ | |||
46 | struct system_cpuinfo_parisc boot_cpu_data __read_mostly; | 46 | struct system_cpuinfo_parisc boot_cpu_data __read_mostly; |
47 | EXPORT_SYMBOL(boot_cpu_data); | 47 | EXPORT_SYMBOL(boot_cpu_data); |
48 | 48 | ||
49 | struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly; | 49 | DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data); |
50 | 50 | ||
51 | extern int update_cr16_clocksource(void); /* from time.c */ | 51 | extern int update_cr16_clocksource(void); /* from time.c */ |
52 | 52 | ||
@@ -69,6 +69,23 @@ extern int update_cr16_clocksource(void); /* from time.c */ | |||
69 | */ | 69 | */ |
70 | 70 | ||
71 | /** | 71 | /** |
72 | * init_cpu_profiler - enable/setup per cpu profiling hooks. | ||
73 | * @cpunum: The processor instance. | ||
74 | * | ||
75 | * FIXME: doesn't do much yet... | ||
76 | */ | ||
77 | static void __cpuinit | ||
78 | init_percpu_prof(unsigned long cpunum) | ||
79 | { | ||
80 | struct cpuinfo_parisc *p; | ||
81 | |||
82 | p = &per_cpu(cpu_data, cpunum); | ||
83 | p->prof_counter = 1; | ||
84 | p->prof_multiplier = 1; | ||
85 | } | ||
86 | |||
87 | |||
88 | /** | ||
72 | * processor_probe - Determine if processor driver should claim this device. | 89 | * processor_probe - Determine if processor driver should claim this device. |
73 | * @dev: The device which has been found. | 90 | * @dev: The device which has been found. |
74 | * | 91 | * |
@@ -147,7 +164,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev) | |||
147 | } | 164 | } |
148 | #endif | 165 | #endif |
149 | 166 | ||
150 | p = &cpu_data[cpuid]; | 167 | p = &per_cpu(cpu_data, cpuid); |
151 | boot_cpu_data.cpu_count++; | 168 | boot_cpu_data.cpu_count++; |
152 | 169 | ||
153 | /* initialize counters - CPU 0 gets it_value set in time_init() */ | 170 | /* initialize counters - CPU 0 gets it_value set in time_init() */ |
@@ -162,12 +179,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev) | |||
162 | #ifdef CONFIG_SMP | 179 | #ifdef CONFIG_SMP |
163 | /* | 180 | /* |
164 | ** FIXME: review if any other initialization is clobbered | 181 | ** FIXME: review if any other initialization is clobbered |
165 | ** for boot_cpu by the above memset(). | 182 | ** for boot_cpu by the above memset(). |
166 | */ | 183 | */ |
167 | 184 | init_percpu_prof(cpuid); | |
168 | /* stolen from init_percpu_prof() */ | ||
169 | cpu_data[cpuid].prof_counter = 1; | ||
170 | cpu_data[cpuid].prof_multiplier = 1; | ||
171 | #endif | 185 | #endif |
172 | 186 | ||
173 | /* | 187 | /* |
@@ -261,19 +275,6 @@ void __init collect_boot_cpu_data(void) | |||
261 | } | 275 | } |
262 | 276 | ||
263 | 277 | ||
264 | /** | ||
265 | * init_cpu_profiler - enable/setup per cpu profiling hooks. | ||
266 | * @cpunum: The processor instance. | ||
267 | * | ||
268 | * FIXME: doesn't do much yet... | ||
269 | */ | ||
270 | static inline void __init | ||
271 | init_percpu_prof(int cpunum) | ||
272 | { | ||
273 | cpu_data[cpunum].prof_counter = 1; | ||
274 | cpu_data[cpunum].prof_multiplier = 1; | ||
275 | } | ||
276 | |||
277 | 278 | ||
278 | /** | 279 | /** |
279 | * init_per_cpu - Handle individual processor initializations. | 280 | * init_per_cpu - Handle individual processor initializations. |
@@ -293,7 +294,7 @@ init_percpu_prof(int cpunum) | |||
293 | * | 294 | * |
294 | * o Enable CPU profiling hooks. | 295 | * o Enable CPU profiling hooks. |
295 | */ | 296 | */ |
296 | int __init init_per_cpu(int cpunum) | 297 | int __cpuinit init_per_cpu(int cpunum) |
297 | { | 298 | { |
298 | int ret; | 299 | int ret; |
299 | struct pdc_coproc_cfg coproc_cfg; | 300 | struct pdc_coproc_cfg coproc_cfg; |
@@ -307,8 +308,8 @@ int __init init_per_cpu(int cpunum) | |||
307 | /* FWIW, FP rev/model is a more accurate way to determine | 308 | /* FWIW, FP rev/model is a more accurate way to determine |
308 | ** CPU type. CPU rev/model has some ambiguous cases. | 309 | ** CPU type. CPU rev/model has some ambiguous cases. |
309 | */ | 310 | */ |
310 | cpu_data[cpunum].fp_rev = coproc_cfg.revision; | 311 | per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; |
311 | cpu_data[cpunum].fp_model = coproc_cfg.model; | 312 | per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; |
312 | 313 | ||
313 | printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", | 314 | printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", |
314 | cpunum, coproc_cfg.revision, coproc_cfg.model); | 315 | cpunum, coproc_cfg.revision, coproc_cfg.model); |
@@ -344,16 +345,17 @@ int __init init_per_cpu(int cpunum) | |||
344 | int | 345 | int |
345 | show_cpuinfo (struct seq_file *m, void *v) | 346 | show_cpuinfo (struct seq_file *m, void *v) |
346 | { | 347 | { |
347 | int n; | 348 | unsigned long cpu; |
348 | 349 | ||
349 | for(n=0; n<boot_cpu_data.cpu_count; n++) { | 350 | for_each_online_cpu(cpu) { |
351 | const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); | ||
350 | #ifdef CONFIG_SMP | 352 | #ifdef CONFIG_SMP |
351 | if (0 == cpu_data[n].hpa) | 353 | if (0 == cpuinfo->hpa) |
352 | continue; | 354 | continue; |
353 | #endif | 355 | #endif |
354 | seq_printf(m, "processor\t: %d\n" | 356 | seq_printf(m, "processor\t: %lu\n" |
355 | "cpu family\t: PA-RISC %s\n", | 357 | "cpu family\t: PA-RISC %s\n", |
356 | n, boot_cpu_data.family_name); | 358 | cpu, boot_cpu_data.family_name); |
357 | 359 | ||
358 | seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name ); | 360 | seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name ); |
359 | 361 | ||
@@ -365,8 +367,8 @@ show_cpuinfo (struct seq_file *m, void *v) | |||
365 | seq_printf(m, "model\t\t: %s\n" | 367 | seq_printf(m, "model\t\t: %s\n" |
366 | "model name\t: %s\n", | 368 | "model name\t: %s\n", |
367 | boot_cpu_data.pdc.sys_model_name, | 369 | boot_cpu_data.pdc.sys_model_name, |
368 | cpu_data[n].dev ? | 370 | cpuinfo->dev ? |
369 | cpu_data[n].dev->name : "Unknown" ); | 371 | cpuinfo->dev->name : "Unknown"); |
370 | 372 | ||
371 | seq_printf(m, "hversion\t: 0x%08x\n" | 373 | seq_printf(m, "hversion\t: 0x%08x\n" |
372 | "sversion\t: 0x%08x\n", | 374 | "sversion\t: 0x%08x\n", |
@@ -377,8 +379,8 @@ show_cpuinfo (struct seq_file *m, void *v) | |||
377 | show_cache_info(m); | 379 | show_cache_info(m); |
378 | 380 | ||
379 | seq_printf(m, "bogomips\t: %lu.%02lu\n", | 381 | seq_printf(m, "bogomips\t: %lu.%02lu\n", |
380 | cpu_data[n].loops_per_jiffy / (500000 / HZ), | 382 | cpuinfo->loops_per_jiffy / (500000 / HZ), |
381 | (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100); | 383 | (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100); |
382 | 384 | ||
383 | seq_printf(m, "software id\t: %ld\n\n", | 385 | seq_printf(m, "software id\t: %ld\n\n", |
384 | boot_cpu_data.pdc.model.sw_id); | 386 | boot_cpu_data.pdc.model.sw_id); |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 7d27853ff8c8..82131ca8e05c 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -58,11 +58,6 @@ int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */ | |||
58 | EXPORT_SYMBOL(parisc_bus_is_phys); | 58 | EXPORT_SYMBOL(parisc_bus_is_phys); |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | /* This sets the vmerge boundary and size, it's here because it has to | ||
62 | * be available on all platforms (zero means no-virtual merging) */ | ||
63 | unsigned long parisc_vmerge_boundary = 0; | ||
64 | unsigned long parisc_vmerge_max_size = 0; | ||
65 | |||
66 | void __init setup_cmdline(char **cmdline_p) | 61 | void __init setup_cmdline(char **cmdline_p) |
67 | { | 62 | { |
68 | extern unsigned int boot_args[]; | 63 | extern unsigned int boot_args[]; |
@@ -321,7 +316,7 @@ static int __init parisc_init(void) | |||
321 | 316 | ||
322 | processor_init(); | 317 | processor_init(); |
323 | printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n", | 318 | printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n", |
324 | boot_cpu_data.cpu_count, | 319 | num_present_cpus(), |
325 | boot_cpu_data.cpu_name, | 320 | boot_cpu_data.cpu_name, |
326 | boot_cpu_data.cpu_hz / 1000000, | 321 | boot_cpu_data.cpu_hz / 1000000, |
327 | boot_cpu_data.cpu_hz % 1000000 ); | 322 | boot_cpu_data.cpu_hz % 1000000 ); |
@@ -387,8 +382,8 @@ void start_parisc(void) | |||
387 | if (ret >= 0 && coproc_cfg.ccr_functional) { | 382 | if (ret >= 0 && coproc_cfg.ccr_functional) { |
388 | mtctl(coproc_cfg.ccr_functional, 10); | 383 | mtctl(coproc_cfg.ccr_functional, 10); |
389 | 384 | ||
390 | cpu_data[cpunum].fp_rev = coproc_cfg.revision; | 385 | per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; |
391 | cpu_data[cpunum].fp_model = coproc_cfg.model; | 386 | per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; |
392 | 387 | ||
393 | asm volatile ("fstd %fr0,8(%sp)"); | 388 | asm volatile ("fstd %fr0,8(%sp)"); |
394 | } else { | 389 | } else { |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 80bc000523fa..9995d7ed5819 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -56,16 +56,17 @@ static int smp_debug_lvl = 0; | |||
56 | if (lvl >= smp_debug_lvl) \ | 56 | if (lvl >= smp_debug_lvl) \ |
57 | printk(printargs); | 57 | printk(printargs); |
58 | #else | 58 | #else |
59 | #define smp_debug(lvl, ...) | 59 | #define smp_debug(lvl, ...) do { } while(0) |
60 | #endif /* DEBUG_SMP */ | 60 | #endif /* DEBUG_SMP */ |
61 | 61 | ||
62 | DEFINE_SPINLOCK(smp_lock); | 62 | DEFINE_SPINLOCK(smp_lock); |
63 | 63 | ||
64 | volatile struct task_struct *smp_init_current_idle_task; | 64 | volatile struct task_struct *smp_init_current_idle_task; |
65 | 65 | ||
66 | static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */ | 66 | /* track which CPU is booting */ |
67 | static volatile int cpu_now_booting __cpuinitdata; | ||
67 | 68 | ||
68 | static int parisc_max_cpus __read_mostly = 1; | 69 | static int parisc_max_cpus __cpuinitdata = 1; |
69 | 70 | ||
70 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; | 71 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; |
71 | 72 | ||
@@ -123,7 +124,7 @@ irqreturn_t | |||
123 | ipi_interrupt(int irq, void *dev_id) | 124 | ipi_interrupt(int irq, void *dev_id) |
124 | { | 125 | { |
125 | int this_cpu = smp_processor_id(); | 126 | int this_cpu = smp_processor_id(); |
126 | struct cpuinfo_parisc *p = &cpu_data[this_cpu]; | 127 | struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); |
127 | unsigned long ops; | 128 | unsigned long ops; |
128 | unsigned long flags; | 129 | unsigned long flags; |
129 | 130 | ||
@@ -202,13 +203,13 @@ ipi_interrupt(int irq, void *dev_id) | |||
202 | static inline void | 203 | static inline void |
203 | ipi_send(int cpu, enum ipi_message_type op) | 204 | ipi_send(int cpu, enum ipi_message_type op) |
204 | { | 205 | { |
205 | struct cpuinfo_parisc *p = &cpu_data[cpu]; | 206 | struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); |
206 | spinlock_t *lock = &per_cpu(ipi_lock, cpu); | 207 | spinlock_t *lock = &per_cpu(ipi_lock, cpu); |
207 | unsigned long flags; | 208 | unsigned long flags; |
208 | 209 | ||
209 | spin_lock_irqsave(lock, flags); | 210 | spin_lock_irqsave(lock, flags); |
210 | p->pending_ipi |= 1 << op; | 211 | p->pending_ipi |= 1 << op; |
211 | gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa); | 212 | gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa); |
212 | spin_unlock_irqrestore(lock, flags); | 213 | spin_unlock_irqrestore(lock, flags); |
213 | } | 214 | } |
214 | 215 | ||
@@ -224,10 +225,7 @@ send_IPI_mask(cpumask_t mask, enum ipi_message_type op) | |||
224 | static inline void | 225 | static inline void |
225 | send_IPI_single(int dest_cpu, enum ipi_message_type op) | 226 | send_IPI_single(int dest_cpu, enum ipi_message_type op) |
226 | { | 227 | { |
227 | if (dest_cpu == NO_PROC_ID) { | 228 | BUG_ON(dest_cpu == NO_PROC_ID); |
228 | BUG(); | ||
229 | return; | ||
230 | } | ||
231 | 229 | ||
232 | ipi_send(dest_cpu, op); | 230 | ipi_send(dest_cpu, op); |
233 | } | 231 | } |
@@ -309,8 +307,7 @@ smp_cpu_init(int cpunum) | |||
309 | /* Initialise the idle task for this CPU */ | 307 | /* Initialise the idle task for this CPU */ |
310 | atomic_inc(&init_mm.mm_count); | 308 | atomic_inc(&init_mm.mm_count); |
311 | current->active_mm = &init_mm; | 309 | current->active_mm = &init_mm; |
312 | if(current->mm) | 310 | BUG_ON(current->mm); |
313 | BUG(); | ||
314 | enter_lazy_tlb(&init_mm, current); | 311 | enter_lazy_tlb(&init_mm, current); |
315 | 312 | ||
316 | init_IRQ(); /* make sure no IRQs are enabled or pending */ | 313 | init_IRQ(); /* make sure no IRQs are enabled or pending */ |
@@ -345,6 +342,7 @@ void __init smp_callin(void) | |||
345 | */ | 342 | */ |
346 | int __cpuinit smp_boot_one_cpu(int cpuid) | 343 | int __cpuinit smp_boot_one_cpu(int cpuid) |
347 | { | 344 | { |
345 | const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); | ||
348 | struct task_struct *idle; | 346 | struct task_struct *idle; |
349 | long timeout; | 347 | long timeout; |
350 | 348 | ||
@@ -376,7 +374,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid) | |||
376 | smp_init_current_idle_task = idle ; | 374 | smp_init_current_idle_task = idle ; |
377 | mb(); | 375 | mb(); |
378 | 376 | ||
379 | printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa); | 377 | printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa); |
380 | 378 | ||
381 | /* | 379 | /* |
382 | ** This gets PDC to release the CPU from a very tight loop. | 380 | ** This gets PDC to release the CPU from a very tight loop. |
@@ -387,7 +385,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid) | |||
387 | ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the | 385 | ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the |
388 | ** contents of memory are valid." | 386 | ** contents of memory are valid." |
389 | */ | 387 | */ |
390 | gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa); | 388 | gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa); |
391 | mb(); | 389 | mb(); |
392 | 390 | ||
393 | /* | 391 | /* |
@@ -419,12 +417,12 @@ alive: | |||
419 | return 0; | 417 | return 0; |
420 | } | 418 | } |
421 | 419 | ||
422 | void __devinit smp_prepare_boot_cpu(void) | 420 | void __init smp_prepare_boot_cpu(void) |
423 | { | 421 | { |
424 | int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */ | 422 | int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; |
425 | 423 | ||
426 | /* Setup BSP mappings */ | 424 | /* Setup BSP mappings */ |
427 | printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor); | 425 | printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor); |
428 | 426 | ||
429 | cpu_set(bootstrap_processor, cpu_online_map); | 427 | cpu_set(bootstrap_processor, cpu_online_map); |
430 | cpu_set(bootstrap_processor, cpu_present_map); | 428 | cpu_set(bootstrap_processor, cpu_present_map); |
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4d09203bc693..9d46c43a4152 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -60,7 +60,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
60 | unsigned long cycles_elapsed, ticks_elapsed; | 60 | unsigned long cycles_elapsed, ticks_elapsed; |
61 | unsigned long cycles_remainder; | 61 | unsigned long cycles_remainder; |
62 | unsigned int cpu = smp_processor_id(); | 62 | unsigned int cpu = smp_processor_id(); |
63 | struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu]; | 63 | struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); |
64 | 64 | ||
65 | /* gcc can optimize for "read-only" case with a local clocktick */ | 65 | /* gcc can optimize for "read-only" case with a local clocktick */ |
66 | unsigned long cpt = clocktick; | 66 | unsigned long cpt = clocktick; |
@@ -213,7 +213,7 @@ void __init start_cpu_itimer(void) | |||
213 | 213 | ||
214 | mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ | 214 | mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ |
215 | 215 | ||
216 | cpu_data[cpu].it_value = next_tick; | 216 | per_cpu(cpu_data, cpu).it_value = next_tick; |
217 | } | 217 | } |
218 | 218 | ||
219 | struct platform_device rtc_parisc_dev = { | 219 | struct platform_device rtc_parisc_dev = { |
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index d71cb018a21e..f5159381fdd6 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -22,14 +22,14 @@ | |||
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <linux/cache.h> | 23 | #include <linux/cache.h> |
24 | 24 | ||
25 | static struct cpu cpu_devices[NR_CPUS] __read_mostly; | 25 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
26 | 26 | ||
27 | static int __init topology_init(void) | 27 | static int __init topology_init(void) |
28 | { | 28 | { |
29 | int num; | 29 | int num; |
30 | 30 | ||
31 | for_each_present_cpu(num) { | 31 | for_each_present_cpu(num) { |
32 | register_cpu(&cpu_devices[num], num); | 32 | register_cpu(&per_cpu(cpu_devices, num), num); |
33 | } | 33 | } |
34 | return 0; | 34 | return 0; |
35 | } | 35 | } |
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4c771cd580ec..ba658d2086f7 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -745,6 +745,10 @@ void handle_interruption(int code, struct pt_regs *regs) | |||
745 | /* Fall Through */ | 745 | /* Fall Through */ |
746 | case 27: | 746 | case 27: |
747 | /* Data memory protection ID trap */ | 747 | /* Data memory protection ID trap */ |
748 | if (code == 27 && !user_mode(regs) && | ||
749 | fixup_exception(regs)) | ||
750 | return; | ||
751 | |||
748 | die_if_kernel("Protection id trap", regs, code); | 752 | die_if_kernel("Protection id trap", regs, code); |
749 | si.si_code = SEGV_MAPERR; | 753 | si.si_code = SEGV_MAPERR; |
750 | si.si_signo = SIGSEGV; | 754 | si.si_signo = SIGSEGV; |
@@ -821,8 +825,8 @@ void handle_interruption(int code, struct pt_regs *regs) | |||
821 | 825 | ||
822 | int __init check_ivt(void *iva) | 826 | int __init check_ivt(void *iva) |
823 | { | 827 | { |
828 | extern u32 os_hpmc_size; | ||
824 | extern const u32 os_hpmc[]; | 829 | extern const u32 os_hpmc[]; |
825 | extern const u32 os_hpmc_end[]; | ||
826 | 830 | ||
827 | int i; | 831 | int i; |
828 | u32 check = 0; | 832 | u32 check = 0; |
@@ -839,8 +843,7 @@ int __init check_ivt(void *iva) | |||
839 | *ivap++ = 0; | 843 | *ivap++ = 0; |
840 | 844 | ||
841 | /* Compute Checksum for HPMC handler */ | 845 | /* Compute Checksum for HPMC handler */ |
842 | 846 | length = os_hpmc_size; | |
843 | length = os_hpmc_end - os_hpmc; | ||
844 | ivap[7] = length; | 847 | ivap[7] = length; |
845 | 848 | ||
846 | hpmcp = (u32 *)os_hpmc; | 849 | hpmcp = (u32 *)os_hpmc; |
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 6773c582e457..69dad5a850a8 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -372,7 +372,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct | |||
372 | struct pt_regs *r = &t->thread.regs; | 372 | struct pt_regs *r = &t->thread.regs; |
373 | struct pt_regs *r2; | 373 | struct pt_regs *r2; |
374 | 374 | ||
375 | r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL); | 375 | r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC); |
376 | if (!r2) | 376 | if (!r2) |
377 | return; | 377 | return; |
378 | *r2 = *r; | 378 | *r2 = *r; |
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 9abed07db7fc..5069e8b2ca71 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -261,7 +261,7 @@ static const struct iomap_ops iomem_ops = { | |||
261 | iomem_write32r, | 261 | iomem_write32r, |
262 | }; | 262 | }; |
263 | 263 | ||
264 | const struct iomap_ops *iomap_ops[8] = { | 264 | static const struct iomap_ops *iomap_ops[8] = { |
265 | [0] = &ioport_ops, | 265 | [0] = &ioport_ops, |
266 | [7] = &iomem_ops | 266 | [7] = &iomem_ops |
267 | }; | 267 | }; |
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 2d68431fc22e..bbda909c866e 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -275,7 +275,7 @@ handle_store_error: | |||
275 | 275 | ||
276 | 276 | ||
277 | /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ | 277 | /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ |
278 | unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) | 278 | static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) |
279 | { | 279 | { |
280 | register unsigned long src, dst, t1, t2, t3; | 280 | register unsigned long src, dst, t1, t2, t3; |
281 | register unsigned char *pcs, *pcd; | 281 | register unsigned char *pcs, *pcd; |
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index b2e3e9a8cece..92c7fa4ecc3f 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -139,13 +139,41 @@ parisc_acctyp(unsigned long code, unsigned int inst) | |||
139 | } | 139 | } |
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | int fixup_exception(struct pt_regs *regs) | ||
143 | { | ||
144 | const struct exception_table_entry *fix; | ||
145 | |||
146 | fix = search_exception_tables(regs->iaoq[0]); | ||
147 | if (fix) { | ||
148 | struct exception_data *d; | ||
149 | d = &__get_cpu_var(exception_data); | ||
150 | d->fault_ip = regs->iaoq[0]; | ||
151 | d->fault_space = regs->isr; | ||
152 | d->fault_addr = regs->ior; | ||
153 | |||
154 | regs->iaoq[0] = ((fix->fixup) & ~3); | ||
155 | /* | ||
156 | * NOTE: In some cases the faulting instruction | ||
157 | * may be in the delay slot of a branch. We | ||
158 | * don't want to take the branch, so we don't | ||
159 | * increment iaoq[1], instead we set it to be | ||
160 | * iaoq[0]+4, and clear the B bit in the PSW | ||
161 | */ | ||
162 | regs->iaoq[1] = regs->iaoq[0] + 4; | ||
163 | regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */ | ||
164 | |||
165 | return 1; | ||
166 | } | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
142 | void do_page_fault(struct pt_regs *regs, unsigned long code, | 171 | void do_page_fault(struct pt_regs *regs, unsigned long code, |
143 | unsigned long address) | 172 | unsigned long address) |
144 | { | 173 | { |
145 | struct vm_area_struct *vma, *prev_vma; | 174 | struct vm_area_struct *vma, *prev_vma; |
146 | struct task_struct *tsk = current; | 175 | struct task_struct *tsk = current; |
147 | struct mm_struct *mm = tsk->mm; | 176 | struct mm_struct *mm = tsk->mm; |
148 | const struct exception_table_entry *fix; | ||
149 | unsigned long acc_type; | 177 | unsigned long acc_type; |
150 | int fault; | 178 | int fault; |
151 | 179 | ||
@@ -229,32 +257,8 @@ bad_area: | |||
229 | 257 | ||
230 | no_context: | 258 | no_context: |
231 | 259 | ||
232 | if (!user_mode(regs)) { | 260 | if (!user_mode(regs) && fixup_exception(regs)) { |
233 | fix = search_exception_tables(regs->iaoq[0]); | 261 | return; |
234 | |||
235 | if (fix) { | ||
236 | struct exception_data *d; | ||
237 | |||
238 | d = &__get_cpu_var(exception_data); | ||
239 | d->fault_ip = regs->iaoq[0]; | ||
240 | d->fault_space = regs->isr; | ||
241 | d->fault_addr = regs->ior; | ||
242 | |||
243 | regs->iaoq[0] = ((fix->fixup) & ~3); | ||
244 | |||
245 | /* | ||
246 | * NOTE: In some cases the faulting instruction | ||
247 | * may be in the delay slot of a branch. We | ||
248 | * don't want to take the branch, so we don't | ||
249 | * increment iaoq[1], instead we set it to be | ||
250 | * iaoq[0]+4, and clear the B bit in the PSW | ||
251 | */ | ||
252 | |||
253 | regs->iaoq[1] = regs->iaoq[0] + 4; | ||
254 | regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */ | ||
255 | |||
256 | return; | ||
257 | } | ||
258 | } | 262 | } |
259 | 263 | ||
260 | parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); | 264 | parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); |
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index dcbf1be149f3..f21147f3626a 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -28,351 +28,18 @@ | |||
28 | #include <linux/async_tx.h> | 28 | #include <linux/async_tx.h> |
29 | 29 | ||
30 | #ifdef CONFIG_DMA_ENGINE | 30 | #ifdef CONFIG_DMA_ENGINE |
31 | static enum dma_state_client | 31 | static int __init async_tx_init(void) |
32 | dma_channel_add_remove(struct dma_client *client, | ||
33 | struct dma_chan *chan, enum dma_state state); | ||
34 | |||
35 | static struct dma_client async_tx_dma = { | ||
36 | .event_callback = dma_channel_add_remove, | ||
37 | /* .cap_mask == 0 defaults to all channels */ | ||
38 | }; | ||
39 | |||
40 | /** | ||
41 | * dma_cap_mask_all - enable iteration over all operation types | ||
42 | */ | ||
43 | static dma_cap_mask_t dma_cap_mask_all; | ||
44 | |||
45 | /** | ||
46 | * chan_ref_percpu - tracks channel allocations per core/opertion | ||
47 | */ | ||
48 | struct chan_ref_percpu { | ||
49 | struct dma_chan_ref *ref; | ||
50 | }; | ||
51 | |||
52 | static int channel_table_initialized; | ||
53 | static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END]; | ||
54 | |||
55 | /** | ||
56 | * async_tx_lock - protect modification of async_tx_master_list and serialize | ||
57 | * rebalance operations | ||
58 | */ | ||
59 | static spinlock_t async_tx_lock; | ||
60 | |||
61 | static LIST_HEAD(async_tx_master_list); | ||
62 | |||
63 | /* async_tx_issue_pending_all - start all transactions on all channels */ | ||
64 | void async_tx_issue_pending_all(void) | ||
65 | { | ||
66 | struct dma_chan_ref *ref; | ||
67 | |||
68 | rcu_read_lock(); | ||
69 | list_for_each_entry_rcu(ref, &async_tx_master_list, node) | ||
70 | ref->chan->device->device_issue_pending(ref->chan); | ||
71 | rcu_read_unlock(); | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(async_tx_issue_pending_all); | ||
74 | |||
75 | /* dma_wait_for_async_tx - spin wait for a transcation to complete | ||
76 | * @tx: transaction to wait on | ||
77 | */ | ||
78 | enum dma_status | ||
79 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
80 | { | ||
81 | enum dma_status status; | ||
82 | struct dma_async_tx_descriptor *iter; | ||
83 | struct dma_async_tx_descriptor *parent; | ||
84 | |||
85 | if (!tx) | ||
86 | return DMA_SUCCESS; | ||
87 | |||
88 | /* poll through the dependency chain, return when tx is complete */ | ||
89 | do { | ||
90 | iter = tx; | ||
91 | |||
92 | /* find the root of the unsubmitted dependency chain */ | ||
93 | do { | ||
94 | parent = iter->parent; | ||
95 | if (!parent) | ||
96 | break; | ||
97 | else | ||
98 | iter = parent; | ||
99 | } while (parent); | ||
100 | |||
101 | /* there is a small window for ->parent == NULL and | ||
102 | * ->cookie == -EBUSY | ||
103 | */ | ||
104 | while (iter->cookie == -EBUSY) | ||
105 | cpu_relax(); | ||
106 | |||
107 | status = dma_sync_wait(iter->chan, iter->cookie); | ||
108 | } while (status == DMA_IN_PROGRESS || (iter != tx)); | ||
109 | |||
110 | return status; | ||
111 | } | ||
112 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); | ||
113 | |||
114 | /* async_tx_run_dependencies - helper routine for dma drivers to process | ||
115 | * (start) dependent operations on their target channel | ||
116 | * @tx: transaction with dependencies | ||
117 | */ | ||
118 | void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx) | ||
119 | { | ||
120 | struct dma_async_tx_descriptor *dep = tx->next; | ||
121 | struct dma_async_tx_descriptor *dep_next; | ||
122 | struct dma_chan *chan; | ||
123 | |||
124 | if (!dep) | ||
125 | return; | ||
126 | |||
127 | chan = dep->chan; | ||
128 | |||
129 | /* keep submitting up until a channel switch is detected | ||
130 | * in that case we will be called again as a result of | ||
131 | * processing the interrupt from async_tx_channel_switch | ||
132 | */ | ||
133 | for (; dep; dep = dep_next) { | ||
134 | spin_lock_bh(&dep->lock); | ||
135 | dep->parent = NULL; | ||
136 | dep_next = dep->next; | ||
137 | if (dep_next && dep_next->chan == chan) | ||
138 | dep->next = NULL; /* ->next will be submitted */ | ||
139 | else | ||
140 | dep_next = NULL; /* submit current dep and terminate */ | ||
141 | spin_unlock_bh(&dep->lock); | ||
142 | |||
143 | dep->tx_submit(dep); | ||
144 | } | ||
145 | |||
146 | chan->device->device_issue_pending(chan); | ||
147 | } | ||
148 | EXPORT_SYMBOL_GPL(async_tx_run_dependencies); | ||
149 | |||
150 | static void | ||
151 | free_dma_chan_ref(struct rcu_head *rcu) | ||
152 | { | ||
153 | struct dma_chan_ref *ref; | ||
154 | ref = container_of(rcu, struct dma_chan_ref, rcu); | ||
155 | kfree(ref); | ||
156 | } | ||
157 | |||
158 | static void | ||
159 | init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan) | ||
160 | { | ||
161 | INIT_LIST_HEAD(&ref->node); | ||
162 | INIT_RCU_HEAD(&ref->rcu); | ||
163 | ref->chan = chan; | ||
164 | atomic_set(&ref->count, 0); | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * get_chan_ref_by_cap - returns the nth channel of the given capability | ||
169 | * defaults to returning the channel with the desired capability and the | ||
170 | * lowest reference count if the index can not be satisfied | ||
171 | * @cap: capability to match | ||
172 | * @index: nth channel desired, passing -1 has the effect of forcing the | ||
173 | * default return value | ||
174 | */ | ||
175 | static struct dma_chan_ref * | ||
176 | get_chan_ref_by_cap(enum dma_transaction_type cap, int index) | ||
177 | { | ||
178 | struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref; | ||
179 | |||
180 | rcu_read_lock(); | ||
181 | list_for_each_entry_rcu(ref, &async_tx_master_list, node) | ||
182 | if (dma_has_cap(cap, ref->chan->device->cap_mask)) { | ||
183 | if (!min_ref) | ||
184 | min_ref = ref; | ||
185 | else if (atomic_read(&ref->count) < | ||
186 | atomic_read(&min_ref->count)) | ||
187 | min_ref = ref; | ||
188 | |||
189 | if (index-- == 0) { | ||
190 | ret_ref = ref; | ||
191 | break; | ||
192 | } | ||
193 | } | ||
194 | rcu_read_unlock(); | ||
195 | |||
196 | if (!ret_ref) | ||
197 | ret_ref = min_ref; | ||
198 | |||
199 | if (ret_ref) | ||
200 | atomic_inc(&ret_ref->count); | ||
201 | |||
202 | return ret_ref; | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * async_tx_rebalance - redistribute the available channels, optimize | ||
207 | * for cpu isolation in the SMP case, and operation isolation in the | ||
208 | * uniprocessor case | ||
209 | */ | ||
210 | static void async_tx_rebalance(void) | ||
211 | { | ||
212 | int cpu, cap, cpu_idx = 0; | ||
213 | unsigned long flags; | ||
214 | |||
215 | if (!channel_table_initialized) | ||
216 | return; | ||
217 | |||
218 | spin_lock_irqsave(&async_tx_lock, flags); | ||
219 | |||
220 | /* undo the last distribution */ | ||
221 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
222 | for_each_possible_cpu(cpu) { | ||
223 | struct dma_chan_ref *ref = | ||
224 | per_cpu_ptr(channel_table[cap], cpu)->ref; | ||
225 | if (ref) { | ||
226 | atomic_set(&ref->count, 0); | ||
227 | per_cpu_ptr(channel_table[cap], cpu)->ref = | ||
228 | NULL; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
233 | for_each_online_cpu(cpu) { | ||
234 | struct dma_chan_ref *new; | ||
235 | if (NR_CPUS > 1) | ||
236 | new = get_chan_ref_by_cap(cap, cpu_idx++); | ||
237 | else | ||
238 | new = get_chan_ref_by_cap(cap, -1); | ||
239 | |||
240 | per_cpu_ptr(channel_table[cap], cpu)->ref = new; | ||
241 | } | ||
242 | |||
243 | spin_unlock_irqrestore(&async_tx_lock, flags); | ||
244 | } | ||
245 | |||
246 | static enum dma_state_client | ||
247 | dma_channel_add_remove(struct dma_client *client, | ||
248 | struct dma_chan *chan, enum dma_state state) | ||
249 | { | ||
250 | unsigned long found, flags; | ||
251 | struct dma_chan_ref *master_ref, *ref; | ||
252 | enum dma_state_client ack = DMA_DUP; /* default: take no action */ | ||
253 | |||
254 | switch (state) { | ||
255 | case DMA_RESOURCE_AVAILABLE: | ||
256 | found = 0; | ||
257 | rcu_read_lock(); | ||
258 | list_for_each_entry_rcu(ref, &async_tx_master_list, node) | ||
259 | if (ref->chan == chan) { | ||
260 | found = 1; | ||
261 | break; | ||
262 | } | ||
263 | rcu_read_unlock(); | ||
264 | |||
265 | pr_debug("async_tx: dma resource available [%s]\n", | ||
266 | found ? "old" : "new"); | ||
267 | |||
268 | if (!found) | ||
269 | ack = DMA_ACK; | ||
270 | else | ||
271 | break; | ||
272 | |||
273 | /* add the channel to the generic management list */ | ||
274 | master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL); | ||
275 | if (master_ref) { | ||
276 | /* keep a reference until async_tx is unloaded */ | ||
277 | dma_chan_get(chan); | ||
278 | init_dma_chan_ref(master_ref, chan); | ||
279 | spin_lock_irqsave(&async_tx_lock, flags); | ||
280 | list_add_tail_rcu(&master_ref->node, | ||
281 | &async_tx_master_list); | ||
282 | spin_unlock_irqrestore(&async_tx_lock, | ||
283 | flags); | ||
284 | } else { | ||
285 | printk(KERN_WARNING "async_tx: unable to create" | ||
286 | " new master entry in response to" | ||
287 | " a DMA_RESOURCE_ADDED event" | ||
288 | " (-ENOMEM)\n"); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | async_tx_rebalance(); | ||
293 | break; | ||
294 | case DMA_RESOURCE_REMOVED: | ||
295 | found = 0; | ||
296 | spin_lock_irqsave(&async_tx_lock, flags); | ||
297 | list_for_each_entry(ref, &async_tx_master_list, node) | ||
298 | if (ref->chan == chan) { | ||
299 | /* permit backing devices to go away */ | ||
300 | dma_chan_put(ref->chan); | ||
301 | list_del_rcu(&ref->node); | ||
302 | call_rcu(&ref->rcu, free_dma_chan_ref); | ||
303 | found = 1; | ||
304 | break; | ||
305 | } | ||
306 | spin_unlock_irqrestore(&async_tx_lock, flags); | ||
307 | |||
308 | pr_debug("async_tx: dma resource removed [%s]\n", | ||
309 | found ? "ours" : "not ours"); | ||
310 | |||
311 | if (found) | ||
312 | ack = DMA_ACK; | ||
313 | else | ||
314 | break; | ||
315 | |||
316 | async_tx_rebalance(); | ||
317 | break; | ||
318 | case DMA_RESOURCE_SUSPEND: | ||
319 | case DMA_RESOURCE_RESUME: | ||
320 | printk(KERN_WARNING "async_tx: does not support dma channel" | ||
321 | " suspend/resume\n"); | ||
322 | break; | ||
323 | default: | ||
324 | BUG(); | ||
325 | } | ||
326 | |||
327 | return ack; | ||
328 | } | ||
329 | |||
330 | static int __init | ||
331 | async_tx_init(void) | ||
332 | { | 32 | { |
333 | enum dma_transaction_type cap; | 33 | dmaengine_get(); |
334 | |||
335 | spin_lock_init(&async_tx_lock); | ||
336 | bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); | ||
337 | |||
338 | /* an interrupt will never be an explicit operation type. | ||
339 | * clearing this bit prevents allocation to a slot in 'channel_table' | ||
340 | */ | ||
341 | clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); | ||
342 | |||
343 | for_each_dma_cap_mask(cap, dma_cap_mask_all) { | ||
344 | channel_table[cap] = alloc_percpu(struct chan_ref_percpu); | ||
345 | if (!channel_table[cap]) | ||
346 | goto err; | ||
347 | } | ||
348 | |||
349 | channel_table_initialized = 1; | ||
350 | dma_async_client_register(&async_tx_dma); | ||
351 | dma_async_client_chan_request(&async_tx_dma); | ||
352 | 34 | ||
353 | printk(KERN_INFO "async_tx: api initialized (async)\n"); | 35 | printk(KERN_INFO "async_tx: api initialized (async)\n"); |
354 | 36 | ||
355 | return 0; | 37 | return 0; |
356 | err: | ||
357 | printk(KERN_ERR "async_tx: initialization failure\n"); | ||
358 | |||
359 | while (--cap >= 0) | ||
360 | free_percpu(channel_table[cap]); | ||
361 | |||
362 | return 1; | ||
363 | } | 38 | } |
364 | 39 | ||
365 | static void __exit async_tx_exit(void) | 40 | static void __exit async_tx_exit(void) |
366 | { | 41 | { |
367 | enum dma_transaction_type cap; | 42 | dmaengine_put(); |
368 | |||
369 | channel_table_initialized = 0; | ||
370 | |||
371 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
372 | if (channel_table[cap]) | ||
373 | free_percpu(channel_table[cap]); | ||
374 | |||
375 | dma_async_client_unregister(&async_tx_dma); | ||
376 | } | 43 | } |
377 | 44 | ||
378 | /** | 45 | /** |
@@ -387,16 +54,9 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | |||
387 | { | 54 | { |
388 | /* see if we can keep the chain on one channel */ | 55 | /* see if we can keep the chain on one channel */ |
389 | if (depend_tx && | 56 | if (depend_tx && |
390 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) | 57 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) |
391 | return depend_tx->chan; | 58 | return depend_tx->chan; |
392 | else if (likely(channel_table_initialized)) { | 59 | return dma_find_channel(tx_type); |
393 | struct dma_chan_ref *ref; | ||
394 | int cpu = get_cpu(); | ||
395 | ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref; | ||
396 | put_cpu(); | ||
397 | return ref ? ref->chan : NULL; | ||
398 | } else | ||
399 | return NULL; | ||
400 | } | 60 | } |
401 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); | 61 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); |
402 | #else | 62 | #else |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 175df54eb664..c507a9ac78f4 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4556,7 +4556,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc) | |||
4556 | struct scatterlist *sg = qc->sg; | 4556 | struct scatterlist *sg = qc->sg; |
4557 | int dir = qc->dma_dir; | 4557 | int dir = qc->dma_dir; |
4558 | 4558 | ||
4559 | WARN_ON(sg == NULL); | 4559 | WARN_ON_ONCE(sg == NULL); |
4560 | 4560 | ||
4561 | VPRINTK("unmapping %u sg elements\n", qc->n_elem); | 4561 | VPRINTK("unmapping %u sg elements\n", qc->n_elem); |
4562 | 4562 | ||
@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) | |||
4776 | struct ata_port *ap = qc->ap; | 4776 | struct ata_port *ap = qc->ap; |
4777 | unsigned int tag; | 4777 | unsigned int tag; |
4778 | 4778 | ||
4779 | WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ | 4779 | WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ |
4780 | 4780 | ||
4781 | qc->flags = 0; | 4781 | qc->flags = 0; |
4782 | tag = qc->tag; | 4782 | tag = qc->tag; |
@@ -4791,8 +4791,8 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) | |||
4791 | struct ata_port *ap = qc->ap; | 4791 | struct ata_port *ap = qc->ap; |
4792 | struct ata_link *link = qc->dev->link; | 4792 | struct ata_link *link = qc->dev->link; |
4793 | 4793 | ||
4794 | WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ | 4794 | WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ |
4795 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); | 4795 | WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); |
4796 | 4796 | ||
4797 | if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) | 4797 | if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) |
4798 | ata_sg_clean(qc); | 4798 | ata_sg_clean(qc); |
@@ -4878,7 +4878,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
4878 | struct ata_device *dev = qc->dev; | 4878 | struct ata_device *dev = qc->dev; |
4879 | struct ata_eh_info *ehi = &dev->link->eh_info; | 4879 | struct ata_eh_info *ehi = &dev->link->eh_info; |
4880 | 4880 | ||
4881 | WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); | 4881 | WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); |
4882 | 4882 | ||
4883 | if (unlikely(qc->err_mask)) | 4883 | if (unlikely(qc->err_mask)) |
4884 | qc->flags |= ATA_QCFLAG_FAILED; | 4884 | qc->flags |= ATA_QCFLAG_FAILED; |
@@ -5000,16 +5000,16 @@ void ata_qc_issue(struct ata_queued_cmd *qc) | |||
5000 | * check is skipped for old EH because it reuses active qc to | 5000 | * check is skipped for old EH because it reuses active qc to |
5001 | * request ATAPI sense. | 5001 | * request ATAPI sense. |
5002 | */ | 5002 | */ |
5003 | WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); | 5003 | WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); |
5004 | 5004 | ||
5005 | if (ata_is_ncq(prot)) { | 5005 | if (ata_is_ncq(prot)) { |
5006 | WARN_ON(link->sactive & (1 << qc->tag)); | 5006 | WARN_ON_ONCE(link->sactive & (1 << qc->tag)); |
5007 | 5007 | ||
5008 | if (!link->sactive) | 5008 | if (!link->sactive) |
5009 | ap->nr_active_links++; | 5009 | ap->nr_active_links++; |
5010 | link->sactive |= 1 << qc->tag; | 5010 | link->sactive |= 1 << qc->tag; |
5011 | } else { | 5011 | } else { |
5012 | WARN_ON(link->sactive); | 5012 | WARN_ON_ONCE(link->sactive); |
5013 | 5013 | ||
5014 | ap->nr_active_links++; | 5014 | ap->nr_active_links++; |
5015 | link->active_tag = qc->tag; | 5015 | link->active_tag = qc->tag; |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index c59ad76c84b1..0eae9b453556 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -578,7 +578,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
578 | } | 578 | } |
579 | 579 | ||
580 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | 580 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { |
581 | WARN_ON(!ioaddr->ctl_addr); | 581 | WARN_ON_ONCE(!ioaddr->ctl_addr); |
582 | iowrite8(tf->hob_feature, ioaddr->feature_addr); | 582 | iowrite8(tf->hob_feature, ioaddr->feature_addr); |
583 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); | 583 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); |
584 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); | 584 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); |
@@ -651,7 +651,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
651 | iowrite8(tf->ctl, ioaddr->ctl_addr); | 651 | iowrite8(tf->ctl, ioaddr->ctl_addr); |
652 | ap->last_ctl = tf->ctl; | 652 | ap->last_ctl = tf->ctl; |
653 | } else | 653 | } else |
654 | WARN_ON(1); | 654 | WARN_ON_ONCE(1); |
655 | } | 655 | } |
656 | } | 656 | } |
657 | EXPORT_SYMBOL_GPL(ata_sff_tf_read); | 657 | EXPORT_SYMBOL_GPL(ata_sff_tf_read); |
@@ -891,7 +891,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc) | |||
891 | /* READ/WRITE MULTIPLE */ | 891 | /* READ/WRITE MULTIPLE */ |
892 | unsigned int nsect; | 892 | unsigned int nsect; |
893 | 893 | ||
894 | WARN_ON(qc->dev->multi_count == 0); | 894 | WARN_ON_ONCE(qc->dev->multi_count == 0); |
895 | 895 | ||
896 | nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, | 896 | nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, |
897 | qc->dev->multi_count); | 897 | qc->dev->multi_count); |
@@ -918,7 +918,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
918 | { | 918 | { |
919 | /* send SCSI cdb */ | 919 | /* send SCSI cdb */ |
920 | DPRINTK("send cdb\n"); | 920 | DPRINTK("send cdb\n"); |
921 | WARN_ON(qc->dev->cdb_len < 12); | 921 | WARN_ON_ONCE(qc->dev->cdb_len < 12); |
922 | 922 | ||
923 | ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); | 923 | ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); |
924 | ata_sff_sync(ap); | 924 | ata_sff_sync(ap); |
@@ -1014,7 +1014,7 @@ next_sg: | |||
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | /* consumed can be larger than count only for the last transfer */ | 1016 | /* consumed can be larger than count only for the last transfer */ |
1017 | WARN_ON(qc->cursg && count != consumed); | 1017 | WARN_ON_ONCE(qc->cursg && count != consumed); |
1018 | 1018 | ||
1019 | if (bytes) | 1019 | if (bytes) |
1020 | goto next_sg; | 1020 | goto next_sg; |
@@ -1172,13 +1172,13 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | |||
1172 | unsigned long flags = 0; | 1172 | unsigned long flags = 0; |
1173 | int poll_next; | 1173 | int poll_next; |
1174 | 1174 | ||
1175 | WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); | 1175 | WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); |
1176 | 1176 | ||
1177 | /* Make sure ata_sff_qc_issue() does not throw things | 1177 | /* Make sure ata_sff_qc_issue() does not throw things |
1178 | * like DMA polling into the workqueue. Notice that | 1178 | * like DMA polling into the workqueue. Notice that |
1179 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). | 1179 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). |
1180 | */ | 1180 | */ |
1181 | WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); | 1181 | WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); |
1182 | 1182 | ||
1183 | fsm_start: | 1183 | fsm_start: |
1184 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", | 1184 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", |
@@ -1387,7 +1387,7 @@ fsm_start: | |||
1387 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", | 1387 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", |
1388 | ap->print_id, qc->dev->devno, status); | 1388 | ap->print_id, qc->dev->devno, status); |
1389 | 1389 | ||
1390 | WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); | 1390 | WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); |
1391 | 1391 | ||
1392 | ap->hsm_task_state = HSM_ST_IDLE; | 1392 | ap->hsm_task_state = HSM_ST_IDLE; |
1393 | 1393 | ||
@@ -1423,7 +1423,7 @@ void ata_pio_task(struct work_struct *work) | |||
1423 | int poll_next; | 1423 | int poll_next; |
1424 | 1424 | ||
1425 | fsm_start: | 1425 | fsm_start: |
1426 | WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); | 1426 | WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); |
1427 | 1427 | ||
1428 | /* | 1428 | /* |
1429 | * This is purely heuristic. This is a fast path. | 1429 | * This is purely heuristic. This is a fast path. |
@@ -1512,7 +1512,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1512 | break; | 1512 | break; |
1513 | 1513 | ||
1514 | case ATA_PROT_DMA: | 1514 | case ATA_PROT_DMA: |
1515 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | 1515 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); |
1516 | 1516 | ||
1517 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | 1517 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ |
1518 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 1518 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
@@ -1564,7 +1564,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1564 | break; | 1564 | break; |
1565 | 1565 | ||
1566 | case ATAPI_PROT_DMA: | 1566 | case ATAPI_PROT_DMA: |
1567 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | 1567 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); |
1568 | 1568 | ||
1569 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | 1569 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ |
1570 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 1570 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
@@ -1576,7 +1576,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) | |||
1576 | break; | 1576 | break; |
1577 | 1577 | ||
1578 | default: | 1578 | default: |
1579 | WARN_ON(1); | 1579 | WARN_ON_ONCE(1); |
1580 | return AC_ERR_SYSTEM; | 1580 | return AC_ERR_SYSTEM; |
1581 | } | 1581 | } |
1582 | 1582 | ||
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index d883e1b8bb8c..55433849bfa6 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c | |||
@@ -270,6 +270,6 @@ static void __exit dca_exit(void) | |||
270 | dca_sysfs_exit(); | 270 | dca_sysfs_exit(); |
271 | } | 271 | } |
272 | 272 | ||
273 | subsys_initcall(dca_init); | 273 | arch_initcall(dca_init); |
274 | module_exit(dca_exit); | 274 | module_exit(dca_exit); |
275 | 275 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 904e57558bb5..e34b06420816 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -33,7 +33,6 @@ config INTEL_IOATDMA | |||
33 | config INTEL_IOP_ADMA | 33 | config INTEL_IOP_ADMA |
34 | tristate "Intel IOP ADMA support" | 34 | tristate "Intel IOP ADMA support" |
35 | depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 35 | depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX |
36 | select ASYNC_CORE | ||
37 | select DMA_ENGINE | 36 | select DMA_ENGINE |
38 | help | 37 | help |
39 | Enable support for the Intel(R) IOP Series RAID engines. | 38 | Enable support for the Intel(R) IOP Series RAID engines. |
@@ -59,7 +58,6 @@ config FSL_DMA | |||
59 | config MV_XOR | 58 | config MV_XOR |
60 | bool "Marvell XOR engine support" | 59 | bool "Marvell XOR engine support" |
61 | depends on PLAT_ORION | 60 | depends on PLAT_ORION |
62 | select ASYNC_CORE | ||
63 | select DMA_ENGINE | 61 | select DMA_ENGINE |
64 | ---help--- | 62 | ---help--- |
65 | Enable support for the Marvell XOR engine. | 63 | Enable support for the Marvell XOR engine. |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 657996517374..403dbe781122 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -31,32 +31,18 @@ | |||
31 | * | 31 | * |
32 | * LOCKING: | 32 | * LOCKING: |
33 | * | 33 | * |
34 | * The subsystem keeps two global lists, dma_device_list and dma_client_list. | 34 | * The subsystem keeps a global list of dma_device structs; it is protected by a
35 | * Both of these are protected by a mutex, dma_list_mutex. | 35 | * mutex, dma_list_mutex. |
36 | * | ||
37 | * A subsystem can get access to a channel by calling dmaengine_get() followed | ||
38 | * by dma_find_channel(), or if it has need for an exclusive channel it can call | ||
39 | * dma_request_channel(). Once a channel is allocated a reference is taken | ||
40 | * against its corresponding driver to disable removal. | ||
36 | * | 41 | * |
37 | * Each device has a channels list, which runs unlocked but is never modified | 42 | * Each device has a channels list, which runs unlocked but is never modified |
38 | * once the device is registered, it's just setup by the driver. | 43 | * once the device is registered, it's just setup by the driver. |
39 | * | 44 | * |
40 | * Each client is responsible for keeping track of the channels it uses. See | 45 | * See Documentation/dmaengine.txt for more details |
41 | * the definition of dma_event_callback in dmaengine.h. | ||
42 | * | ||
43 | * Each device has a kref, which is initialized to 1 when the device is | ||
44 | * registered. A kref_get is done for each device registered. When the | ||
45 | * device is released, the corresponding kref_put is done in the release | ||
46 | * method. Every time one of the device's channels is allocated to a client, | ||
47 | * a kref_get occurs. When the channel is freed, the corresponding kref_put | ||
48 | * happens. The device's release function does a completion, so | ||
49 | * unregister_device does a remove event, device_unregister, a kref_put | ||
50 | * for the first reference, then waits on the completion for all other | ||
51 | * references to finish. | ||
52 | * | ||
53 | * Each channel has an open-coded implementation of Rusty Russell's "bigref," | ||
54 | * with a kref and a per_cpu local_t. A dma_chan_get is called when a client | ||
55 | * signals that it wants to use a channel, and dma_chan_put is called when | ||
56 | * a channel is removed or a client using it is unregistered. A client can | ||
57 | * take extra references per outstanding transaction, as is the case with | ||
58 | * the NET DMA client. The release function does a kref_put on the device. | ||
59 | * -ChrisL, DanW | ||
60 | */ | 46 | */ |
61 | 47 | ||
62 | #include <linux/init.h> | 48 | #include <linux/init.h> |
@@ -70,54 +56,85 @@ | |||
70 | #include <linux/rcupdate.h> | 56 | #include <linux/rcupdate.h> |
71 | #include <linux/mutex.h> | 57 | #include <linux/mutex.h> |
72 | #include <linux/jiffies.h> | 58 | #include <linux/jiffies.h> |
59 | #include <linux/rculist.h> | ||
60 | #include <linux/idr.h> | ||
73 | 61 | ||
74 | static DEFINE_MUTEX(dma_list_mutex); | 62 | static DEFINE_MUTEX(dma_list_mutex); |
75 | static LIST_HEAD(dma_device_list); | 63 | static LIST_HEAD(dma_device_list); |
76 | static LIST_HEAD(dma_client_list); | 64 | static long dmaengine_ref_count; |
65 | static struct idr dma_idr; | ||
77 | 66 | ||
78 | /* --- sysfs implementation --- */ | 67 | /* --- sysfs implementation --- */ |
79 | 68 | ||
69 | /** | ||
70 | * dev_to_dma_chan - convert a device pointer to its sysfs container object | ||
71 | * @dev - device node | ||
72 | * | ||
73 | * Must be called under dma_list_mutex | ||
74 | */ | ||
75 | static struct dma_chan *dev_to_dma_chan(struct device *dev) | ||
76 | { | ||
77 | struct dma_chan_dev *chan_dev; | ||
78 | |||
79 | chan_dev = container_of(dev, typeof(*chan_dev), device); | ||
80 | return chan_dev->chan; | ||
81 | } | ||
82 | |||
80 | static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) | 83 | static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) |
81 | { | 84 | { |
82 | struct dma_chan *chan = to_dma_chan(dev); | 85 | struct dma_chan *chan; |
83 | unsigned long count = 0; | 86 | unsigned long count = 0; |
84 | int i; | 87 | int i; |
88 | int err; | ||
85 | 89 | ||
86 | for_each_possible_cpu(i) | 90 | mutex_lock(&dma_list_mutex); |
87 | count += per_cpu_ptr(chan->local, i)->memcpy_count; | 91 | chan = dev_to_dma_chan(dev); |
92 | if (chan) { | ||
93 | for_each_possible_cpu(i) | ||
94 | count += per_cpu_ptr(chan->local, i)->memcpy_count; | ||
95 | err = sprintf(buf, "%lu\n", count); | ||
96 | } else | ||
97 | err = -ENODEV; | ||
98 | mutex_unlock(&dma_list_mutex); | ||
88 | 99 | ||
89 | return sprintf(buf, "%lu\n", count); | 100 | return err; |
90 | } | 101 | } |
91 | 102 | ||
92 | static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, | 103 | static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, |
93 | char *buf) | 104 | char *buf) |
94 | { | 105 | { |
95 | struct dma_chan *chan = to_dma_chan(dev); | 106 | struct dma_chan *chan; |
96 | unsigned long count = 0; | 107 | unsigned long count = 0; |
97 | int i; | 108 | int i; |
109 | int err; | ||
98 | 110 | ||
99 | for_each_possible_cpu(i) | 111 | mutex_lock(&dma_list_mutex); |
100 | count += per_cpu_ptr(chan->local, i)->bytes_transferred; | 112 | chan = dev_to_dma_chan(dev); |
113 | if (chan) { | ||
114 | for_each_possible_cpu(i) | ||
115 | count += per_cpu_ptr(chan->local, i)->bytes_transferred; | ||
116 | err = sprintf(buf, "%lu\n", count); | ||
117 | } else | ||
118 | err = -ENODEV; | ||
119 | mutex_unlock(&dma_list_mutex); | ||
101 | 120 | ||
102 | return sprintf(buf, "%lu\n", count); | 121 | return err; |
103 | } | 122 | } |
104 | 123 | ||
105 | static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) | 124 | static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) |
106 | { | 125 | { |
107 | struct dma_chan *chan = to_dma_chan(dev); | 126 | struct dma_chan *chan; |
108 | int in_use = 0; | 127 | int err; |
109 | |||
110 | if (unlikely(chan->slow_ref) && | ||
111 | atomic_read(&chan->refcount.refcount) > 1) | ||
112 | in_use = 1; | ||
113 | else { | ||
114 | if (local_read(&(per_cpu_ptr(chan->local, | ||
115 | get_cpu())->refcount)) > 0) | ||
116 | in_use = 1; | ||
117 | put_cpu(); | ||
118 | } | ||
119 | 128 | ||
120 | return sprintf(buf, "%d\n", in_use); | 129 | mutex_lock(&dma_list_mutex); |
130 | chan = dev_to_dma_chan(dev); | ||
131 | if (chan) | ||
132 | err = sprintf(buf, "%d\n", chan->client_count); | ||
133 | else | ||
134 | err = -ENODEV; | ||
135 | mutex_unlock(&dma_list_mutex); | ||
136 | |||
137 | return err; | ||
121 | } | 138 | } |
122 | 139 | ||
123 | static struct device_attribute dma_attrs[] = { | 140 | static struct device_attribute dma_attrs[] = { |
@@ -127,76 +144,110 @@ static struct device_attribute dma_attrs[] = { | |||
127 | __ATTR_NULL | 144 | __ATTR_NULL |
128 | }; | 145 | }; |
129 | 146 | ||
130 | static void dma_async_device_cleanup(struct kref *kref); | 147 | static void chan_dev_release(struct device *dev) |
131 | |||
132 | static void dma_dev_release(struct device *dev) | ||
133 | { | 148 | { |
134 | struct dma_chan *chan = to_dma_chan(dev); | 149 | struct dma_chan_dev *chan_dev; |
135 | kref_put(&chan->device->refcount, dma_async_device_cleanup); | 150 | |
151 | chan_dev = container_of(dev, typeof(*chan_dev), device); | ||
152 | if (atomic_dec_and_test(chan_dev->idr_ref)) { | ||
153 | mutex_lock(&dma_list_mutex); | ||
154 | idr_remove(&dma_idr, chan_dev->dev_id); | ||
155 | mutex_unlock(&dma_list_mutex); | ||
156 | kfree(chan_dev->idr_ref); | ||
157 | } | ||
158 | kfree(chan_dev); | ||
136 | } | 159 | } |
137 | 160 | ||
138 | static struct class dma_devclass = { | 161 | static struct class dma_devclass = { |
139 | .name = "dma", | 162 | .name = "dma", |
140 | .dev_attrs = dma_attrs, | 163 | .dev_attrs = dma_attrs, |
141 | .dev_release = dma_dev_release, | 164 | .dev_release = chan_dev_release, |
142 | }; | 165 | }; |
143 | 166 | ||
144 | /* --- client and device registration --- */ | 167 | /* --- client and device registration --- */ |
145 | 168 | ||
146 | #define dma_chan_satisfies_mask(chan, mask) \ | 169 | #define dma_device_satisfies_mask(device, mask) \ |
147 | __dma_chan_satisfies_mask((chan), &(mask)) | 170 | __dma_device_satisfies_mask((device), &(mask)) |
148 | static int | 171 | static int |
149 | __dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want) | 172 | __dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want) |
150 | { | 173 | { |
151 | dma_cap_mask_t has; | 174 | dma_cap_mask_t has; |
152 | 175 | ||
153 | bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits, | 176 | bitmap_and(has.bits, want->bits, device->cap_mask.bits, |
154 | DMA_TX_TYPE_END); | 177 | DMA_TX_TYPE_END); |
155 | return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); | 178 | return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); |
156 | } | 179 | } |
157 | 180 | ||
181 | static struct module *dma_chan_to_owner(struct dma_chan *chan) | ||
182 | { | ||
183 | return chan->device->dev->driver->owner; | ||
184 | } | ||
185 | |||
158 | /** | 186 | /** |
159 | * dma_client_chan_alloc - try to allocate channels to a client | 187 | * balance_ref_count - catch up the channel reference count |
160 | * @client: &dma_client | 188 | * @chan - channel to balance ->client_count versus dmaengine_ref_count |
161 | * | 189 | * |
162 | * Called with dma_list_mutex held. | 190 | * balance_ref_count must be called under dma_list_mutex |
163 | */ | 191 | */ |
164 | static void dma_client_chan_alloc(struct dma_client *client) | 192 | static void balance_ref_count(struct dma_chan *chan) |
165 | { | 193 | { |
166 | struct dma_device *device; | 194 | struct module *owner = dma_chan_to_owner(chan); |
167 | struct dma_chan *chan; | ||
168 | int desc; /* allocated descriptor count */ | ||
169 | enum dma_state_client ack; | ||
170 | 195 | ||
171 | /* Find a channel */ | 196 | while (chan->client_count < dmaengine_ref_count) { |
172 | list_for_each_entry(device, &dma_device_list, global_node) { | 197 | __module_get(owner); |
173 | /* Does the client require a specific DMA controller? */ | 198 | chan->client_count++; |
174 | if (client->slave && client->slave->dma_dev | 199 | } |
175 | && client->slave->dma_dev != device->dev) | 200 | } |
176 | continue; | ||
177 | 201 | ||
178 | list_for_each_entry(chan, &device->channels, device_node) { | 202 | /** |
179 | if (!dma_chan_satisfies_mask(chan, client->cap_mask)) | 203 | * dma_chan_get - try to grab a dma channel's parent driver module |
180 | continue; | 204 | * @chan - channel to grab |
205 | * | ||
206 | * Must be called under dma_list_mutex | ||
207 | */ | ||
208 | static int dma_chan_get(struct dma_chan *chan) | ||
209 | { | ||
210 | int err = -ENODEV; | ||
211 | struct module *owner = dma_chan_to_owner(chan); | ||
212 | |||
213 | if (chan->client_count) { | ||
214 | __module_get(owner); | ||
215 | err = 0; | ||
216 | } else if (try_module_get(owner)) | ||
217 | err = 0; | ||
218 | |||
219 | if (err == 0) | ||
220 | chan->client_count++; | ||
221 | |||
222 | /* allocate upon first client reference */ | ||
223 | if (chan->client_count == 1 && err == 0) { | ||
224 | int desc_cnt = chan->device->device_alloc_chan_resources(chan); | ||
225 | |||
226 | if (desc_cnt < 0) { | ||
227 | err = desc_cnt; | ||
228 | chan->client_count = 0; | ||
229 | module_put(owner); | ||
230 | } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) | ||
231 | balance_ref_count(chan); | ||
232 | } | ||
181 | 233 | ||
182 | desc = chan->device->device_alloc_chan_resources( | 234 | return err; |
183 | chan, client); | 235 | } |
184 | if (desc >= 0) { | ||
185 | ack = client->event_callback(client, | ||
186 | chan, | ||
187 | DMA_RESOURCE_AVAILABLE); | ||
188 | 236 | ||
189 | /* we are done once this client rejects | 237 | /** |
190 | * an available resource | 238 | * dma_chan_put - drop a reference to a dma channel's parent driver module |
191 | */ | 239 | * @chan - channel to release |
192 | if (ack == DMA_ACK) { | 240 | * |
193 | dma_chan_get(chan); | 241 | * Must be called under dma_list_mutex |
194 | chan->client_count++; | 242 | */ |
195 | } else if (ack == DMA_NAK) | 243 | static void dma_chan_put(struct dma_chan *chan) |
196 | return; | 244 | { |
197 | } | 245 | if (!chan->client_count) |
198 | } | 246 | return; /* this channel failed alloc_chan_resources */ |
199 | } | 247 | chan->client_count--; |
248 | module_put(dma_chan_to_owner(chan)); | ||
249 | if (chan->client_count == 0) | ||
250 | chan->device->device_free_chan_resources(chan); | ||
200 | } | 251 | } |
201 | 252 | ||
202 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | 253 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) |
@@ -218,138 +269,342 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | |||
218 | EXPORT_SYMBOL(dma_sync_wait); | 269 | EXPORT_SYMBOL(dma_sync_wait); |
219 | 270 | ||
220 | /** | 271 | /** |
221 | * dma_chan_cleanup - release a DMA channel's resources | 272 | * dma_cap_mask_all - enable iteration over all operation types |
222 | * @kref: kernel reference structure that contains the DMA channel device | 273 | */ |
274 | static dma_cap_mask_t dma_cap_mask_all; | ||
275 | |||
276 | /** | ||
277 | * dma_chan_tbl_ent - tracks channel allocations per core/operation | ||
278 | * @chan - associated channel for this entry | ||
279 | */ | ||
280 | struct dma_chan_tbl_ent { | ||
281 | struct dma_chan *chan; | ||
282 | }; | ||
283 | |||
284 | /** | ||
285 | * channel_table - percpu lookup table for memory-to-memory offload providers | ||
223 | */ | 286 | */ |
224 | void dma_chan_cleanup(struct kref *kref) | 287 | static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END]; |
288 | |||
289 | static int __init dma_channel_table_init(void) | ||
225 | { | 290 | { |
226 | struct dma_chan *chan = container_of(kref, struct dma_chan, refcount); | 291 | enum dma_transaction_type cap; |
227 | chan->device->device_free_chan_resources(chan); | 292 | int err = 0; |
228 | kref_put(&chan->device->refcount, dma_async_device_cleanup); | 293 | |
294 | bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); | ||
295 | |||
296 | /* 'interrupt', 'private', and 'slave' are channel capabilities, | ||
297 | * but are not associated with an operation so they do not need | ||
298 | * an entry in the channel_table | ||
299 | */ | ||
300 | clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); | ||
301 | clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); | ||
302 | clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); | ||
303 | |||
304 | for_each_dma_cap_mask(cap, dma_cap_mask_all) { | ||
305 | channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); | ||
306 | if (!channel_table[cap]) { | ||
307 | err = -ENOMEM; | ||
308 | break; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | if (err) { | ||
313 | pr_err("dmaengine: initialization failure\n"); | ||
314 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
315 | if (channel_table[cap]) | ||
316 | free_percpu(channel_table[cap]); | ||
317 | } | ||
318 | |||
319 | return err; | ||
229 | } | 320 | } |
230 | EXPORT_SYMBOL(dma_chan_cleanup); | 321 | arch_initcall(dma_channel_table_init); |
231 | 322 | ||
232 | static void dma_chan_free_rcu(struct rcu_head *rcu) | 323 | /** |
324 | * dma_find_channel - find a channel to carry out the operation | ||
325 | * @tx_type: transaction type | ||
326 | */ | ||
327 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | ||
233 | { | 328 | { |
234 | struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu); | 329 | struct dma_chan *chan; |
235 | int bias = 0x7FFFFFFF; | 330 | int cpu; |
236 | int i; | 331 | |
237 | for_each_possible_cpu(i) | 332 | WARN_ONCE(dmaengine_ref_count == 0, |
238 | bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount); | 333 | "client called %s without a reference", __func__); |
239 | atomic_sub(bias, &chan->refcount.refcount); | 334 | |
240 | kref_put(&chan->refcount, dma_chan_cleanup); | 335 | cpu = get_cpu(); |
336 | chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan; | ||
337 | put_cpu(); | ||
338 | |||
339 | return chan; | ||
241 | } | 340 | } |
341 | EXPORT_SYMBOL(dma_find_channel); | ||
242 | 342 | ||
243 | static void dma_chan_release(struct dma_chan *chan) | 343 | /** |
344 | * dma_issue_pending_all - flush all pending operations across all channels | ||
345 | */ | ||
346 | void dma_issue_pending_all(void) | ||
244 | { | 347 | { |
245 | atomic_add(0x7FFFFFFF, &chan->refcount.refcount); | 348 | struct dma_device *device; |
246 | chan->slow_ref = 1; | 349 | struct dma_chan *chan; |
247 | call_rcu(&chan->rcu, dma_chan_free_rcu); | 350 | |
351 | WARN_ONCE(dmaengine_ref_count == 0, | ||
352 | "client called %s without a reference", __func__); | ||
353 | |||
354 | rcu_read_lock(); | ||
355 | list_for_each_entry_rcu(device, &dma_device_list, global_node) { | ||
356 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
357 | continue; | ||
358 | list_for_each_entry(chan, &device->channels, device_node) | ||
359 | if (chan->client_count) | ||
360 | device->device_issue_pending(chan); | ||
361 | } | ||
362 | rcu_read_unlock(); | ||
248 | } | 363 | } |
364 | EXPORT_SYMBOL(dma_issue_pending_all); | ||
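
dma_issue_pending_all() gives a client one flush point for work queued on any public channel. A hypothetical batching sketch (error handling and the memcpy fallback omitted), again assuming a dmaengine_get() reference is held:

#include <linux/dmaengine.h>

/* hypothetical batch: queue several copies, then flush everything at once */
static void example_copy_batch(void **dst, void **src, size_t *len, int n)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	int i;

	if (!chan)
		return;

	for (i = 0; i < n; i++)
		dma_async_memcpy_buf_to_buf(chan, dst[i], src[i], len[i]);

	/* one call covers this channel and any others with queued work */
	dma_issue_pending_all();
}
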
249 | 365 | ||
250 | /** | 366 | /** |
251 | * dma_chans_notify_available - broadcast available channels to the clients | 367 | * nth_chan - returns the nth channel of the given capability |
368 | * @cap: capability to match | ||
369 | * @n: nth channel desired | ||
370 | * | ||
371 | * Defaults to returning the channel with the desired capability and the | ||
372 | * lowest reference count when 'n' cannot be satisfied. Must be called | ||
373 | * under dma_list_mutex. | ||
252 | */ | 374 | */ |
253 | static void dma_clients_notify_available(void) | 375 | static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n) |
254 | { | 376 | { |
255 | struct dma_client *client; | 377 | struct dma_device *device; |
378 | struct dma_chan *chan; | ||
379 | struct dma_chan *ret = NULL; | ||
380 | struct dma_chan *min = NULL; | ||
256 | 381 | ||
257 | mutex_lock(&dma_list_mutex); | 382 | list_for_each_entry(device, &dma_device_list, global_node) { |
383 | if (!dma_has_cap(cap, device->cap_mask) || | ||
384 | dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
385 | continue; | ||
386 | list_for_each_entry(chan, &device->channels, device_node) { | ||
387 | if (!chan->client_count) | ||
388 | continue; | ||
389 | if (!min) | ||
390 | min = chan; | ||
391 | else if (chan->table_count < min->table_count) | ||
392 | min = chan; | ||
393 | |||
394 | if (n-- == 0) { | ||
395 | ret = chan; | ||
396 | break; /* done */ | ||
397 | } | ||
398 | } | ||
399 | if (ret) | ||
400 | break; /* done */ | ||
401 | } | ||
258 | 402 | ||
259 | list_for_each_entry(client, &dma_client_list, global_node) | 403 | if (!ret) |
260 | dma_client_chan_alloc(client); | 404 | ret = min; |
261 | 405 | ||
262 | mutex_unlock(&dma_list_mutex); | 406 | if (ret) |
407 | ret->table_count++; | ||
408 | |||
409 | return ret; | ||
263 | } | 410 | } |
264 | 411 | ||
265 | /** | 412 | /** |
266 | * dma_chans_notify_available - tell the clients that a channel is going away | 413 | * dma_channel_rebalance - redistribute the available channels |
267 | * @chan: channel on its way out | 414 | * |
415 | * Optimize for cpu isolation (each cpu gets a dedicated channel for an | ||
416 | * operation type) in the SMP case, and operation isolation (avoid | ||
417 | * multi-tasking channels) in the non-SMP case. Must be called under | ||
418 | * dma_list_mutex. | ||
268 | */ | 419 | */ |
269 | static void dma_clients_notify_removed(struct dma_chan *chan) | 420 | static void dma_channel_rebalance(void) |
270 | { | 421 | { |
271 | struct dma_client *client; | 422 | struct dma_chan *chan; |
272 | enum dma_state_client ack; | 423 | struct dma_device *device; |
424 | int cpu; | ||
425 | int cap; | ||
426 | int n; | ||
273 | 427 | ||
274 | mutex_lock(&dma_list_mutex); | 428 | /* undo the last distribution */ |
429 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
430 | for_each_possible_cpu(cpu) | ||
431 | per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; | ||
432 | |||
433 | list_for_each_entry(device, &dma_device_list, global_node) { | ||
434 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
435 | continue; | ||
436 | list_for_each_entry(chan, &device->channels, device_node) | ||
437 | chan->table_count = 0; | ||
438 | } | ||
275 | 439 | ||
276 | list_for_each_entry(client, &dma_client_list, global_node) { | 440 | /* don't populate the channel_table if no clients are available */ |
277 | ack = client->event_callback(client, chan, | 441 | if (!dmaengine_ref_count) |
278 | DMA_RESOURCE_REMOVED); | 442 | return; |
279 | 443 | ||
280 | /* client was holding resources for this channel so | 444 | /* redistribute available channels */ |
281 | * free it | 445 | n = 0; |
282 | */ | 446 | for_each_dma_cap_mask(cap, dma_cap_mask_all) |
283 | if (ack == DMA_ACK) { | 447 | for_each_online_cpu(cpu) { |
284 | dma_chan_put(chan); | 448 | if (num_possible_cpus() > 1) |
285 | chan->client_count--; | 449 | chan = nth_chan(cap, n++); |
450 | else | ||
451 | chan = nth_chan(cap, -1); | ||
452 | |||
453 | per_cpu_ptr(channel_table[cap], cpu)->chan = chan; | ||
454 | } | ||
455 | } | ||
456 | |||
457 | static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev, | ||
458 | dma_filter_fn fn, void *fn_param) | ||
459 | { | ||
460 | struct dma_chan *chan; | ||
461 | |||
462 | if (!__dma_device_satisfies_mask(dev, mask)) { | ||
463 | pr_debug("%s: wrong capabilities\n", __func__); | ||
464 | return NULL; | ||
465 | } | ||
466 | /* devices with multiple channels need special handling as we need to | ||
467 | * ensure that all channels are either private or public. | ||
468 | */ | ||
469 | if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) | ||
470 | list_for_each_entry(chan, &dev->channels, device_node) { | ||
471 | /* some channels are already publicly allocated */ | ||
472 | if (chan->client_count) | ||
473 | return NULL; | ||
286 | } | 474 | } |
475 | |||
476 | list_for_each_entry(chan, &dev->channels, device_node) { | ||
477 | if (chan->client_count) { | ||
478 | pr_debug("%s: %s busy\n", | ||
479 | __func__, dma_chan_name(chan)); | ||
480 | continue; | ||
481 | } | ||
482 | if (fn && !fn(chan, fn_param)) { | ||
483 | pr_debug("%s: %s filter said false\n", | ||
484 | __func__, dma_chan_name(chan)); | ||
485 | continue; | ||
486 | } | ||
487 | return chan; | ||
287 | } | 488 | } |
288 | 489 | ||
289 | mutex_unlock(&dma_list_mutex); | 490 | return NULL; |
290 | } | 491 | } |
291 | 492 | ||
292 | /** | 493 | /** |
293 | * dma_async_client_register - register a &dma_client | 494 | * dma_request_channel - try to allocate an exclusive channel |
294 | * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask' | 495 | * @mask: capabilities that the channel must satisfy |
496 | * @fn: optional callback to disposition available channels | ||
497 | * @fn_param: opaque parameter to pass to dma_filter_fn | ||
295 | */ | 498 | */ |
296 | void dma_async_client_register(struct dma_client *client) | 499 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) |
297 | { | 500 | { |
298 | /* validate client data */ | 501 | struct dma_device *device, *_d; |
299 | BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) && | 502 | struct dma_chan *chan = NULL; |
300 | !client->slave); | 503 | int err; |
301 | 504 | ||
505 | /* Find a channel */ | ||
506 | mutex_lock(&dma_list_mutex); | ||
507 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { | ||
508 | chan = private_candidate(mask, device, fn, fn_param); | ||
509 | if (chan) { | ||
510 | /* Found a suitable channel, try to grab, prep, and | ||
511 | * return it. We first set DMA_PRIVATE to disable | ||
512 | * balance_ref_count as this channel will not be | ||
513 | * published in the general-purpose allocator | ||
514 | */ | ||
515 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
516 | err = dma_chan_get(chan); | ||
517 | |||
518 | if (err == -ENODEV) { | ||
519 | pr_debug("%s: %s module removed\n", __func__, | ||
520 | dma_chan_name(chan)); | ||
521 | list_del_rcu(&device->global_node); | ||
522 | } else if (err) | ||
523 | pr_err("dmaengine: failed to get %s: (%d)\n", | ||
524 | dma_chan_name(chan), err); | ||
525 | else | ||
526 | break; | ||
527 | chan = NULL; | ||
528 | } | ||
529 | } | ||
530 | mutex_unlock(&dma_list_mutex); | ||
531 | |||
532 | pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail", | ||
533 | chan ? dma_chan_name(chan) : NULL); | ||
534 | |||
535 | return chan; | ||
536 | } | ||
537 | EXPORT_SYMBOL_GPL(__dma_request_channel); | ||
538 | |||
539 | void dma_release_channel(struct dma_chan *chan) | ||
540 | { | ||
302 | mutex_lock(&dma_list_mutex); | 541 | mutex_lock(&dma_list_mutex); |
303 | list_add_tail(&client->global_node, &dma_client_list); | 542 | WARN_ONCE(chan->client_count != 1, |
543 | "chan reference count %d != 1\n", chan->client_count); | ||
544 | dma_chan_put(chan); | ||
304 | mutex_unlock(&dma_list_mutex); | 545 | mutex_unlock(&dma_list_mutex); |
305 | } | 546 | } |
306 | EXPORT_SYMBOL(dma_async_client_register); | 547 | EXPORT_SYMBOL_GPL(dma_release_channel); |
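
For the exclusive path a client passes a capability mask and an optional filter, and later returns the channel with dma_release_channel(). A sketch with a hypothetical filter that matches channels by their parent DMA device:

#include <linux/dmaengine.h>

/* hypothetical filter: accept only channels backed by a specific device */
static bool example_filter(struct dma_chan *chan, void *param)
{
	struct device *dev = param;

	return chan->device->dev == dev;
}

static struct dma_chan *example_grab_channel(struct device *dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* on success the channel is private: it is never handed out by
	 * dma_find_channel(); give it back with dma_release_channel()
	 */
	return dma_request_channel(mask, example_filter, dev);
}
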
307 | 548 | ||
308 | /** | 549 | /** |
309 | * dma_async_client_unregister - unregister a client and free the &dma_client | 550 | * dmaengine_get - register interest in dma_channels |
310 | * @client: &dma_client to free | ||
311 | * | ||
312 | * Force frees any allocated DMA channels, frees the &dma_client memory | ||
313 | */ | 551 | */ |
314 | void dma_async_client_unregister(struct dma_client *client) | 552 | void dmaengine_get(void) |
315 | { | 553 | { |
316 | struct dma_device *device; | 554 | struct dma_device *device, *_d; |
317 | struct dma_chan *chan; | 555 | struct dma_chan *chan; |
318 | enum dma_state_client ack; | 556 | int err; |
319 | |||
320 | if (!client) | ||
321 | return; | ||
322 | 557 | ||
323 | mutex_lock(&dma_list_mutex); | 558 | mutex_lock(&dma_list_mutex); |
324 | /* free all channels the client is holding */ | 559 | dmaengine_ref_count++; |
325 | list_for_each_entry(device, &dma_device_list, global_node) | ||
326 | list_for_each_entry(chan, &device->channels, device_node) { | ||
327 | ack = client->event_callback(client, chan, | ||
328 | DMA_RESOURCE_REMOVED); | ||
329 | 560 | ||
330 | if (ack == DMA_ACK) { | 561 | /* try to grab channels */ |
331 | dma_chan_put(chan); | 562 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
332 | chan->client_count--; | 563 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
333 | } | 564 | continue; |
565 | list_for_each_entry(chan, &device->channels, device_node) { | ||
566 | err = dma_chan_get(chan); | ||
567 | if (err == -ENODEV) { | ||
568 | /* module removed before we could use it */ | ||
569 | list_del_rcu(&device->global_node); | ||
570 | break; | ||
571 | } else if (err) | ||
572 | pr_err("dmaengine: failed to get %s: (%d)\n", | ||
573 | dma_chan_name(chan), err); | ||
334 | } | 574 | } |
575 | } | ||
335 | 576 | ||
336 | list_del(&client->global_node); | 577 | /* if this is the first reference and there were channels |
578 | * waiting we need to rebalance to get those channels | ||
579 | * incorporated into the channel table | ||
580 | */ | ||
581 | if (dmaengine_ref_count == 1) | ||
582 | dma_channel_rebalance(); | ||
337 | mutex_unlock(&dma_list_mutex); | 583 | mutex_unlock(&dma_list_mutex); |
338 | } | 584 | } |
339 | EXPORT_SYMBOL(dma_async_client_unregister); | 585 | EXPORT_SYMBOL(dmaengine_get); |
340 | 586 | ||
341 | /** | 587 | /** |
342 | * dma_async_client_chan_request - send all available channels to the | 588 | * dmaengine_put - let dma drivers be removed when ref_count == 0 |
343 | * client that satisfy the capability mask | ||
344 | * @client - requester | ||
345 | */ | 589 | */ |
346 | void dma_async_client_chan_request(struct dma_client *client) | 590 | void dmaengine_put(void) |
347 | { | 591 | { |
592 | struct dma_device *device; | ||
593 | struct dma_chan *chan; | ||
594 | |||
348 | mutex_lock(&dma_list_mutex); | 595 | mutex_lock(&dma_list_mutex); |
349 | dma_client_chan_alloc(client); | 596 | dmaengine_ref_count--; |
597 | BUG_ON(dmaengine_ref_count < 0); | ||
598 | /* drop channel references */ | ||
599 | list_for_each_entry(device, &dma_device_list, global_node) { | ||
600 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
601 | continue; | ||
602 | list_for_each_entry(chan, &device->channels, device_node) | ||
603 | dma_chan_put(chan); | ||
604 | } | ||
350 | mutex_unlock(&dma_list_mutex); | 605 | mutex_unlock(&dma_list_mutex); |
351 | } | 606 | } |
352 | EXPORT_SYMBOL(dma_async_client_chan_request); | 607 | EXPORT_SYMBOL(dmaengine_put); |
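
dmaengine_get() and dmaengine_put() bracket a subsystem's interest in the public channel pool; the async_tx conversion earlier in this patch takes the reference at module init and drops it at module exit. A skeletal, hypothetical client module doing the same:

#include <linux/module.h>
#include <linux/dmaengine.h>

static int __init example_client_init(void)
{
	dmaengine_get();	/* pins providers, populates the channel table */
	return 0;
}
module_init(example_client_init);

static void __exit example_client_exit(void)
{
	dmaengine_put();	/* providers may unload once the count hits zero */
}
module_exit(example_client_exit);

MODULE_LICENSE("GPL");
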
353 | 608 | ||
354 | /** | 609 | /** |
355 | * dma_async_device_register - registers DMA devices found | 610 | * dma_async_device_register - registers DMA devices found |
@@ -357,9 +612,9 @@ EXPORT_SYMBOL(dma_async_client_chan_request); | |||
357 | */ | 612 | */ |
358 | int dma_async_device_register(struct dma_device *device) | 613 | int dma_async_device_register(struct dma_device *device) |
359 | { | 614 | { |
360 | static int id; | ||
361 | int chancnt = 0, rc; | 615 | int chancnt = 0, rc; |
362 | struct dma_chan* chan; | 616 | struct dma_chan* chan; |
617 | atomic_t *idr_ref; | ||
363 | 618 | ||
364 | if (!device) | 619 | if (!device) |
365 | return -ENODEV; | 620 | return -ENODEV; |
@@ -386,57 +641,83 @@ int dma_async_device_register(struct dma_device *device) | |||
386 | BUG_ON(!device->device_issue_pending); | 641 | BUG_ON(!device->device_issue_pending); |
387 | BUG_ON(!device->dev); | 642 | BUG_ON(!device->dev); |
388 | 643 | ||
389 | init_completion(&device->done); | 644 | idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); |
390 | kref_init(&device->refcount); | 645 | if (!idr_ref) |
391 | 646 | return -ENOMEM; | |
647 | atomic_set(idr_ref, 0); | ||
648 | idr_retry: | ||
649 | if (!idr_pre_get(&dma_idr, GFP_KERNEL)) | ||
650 | return -ENOMEM; | ||
392 | mutex_lock(&dma_list_mutex); | 651 | mutex_lock(&dma_list_mutex); |
393 | device->dev_id = id++; | 652 | rc = idr_get_new(&dma_idr, NULL, &device->dev_id); |
394 | mutex_unlock(&dma_list_mutex); | 653 | mutex_unlock(&dma_list_mutex); |
654 | if (rc == -EAGAIN) | ||
655 | goto idr_retry; | ||
656 | else if (rc != 0) | ||
657 | return rc; | ||
395 | 658 | ||
396 | /* represent channels in sysfs. Probably want devs too */ | 659 | /* represent channels in sysfs. Probably want devs too */ |
397 | list_for_each_entry(chan, &device->channels, device_node) { | 660 | list_for_each_entry(chan, &device->channels, device_node) { |
398 | chan->local = alloc_percpu(typeof(*chan->local)); | 661 | chan->local = alloc_percpu(typeof(*chan->local)); |
399 | if (chan->local == NULL) | 662 | if (chan->local == NULL) |
400 | continue; | 663 | continue; |
664 | chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); | ||
665 | if (chan->dev == NULL) { | ||
666 | free_percpu(chan->local); | ||
667 | continue; | ||
668 | } | ||
401 | 669 | ||
402 | chan->chan_id = chancnt++; | 670 | chan->chan_id = chancnt++; |
403 | chan->dev.class = &dma_devclass; | 671 | chan->dev->device.class = &dma_devclass; |
404 | chan->dev.parent = device->dev; | 672 | chan->dev->device.parent = device->dev; |
405 | dev_set_name(&chan->dev, "dma%dchan%d", | 673 | chan->dev->chan = chan; |
674 | chan->dev->idr_ref = idr_ref; | ||
675 | chan->dev->dev_id = device->dev_id; | ||
676 | atomic_inc(idr_ref); | ||
677 | dev_set_name(&chan->dev->device, "dma%dchan%d", | ||
406 | device->dev_id, chan->chan_id); | 678 | device->dev_id, chan->chan_id); |
407 | 679 | ||
408 | rc = device_register(&chan->dev); | 680 | rc = device_register(&chan->dev->device); |
409 | if (rc) { | 681 | if (rc) { |
410 | chancnt--; | ||
411 | free_percpu(chan->local); | 682 | free_percpu(chan->local); |
412 | chan->local = NULL; | 683 | chan->local = NULL; |
413 | goto err_out; | 684 | goto err_out; |
414 | } | 685 | } |
415 | |||
416 | /* One for the channel, one of the class device */ | ||
417 | kref_get(&device->refcount); | ||
418 | kref_get(&device->refcount); | ||
419 | kref_init(&chan->refcount); | ||
420 | chan->client_count = 0; | 686 | chan->client_count = 0; |
421 | chan->slow_ref = 0; | ||
422 | INIT_RCU_HEAD(&chan->rcu); | ||
423 | } | 687 | } |
688 | device->chancnt = chancnt; | ||
424 | 689 | ||
425 | mutex_lock(&dma_list_mutex); | 690 | mutex_lock(&dma_list_mutex); |
426 | list_add_tail(&device->global_node, &dma_device_list); | 691 | /* take references on public channels */ |
692 | if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
693 | list_for_each_entry(chan, &device->channels, device_node) { | ||
694 | /* if clients are already waiting for channels we need | ||
695 | * to take references on their behalf | ||
696 | */ | ||
697 | if (dma_chan_get(chan) == -ENODEV) { | ||
698 | /* note we can only get here for the first | ||
699 | * channel as the remaining channels are | ||
700 | * guaranteed to get a reference | ||
701 | */ | ||
702 | rc = -ENODEV; | ||
703 | mutex_unlock(&dma_list_mutex); | ||
704 | goto err_out; | ||
705 | } | ||
706 | } | ||
707 | list_add_tail_rcu(&device->global_node, &dma_device_list); | ||
708 | dma_channel_rebalance(); | ||
427 | mutex_unlock(&dma_list_mutex); | 709 | mutex_unlock(&dma_list_mutex); |
428 | 710 | ||
429 | dma_clients_notify_available(); | ||
430 | |||
431 | return 0; | 711 | return 0; |
432 | 712 | ||
433 | err_out: | 713 | err_out: |
434 | list_for_each_entry(chan, &device->channels, device_node) { | 714 | list_for_each_entry(chan, &device->channels, device_node) { |
435 | if (chan->local == NULL) | 715 | if (chan->local == NULL) |
436 | continue; | 716 | continue; |
437 | kref_put(&device->refcount, dma_async_device_cleanup); | 717 | mutex_lock(&dma_list_mutex); |
438 | device_unregister(&chan->dev); | 718 | chan->dev->chan = NULL; |
439 | chancnt--; | 719 | mutex_unlock(&dma_list_mutex); |
720 | device_unregister(&chan->dev->device); | ||
440 | free_percpu(chan->local); | 721 | free_percpu(chan->local); |
441 | } | 722 | } |
442 | return rc; | 723 | return rc; |
@@ -444,37 +725,30 @@ err_out: | |||
444 | EXPORT_SYMBOL(dma_async_device_register); | 725 | EXPORT_SYMBOL(dma_async_device_register); |
445 | 726 | ||
446 | /** | 727 | /** |
447 | * dma_async_device_cleanup - function called when all references are released | 728 | * dma_async_device_unregister - unregister a DMA device |
448 | * @kref: kernel reference object | ||
449 | */ | ||
450 | static void dma_async_device_cleanup(struct kref *kref) | ||
451 | { | ||
452 | struct dma_device *device; | ||
453 | |||
454 | device = container_of(kref, struct dma_device, refcount); | ||
455 | complete(&device->done); | ||
456 | } | ||
457 | |||
458 | /** | ||
459 | * dma_async_device_unregister - unregisters DMA devices | ||
460 | * @device: &dma_device | 729 | * @device: &dma_device |
730 | * | ||
731 | * This routine is called by dma driver exit routines, dmaengine holds module | ||
732 | * references to prevent it being called while channels are in use. | ||
461 | */ | 733 | */ |
462 | void dma_async_device_unregister(struct dma_device *device) | 734 | void dma_async_device_unregister(struct dma_device *device) |
463 | { | 735 | { |
464 | struct dma_chan *chan; | 736 | struct dma_chan *chan; |
465 | 737 | ||
466 | mutex_lock(&dma_list_mutex); | 738 | mutex_lock(&dma_list_mutex); |
467 | list_del(&device->global_node); | 739 | list_del_rcu(&device->global_node); |
740 | dma_channel_rebalance(); | ||
468 | mutex_unlock(&dma_list_mutex); | 741 | mutex_unlock(&dma_list_mutex); |
469 | 742 | ||
470 | list_for_each_entry(chan, &device->channels, device_node) { | 743 | list_for_each_entry(chan, &device->channels, device_node) { |
471 | dma_clients_notify_removed(chan); | 744 | WARN_ONCE(chan->client_count, |
472 | device_unregister(&chan->dev); | 745 | "%s called while %d clients hold a reference\n", |
473 | dma_chan_release(chan); | 746 | __func__, chan->client_count); |
747 | mutex_lock(&dma_list_mutex); | ||
748 | chan->dev->chan = NULL; | ||
749 | mutex_unlock(&dma_list_mutex); | ||
750 | device_unregister(&chan->dev->device); | ||
474 | } | 751 | } |
475 | |||
476 | kref_put(&device->refcount, dma_async_device_cleanup); | ||
477 | wait_for_completion(&device->done); | ||
478 | } | 752 | } |
479 | EXPORT_SYMBOL(dma_async_device_unregister); | 753 | EXPORT_SYMBOL(dma_async_device_unregister); |
480 | 754 | ||
@@ -626,10 +900,96 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |||
626 | } | 900 | } |
627 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); | 901 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); |
628 | 902 | ||
903 | /* dma_wait_for_async_tx - spin wait for a transaction to complete | ||
904 | * @tx: in-flight transaction to wait on | ||
905 | * | ||
906 | * This routine assumes that tx was obtained from a call to async_memcpy, | ||
907 | * async_xor, async_memset, etc., which ensures that tx is "in-flight" (prepped | ||
908 | * and submitted). Walking the parent chain is only meant to cover for DMA | ||
909 | * drivers that do not implement the DMA_INTERRUPT capability and may race with | ||
910 | * the driver's descriptor cleanup routine. | ||
911 | */ | ||
912 | enum dma_status | ||
913 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
914 | { | ||
915 | enum dma_status status; | ||
916 | struct dma_async_tx_descriptor *iter; | ||
917 | struct dma_async_tx_descriptor *parent; | ||
918 | |||
919 | if (!tx) | ||
920 | return DMA_SUCCESS; | ||
921 | |||
922 | WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for" | ||
923 | " %s\n", __func__, dma_chan_name(tx->chan)); | ||
924 | |||
925 | /* poll through the dependency chain, return when tx is complete */ | ||
926 | do { | ||
927 | iter = tx; | ||
928 | |||
929 | /* find the root of the unsubmitted dependency chain */ | ||
930 | do { | ||
931 | parent = iter->parent; | ||
932 | if (!parent) | ||
933 | break; | ||
934 | else | ||
935 | iter = parent; | ||
936 | } while (parent); | ||
937 | |||
938 | /* there is a small window for ->parent == NULL and | ||
939 | * ->cookie == -EBUSY | ||
940 | */ | ||
941 | while (iter->cookie == -EBUSY) | ||
942 | cpu_relax(); | ||
943 | |||
944 | status = dma_sync_wait(iter->chan, iter->cookie); | ||
945 | } while (status == DMA_IN_PROGRESS || (iter != tx)); | ||
946 | |||
947 | return status; | ||
948 | } | ||
949 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); | ||
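
For context on where this spin wait is used: an async_tx client offloads an operation, keeps the returned descriptor, and may later need to block until it completes. A minimal sketch, assuming the async_memcpy() prototype of this kernel generation; the do_copy_then_wait() helper and its page arguments are purely illustrative.

#include <linux/async_tx.h>

/* illustrative helper: offload a page copy, then block until it is done */
static int do_copy_then_wait(struct page *dest, struct page *src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK,
			  NULL, NULL, NULL);
	if (!tx)
		return 0;	/* no channel: the copy was done synchronously */

	/* polls the (possibly unsubmitted) dependency chain, as above */
	return dma_wait_for_async_tx(tx) == DMA_SUCCESS ? 0 : -EIO;
}
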
950 | |||
951 | /* dma_run_dependencies - helper routine for dma drivers to process | ||
952 | * (start) dependent operations on their target channel | ||
953 | * @tx: transaction with dependencies | ||
954 | */ | ||
955 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx) | ||
956 | { | ||
957 | struct dma_async_tx_descriptor *dep = tx->next; | ||
958 | struct dma_async_tx_descriptor *dep_next; | ||
959 | struct dma_chan *chan; | ||
960 | |||
961 | if (!dep) | ||
962 | return; | ||
963 | |||
964 | chan = dep->chan; | ||
965 | |||
966 | /* keep submitting until a channel switch is detected; | ||
967 | * in that case we will be called again as a result of | ||
968 | * processing the interrupt from async_tx_channel_switch | ||
969 | */ | ||
970 | for (; dep; dep = dep_next) { | ||
971 | spin_lock_bh(&dep->lock); | ||
972 | dep->parent = NULL; | ||
973 | dep_next = dep->next; | ||
974 | if (dep_next && dep_next->chan == chan) | ||
975 | dep->next = NULL; /* ->next will be submitted */ | ||
976 | else | ||
977 | dep_next = NULL; /* submit current dep and terminate */ | ||
978 | spin_unlock_bh(&dep->lock); | ||
979 | |||
980 | dep->tx_submit(dep); | ||
981 | } | ||
982 | |||
983 | chan->device->device_issue_pending(chan); | ||
984 | } | ||
985 | EXPORT_SYMBOL_GPL(dma_run_dependencies); | ||
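
The intended caller is a dmaengine driver's descriptor clean-up path, after a completed descriptor's client callback has run; the iop-adma and mv_xor hunks later in this patch are converted to exactly this call. A hedged sketch with a made-up driver descriptor type.

#include <linux/dmaengine.h>

/* hypothetical driver descriptor wrapping the generic transaction */
struct mydrv_desc {
	struct dma_async_tx_descriptor txd;
	/* ... hardware descriptor fields ... */
};

static void mydrv_clean_completed_desc(struct mydrv_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;

	/* notify the client first */
	if (txd->callback)
		txd->callback(txd->callback_param);

	/* then kick any operations chained behind this one, which may
	 * live on a different channel */
	dma_run_dependencies(txd);
}
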
986 | |||
629 | static int __init dma_bus_init(void) | 987 | static int __init dma_bus_init(void) |
630 | { | 988 | { |
989 | idr_init(&dma_idr); | ||
631 | mutex_init(&dma_list_mutex); | 990 | mutex_init(&dma_list_mutex); |
632 | return class_register(&dma_devclass); | 991 | return class_register(&dma_devclass); |
633 | } | 992 | } |
634 | subsys_initcall(dma_bus_init); | 993 | arch_initcall(dma_bus_init); |
994 | |||
635 | 995 | ||
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index ed9636bfb54a..3603f1ea5b28 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(threads_per_chan, | |||
35 | 35 | ||
36 | static unsigned int max_channels; | 36 | static unsigned int max_channels; |
37 | module_param(max_channels, uint, S_IRUGO); | 37 | module_param(max_channels, uint, S_IRUGO); |
38 | MODULE_PARM_DESC(nr_channels, | 38 | MODULE_PARM_DESC(max_channels, |
39 | "Maximum number of channels to use (default: all)"); | 39 | "Maximum number of channels to use (default: all)"); |
40 | 40 | ||
41 | /* | 41 | /* |
@@ -71,7 +71,7 @@ struct dmatest_chan { | |||
71 | 71 | ||
72 | /* | 72 | /* |
73 | * These are protected by dma_list_mutex since they're only used by | 73 | * These are protected by dma_list_mutex since they're only used by |
74 | * the DMA client event callback | 74 | * the DMA filter function callback |
75 | */ | 75 | */ |
76 | static LIST_HEAD(dmatest_channels); | 76 | static LIST_HEAD(dmatest_channels); |
77 | static unsigned int nr_channels; | 77 | static unsigned int nr_channels; |
@@ -80,7 +80,7 @@ static bool dmatest_match_channel(struct dma_chan *chan) | |||
80 | { | 80 | { |
81 | if (test_channel[0] == '\0') | 81 | if (test_channel[0] == '\0') |
82 | return true; | 82 | return true; |
83 | return strcmp(dev_name(&chan->dev), test_channel) == 0; | 83 | return strcmp(dma_chan_name(chan), test_channel) == 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | static bool dmatest_match_device(struct dma_device *device) | 86 | static bool dmatest_match_device(struct dma_device *device) |
@@ -215,7 +215,6 @@ static int dmatest_func(void *data) | |||
215 | 215 | ||
216 | smp_rmb(); | 216 | smp_rmb(); |
217 | chan = thread->chan; | 217 | chan = thread->chan; |
218 | dma_chan_get(chan); | ||
219 | 218 | ||
220 | while (!kthread_should_stop()) { | 219 | while (!kthread_should_stop()) { |
221 | total_tests++; | 220 | total_tests++; |
@@ -293,7 +292,6 @@ static int dmatest_func(void *data) | |||
293 | } | 292 | } |
294 | 293 | ||
295 | ret = 0; | 294 | ret = 0; |
296 | dma_chan_put(chan); | ||
297 | kfree(thread->dstbuf); | 295 | kfree(thread->dstbuf); |
298 | err_dstbuf: | 296 | err_dstbuf: |
299 | kfree(thread->srcbuf); | 297 | kfree(thread->srcbuf); |
@@ -319,21 +317,16 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | |||
319 | kfree(dtc); | 317 | kfree(dtc); |
320 | } | 318 | } |
321 | 319 | ||
322 | static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) | 320 | static int dmatest_add_channel(struct dma_chan *chan) |
323 | { | 321 | { |
324 | struct dmatest_chan *dtc; | 322 | struct dmatest_chan *dtc; |
325 | struct dmatest_thread *thread; | 323 | struct dmatest_thread *thread; |
326 | unsigned int i; | 324 | unsigned int i; |
327 | 325 | ||
328 | /* Have we already been told about this channel? */ | ||
329 | list_for_each_entry(dtc, &dmatest_channels, node) | ||
330 | if (dtc->chan == chan) | ||
331 | return DMA_DUP; | ||
332 | |||
333 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); | 326 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); |
334 | if (!dtc) { | 327 | if (!dtc) { |
335 | pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev)); | 328 | pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); |
336 | return DMA_NAK; | 329 | return -ENOMEM; |
337 | } | 330 | } |
338 | 331 | ||
339 | dtc->chan = chan; | 332 | dtc->chan = chan; |
@@ -343,16 +336,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) | |||
343 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); | 336 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); |
344 | if (!thread) { | 337 | if (!thread) { |
345 | pr_warning("dmatest: No memory for %s-test%u\n", | 338 | pr_warning("dmatest: No memory for %s-test%u\n", |
346 | dev_name(&chan->dev), i); | 339 | dma_chan_name(chan), i); |
347 | break; | 340 | break; |
348 | } | 341 | } |
349 | thread->chan = dtc->chan; | 342 | thread->chan = dtc->chan; |
350 | smp_wmb(); | 343 | smp_wmb(); |
351 | thread->task = kthread_run(dmatest_func, thread, "%s-test%u", | 344 | thread->task = kthread_run(dmatest_func, thread, "%s-test%u", |
352 | dev_name(&chan->dev), i); | 345 | dma_chan_name(chan), i); |
353 | if (IS_ERR(thread->task)) { | 346 | if (IS_ERR(thread->task)) { |
354 | pr_warning("dmatest: Failed to run thread %s-test%u\n", | 347 | pr_warning("dmatest: Failed to run thread %s-test%u\n", |
355 | dev_name(&chan->dev), i); | 348 | dma_chan_name(chan), i); |
356 | kfree(thread); | 349 | kfree(thread); |
357 | break; | 350 | break; |
358 | } | 351 | } |
@@ -362,86 +355,62 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) | |||
362 | list_add_tail(&thread->node, &dtc->threads); | 355 | list_add_tail(&thread->node, &dtc->threads); |
363 | } | 356 | } |
364 | 357 | ||
365 | pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev)); | 358 | pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan)); |
366 | 359 | ||
367 | list_add_tail(&dtc->node, &dmatest_channels); | 360 | list_add_tail(&dtc->node, &dmatest_channels); |
368 | nr_channels++; | 361 | nr_channels++; |
369 | 362 | ||
370 | return DMA_ACK; | 363 | return 0; |
371 | } | ||
372 | |||
373 | static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan) | ||
374 | { | ||
375 | struct dmatest_chan *dtc, *_dtc; | ||
376 | |||
377 | list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { | ||
378 | if (dtc->chan == chan) { | ||
379 | list_del(&dtc->node); | ||
380 | dmatest_cleanup_channel(dtc); | ||
381 | pr_debug("dmatest: lost channel %s\n", | ||
382 | dev_name(&chan->dev)); | ||
383 | return DMA_ACK; | ||
384 | } | ||
385 | } | ||
386 | |||
387 | return DMA_DUP; | ||
388 | } | 364 | } |
389 | 365 | ||
390 | /* | 366 | static bool filter(struct dma_chan *chan, void *param) |
391 | * Start testing threads as new channels are assigned to us, and kill | ||
392 | * them when the channels go away. | ||
393 | * | ||
394 | * When we unregister the client, all channels are removed so this | ||
395 | * will also take care of cleaning things up when the module is | ||
396 | * unloaded. | ||
397 | */ | ||
398 | static enum dma_state_client | ||
399 | dmatest_event(struct dma_client *client, struct dma_chan *chan, | ||
400 | enum dma_state state) | ||
401 | { | 367 | { |
402 | enum dma_state_client ack = DMA_NAK; | 368 | if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) |
403 | 369 | return false; | |
404 | switch (state) { | 370 | else |
405 | case DMA_RESOURCE_AVAILABLE: | 371 | return true; |
406 | if (!dmatest_match_channel(chan) | ||
407 | || !dmatest_match_device(chan->device)) | ||
408 | ack = DMA_DUP; | ||
409 | else if (max_channels && nr_channels >= max_channels) | ||
410 | ack = DMA_NAK; | ||
411 | else | ||
412 | ack = dmatest_add_channel(chan); | ||
413 | break; | ||
414 | |||
415 | case DMA_RESOURCE_REMOVED: | ||
416 | ack = dmatest_remove_channel(chan); | ||
417 | break; | ||
418 | |||
419 | default: | ||
420 | pr_info("dmatest: Unhandled event %u (%s)\n", | ||
421 | state, dev_name(&chan->dev)); | ||
422 | break; | ||
423 | } | ||
424 | |||
425 | return ack; | ||
426 | } | 372 | } |
427 | 373 | ||
428 | static struct dma_client dmatest_client = { | ||
429 | .event_callback = dmatest_event, | ||
430 | }; | ||
431 | |||
432 | static int __init dmatest_init(void) | 374 | static int __init dmatest_init(void) |
433 | { | 375 | { |
434 | dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask); | 376 | dma_cap_mask_t mask; |
435 | dma_async_client_register(&dmatest_client); | 377 | struct dma_chan *chan; |
436 | dma_async_client_chan_request(&dmatest_client); | 378 | int err = 0; |
379 | |||
380 | dma_cap_zero(mask); | ||
381 | dma_cap_set(DMA_MEMCPY, mask); | ||
382 | for (;;) { | ||
383 | chan = dma_request_channel(mask, filter, NULL); | ||
384 | if (chan) { | ||
385 | err = dmatest_add_channel(chan); | ||
386 | if (err == 0) | ||
387 | continue; | ||
388 | else { | ||
389 | dma_release_channel(chan); | ||
390 | break; /* add_channel failed, punt */ | ||
391 | } | ||
392 | } else | ||
393 | break; /* no more channels available */ | ||
394 | if (max_channels && nr_channels >= max_channels) | ||
395 | break; /* we have all we need */ | ||
396 | } | ||
437 | 397 | ||
438 | return 0; | 398 | return err; |
439 | } | 399 | } |
440 | module_init(dmatest_init); | 400 | /* when compiled in, wait for drivers to load first */ |
401 | late_initcall(dmatest_init); | ||
441 | 402 | ||
442 | static void __exit dmatest_exit(void) | 403 | static void __exit dmatest_exit(void) |
443 | { | 404 | { |
444 | dma_async_client_unregister(&dmatest_client); | 405 | struct dmatest_chan *dtc, *_dtc; |
406 | |||
407 | list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { | ||
408 | list_del(&dtc->node); | ||
409 | dmatest_cleanup_channel(dtc); | ||
410 | pr_debug("dmatest: dropped channel %s\n", | ||
411 | dma_chan_name(dtc->chan)); | ||
412 | dma_release_channel(dtc->chan); | ||
413 | } | ||
445 | } | 414 | } |
446 | module_exit(dmatest_exit); | 415 | module_exit(dmatest_exit); |
447 | 416 | ||
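
The dmatest conversion above is effectively the template for any in-kernel client under the new model: build a capability mask, optionally narrow the candidates with a filter function, hold the channel for as long as it is needed, and hand it back with dma_release_channel(). A compact, hypothetical client sketch follows; the my_client_* names and the "ioat" name prefix are invented for illustration.

#include <linux/dmaengine.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>

static struct dma_chan *my_client_chan;
static char my_client_prefix[] = "ioat";	/* illustrative match only */

/* example dma_filter_fn: accept channels whose parent DMA device name
 * starts with the given prefix */
static bool my_client_filter(struct dma_chan *chan, void *param)
{
	const char *prefix = param;

	return strncmp(dev_name(chan->device->dev), prefix,
		       strlen(prefix)) == 0;
}

static int __init my_client_init(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	my_client_chan = dma_request_channel(mask, my_client_filter,
					     my_client_prefix);
	if (!my_client_chan)
		return -ENODEV;

	pr_info("my_client: using %s\n", dma_chan_name(my_client_chan));
	return 0;
}
module_init(my_client_init);

static void __exit my_client_exit(void)
{
	dma_release_channel(my_client_chan);	/* drop our reference */
}
module_exit(my_client_exit);

MODULE_LICENSE("GPL");
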
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 0778d99aea7c..6b702cc46b3d 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -70,6 +70,15 @@ | |||
70 | * the controller, though. | 70 | * the controller, though. |
71 | */ | 71 | */ |
72 | 72 | ||
73 | static struct device *chan2dev(struct dma_chan *chan) | ||
74 | { | ||
75 | return &chan->dev->device; | ||
76 | } | ||
77 | static struct device *chan2parent(struct dma_chan *chan) | ||
78 | { | ||
79 | return chan->dev->device.parent; | ||
80 | } | ||
81 | |||
73 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | 82 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
74 | { | 83 | { |
75 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | 84 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); |
@@ -93,12 +102,12 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | |||
93 | ret = desc; | 102 | ret = desc; |
94 | break; | 103 | break; |
95 | } | 104 | } |
96 | dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc); | 105 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
97 | i++; | 106 | i++; |
98 | } | 107 | } |
99 | spin_unlock_bh(&dwc->lock); | 108 | spin_unlock_bh(&dwc->lock); |
100 | 109 | ||
101 | dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i); | 110 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); |
102 | 111 | ||
103 | return ret; | 112 | return ret; |
104 | } | 113 | } |
@@ -108,10 +117,10 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
108 | struct dw_desc *child; | 117 | struct dw_desc *child; |
109 | 118 | ||
110 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 119 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) |
111 | dma_sync_single_for_cpu(dwc->chan.dev.parent, | 120 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
112 | child->txd.phys, sizeof(child->lli), | 121 | child->txd.phys, sizeof(child->lli), |
113 | DMA_TO_DEVICE); | 122 | DMA_TO_DEVICE); |
114 | dma_sync_single_for_cpu(dwc->chan.dev.parent, | 123 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
115 | desc->txd.phys, sizeof(desc->lli), | 124 | desc->txd.phys, sizeof(desc->lli), |
116 | DMA_TO_DEVICE); | 125 | DMA_TO_DEVICE); |
117 | } | 126 | } |
@@ -129,11 +138,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
129 | 138 | ||
130 | spin_lock_bh(&dwc->lock); | 139 | spin_lock_bh(&dwc->lock); |
131 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | 140 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) |
132 | dev_vdbg(&dwc->chan.dev, | 141 | dev_vdbg(chan2dev(&dwc->chan), |
133 | "moving child desc %p to freelist\n", | 142 | "moving child desc %p to freelist\n", |
134 | child); | 143 | child); |
135 | list_splice_init(&desc->txd.tx_list, &dwc->free_list); | 144 | list_splice_init(&desc->txd.tx_list, &dwc->free_list); |
136 | dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc); | 145 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
137 | list_add(&desc->desc_node, &dwc->free_list); | 146 | list_add(&desc->desc_node, &dwc->free_list); |
138 | spin_unlock_bh(&dwc->lock); | 147 | spin_unlock_bh(&dwc->lock); |
139 | } | 148 | } |
@@ -163,9 +172,9 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
163 | 172 | ||
164 | /* ASSERT: channel is idle */ | 173 | /* ASSERT: channel is idle */ |
165 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 174 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
166 | dev_err(&dwc->chan.dev, | 175 | dev_err(chan2dev(&dwc->chan), |
167 | "BUG: Attempted to start non-idle channel\n"); | 176 | "BUG: Attempted to start non-idle channel\n"); |
168 | dev_err(&dwc->chan.dev, | 177 | dev_err(chan2dev(&dwc->chan), |
169 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 178 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
170 | channel_readl(dwc, SAR), | 179 | channel_readl(dwc, SAR), |
171 | channel_readl(dwc, DAR), | 180 | channel_readl(dwc, DAR), |
@@ -193,7 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
193 | void *param; | 202 | void *param; |
194 | struct dma_async_tx_descriptor *txd = &desc->txd; | 203 | struct dma_async_tx_descriptor *txd = &desc->txd; |
195 | 204 | ||
196 | dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie); | 205 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
197 | 206 | ||
198 | dwc->completed = txd->cookie; | 207 | dwc->completed = txd->cookie; |
199 | callback = txd->callback; | 208 | callback = txd->callback; |
@@ -208,11 +217,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
208 | * mapped before they were submitted... | 217 | * mapped before they were submitted... |
209 | */ | 218 | */ |
210 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) | 219 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) |
211 | dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len, | 220 | dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar, |
212 | DMA_FROM_DEVICE); | 221 | desc->len, DMA_FROM_DEVICE); |
213 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) | 222 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) |
214 | dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len, | 223 | dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar, |
215 | DMA_TO_DEVICE); | 224 | desc->len, DMA_TO_DEVICE); |
216 | 225 | ||
217 | /* | 226 | /* |
218 | * The API requires that no submissions are done from a | 227 | * The API requires that no submissions are done from a |
@@ -228,7 +237,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
228 | LIST_HEAD(list); | 237 | LIST_HEAD(list); |
229 | 238 | ||
230 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 239 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
231 | dev_err(&dwc->chan.dev, | 240 | dev_err(chan2dev(&dwc->chan), |
232 | "BUG: XFER bit set, but channel not idle!\n"); | 241 | "BUG: XFER bit set, but channel not idle!\n"); |
233 | 242 | ||
234 | /* Try to continue after resetting the channel... */ | 243 | /* Try to continue after resetting the channel... */ |
@@ -273,7 +282,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
273 | return; | 282 | return; |
274 | } | 283 | } |
275 | 284 | ||
276 | dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp); | 285 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); |
277 | 286 | ||
278 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 287 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
279 | if (desc->lli.llp == llp) | 288 | if (desc->lli.llp == llp) |
@@ -292,7 +301,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
292 | dwc_descriptor_complete(dwc, desc); | 301 | dwc_descriptor_complete(dwc, desc); |
293 | } | 302 | } |
294 | 303 | ||
295 | dev_err(&dwc->chan.dev, | 304 | dev_err(chan2dev(&dwc->chan), |
296 | "BUG: All descriptors done, but channel not idle!\n"); | 305 | "BUG: All descriptors done, but channel not idle!\n"); |
297 | 306 | ||
298 | /* Try to continue after resetting the channel... */ | 307 | /* Try to continue after resetting the channel... */ |
@@ -308,7 +317,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
308 | 317 | ||
309 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 318 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
310 | { | 319 | { |
311 | dev_printk(KERN_CRIT, &dwc->chan.dev, | 320 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
312 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | 321 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
313 | lli->sar, lli->dar, lli->llp, | 322 | lli->sar, lli->dar, lli->llp, |
314 | lli->ctlhi, lli->ctllo); | 323 | lli->ctlhi, lli->ctllo); |
@@ -342,9 +351,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
342 | * controller flagged an error instead of scribbling over | 351 | * controller flagged an error instead of scribbling over |
343 | * random memory locations. | 352 | * random memory locations. |
344 | */ | 353 | */ |
345 | dev_printk(KERN_CRIT, &dwc->chan.dev, | 354 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
346 | "Bad descriptor submitted for DMA!\n"); | 355 | "Bad descriptor submitted for DMA!\n"); |
347 | dev_printk(KERN_CRIT, &dwc->chan.dev, | 356 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
348 | " cookie: %d\n", bad_desc->txd.cookie); | 357 | " cookie: %d\n", bad_desc->txd.cookie); |
349 | dwc_dump_lli(dwc, &bad_desc->lli); | 358 | dwc_dump_lli(dwc, &bad_desc->lli); |
350 | list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) | 359 | list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) |
@@ -442,12 +451,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
442 | * for DMA. But this is hard to do in a race-free manner. | 451 | * for DMA. But this is hard to do in a race-free manner. |
443 | */ | 452 | */ |
444 | if (list_empty(&dwc->active_list)) { | 453 | if (list_empty(&dwc->active_list)) { |
445 | dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n", | 454 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
446 | desc->txd.cookie); | 455 | desc->txd.cookie); |
447 | dwc_dostart(dwc, desc); | 456 | dwc_dostart(dwc, desc); |
448 | list_add_tail(&desc->desc_node, &dwc->active_list); | 457 | list_add_tail(&desc->desc_node, &dwc->active_list); |
449 | } else { | 458 | } else { |
450 | dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n", | 459 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", |
451 | desc->txd.cookie); | 460 | desc->txd.cookie); |
452 | 461 | ||
453 | list_add_tail(&desc->desc_node, &dwc->queue); | 462 | list_add_tail(&desc->desc_node, &dwc->queue); |
@@ -472,11 +481,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
472 | unsigned int dst_width; | 481 | unsigned int dst_width; |
473 | u32 ctllo; | 482 | u32 ctllo; |
474 | 483 | ||
475 | dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", | 484 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", |
476 | dest, src, len, flags); | 485 | dest, src, len, flags); |
477 | 486 | ||
478 | if (unlikely(!len)) { | 487 | if (unlikely(!len)) { |
479 | dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n"); | 488 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); |
480 | return NULL; | 489 | return NULL; |
481 | } | 490 | } |
482 | 491 | ||
@@ -516,7 +525,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
516 | first = desc; | 525 | first = desc; |
517 | } else { | 526 | } else { |
518 | prev->lli.llp = desc->txd.phys; | 527 | prev->lli.llp = desc->txd.phys; |
519 | dma_sync_single_for_device(chan->dev.parent, | 528 | dma_sync_single_for_device(chan2parent(chan), |
520 | prev->txd.phys, sizeof(prev->lli), | 529 | prev->txd.phys, sizeof(prev->lli), |
521 | DMA_TO_DEVICE); | 530 | DMA_TO_DEVICE); |
522 | list_add_tail(&desc->desc_node, | 531 | list_add_tail(&desc->desc_node, |
@@ -531,7 +540,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
531 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 540 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
532 | 541 | ||
533 | prev->lli.llp = 0; | 542 | prev->lli.llp = 0; |
534 | dma_sync_single_for_device(chan->dev.parent, | 543 | dma_sync_single_for_device(chan2parent(chan), |
535 | prev->txd.phys, sizeof(prev->lli), | 544 | prev->txd.phys, sizeof(prev->lli), |
536 | DMA_TO_DEVICE); | 545 | DMA_TO_DEVICE); |
537 | 546 | ||
@@ -562,15 +571,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
562 | struct scatterlist *sg; | 571 | struct scatterlist *sg; |
563 | size_t total_len = 0; | 572 | size_t total_len = 0; |
564 | 573 | ||
565 | dev_vdbg(&chan->dev, "prep_dma_slave\n"); | 574 | dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); |
566 | 575 | ||
567 | if (unlikely(!dws || !sg_len)) | 576 | if (unlikely(!dws || !sg_len)) |
568 | return NULL; | 577 | return NULL; |
569 | 578 | ||
570 | reg_width = dws->slave.reg_width; | 579 | reg_width = dws->reg_width; |
571 | prev = first = NULL; | 580 | prev = first = NULL; |
572 | 581 | ||
573 | sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction); | 582 | sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction); |
574 | 583 | ||
575 | switch (direction) { | 584 | switch (direction) { |
576 | case DMA_TO_DEVICE: | 585 | case DMA_TO_DEVICE: |
@@ -579,7 +588,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
579 | | DWC_CTLL_DST_FIX | 588 | | DWC_CTLL_DST_FIX |
580 | | DWC_CTLL_SRC_INC | 589 | | DWC_CTLL_SRC_INC |
581 | | DWC_CTLL_FC_M2P); | 590 | | DWC_CTLL_FC_M2P); |
582 | reg = dws->slave.tx_reg; | 591 | reg = dws->tx_reg; |
583 | for_each_sg(sgl, sg, sg_len, i) { | 592 | for_each_sg(sgl, sg, sg_len, i) { |
584 | struct dw_desc *desc; | 593 | struct dw_desc *desc; |
585 | u32 len; | 594 | u32 len; |
@@ -587,7 +596,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
587 | 596 | ||
588 | desc = dwc_desc_get(dwc); | 597 | desc = dwc_desc_get(dwc); |
589 | if (!desc) { | 598 | if (!desc) { |
590 | dev_err(&chan->dev, | 599 | dev_err(chan2dev(chan), |
591 | "not enough descriptors available\n"); | 600 | "not enough descriptors available\n"); |
592 | goto err_desc_get; | 601 | goto err_desc_get; |
593 | } | 602 | } |
@@ -607,7 +616,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
607 | first = desc; | 616 | first = desc; |
608 | } else { | 617 | } else { |
609 | prev->lli.llp = desc->txd.phys; | 618 | prev->lli.llp = desc->txd.phys; |
610 | dma_sync_single_for_device(chan->dev.parent, | 619 | dma_sync_single_for_device(chan2parent(chan), |
611 | prev->txd.phys, | 620 | prev->txd.phys, |
612 | sizeof(prev->lli), | 621 | sizeof(prev->lli), |
613 | DMA_TO_DEVICE); | 622 | DMA_TO_DEVICE); |
@@ -625,7 +634,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
625 | | DWC_CTLL_SRC_FIX | 634 | | DWC_CTLL_SRC_FIX |
626 | | DWC_CTLL_FC_P2M); | 635 | | DWC_CTLL_FC_P2M); |
627 | 636 | ||
628 | reg = dws->slave.rx_reg; | 637 | reg = dws->rx_reg; |
629 | for_each_sg(sgl, sg, sg_len, i) { | 638 | for_each_sg(sgl, sg, sg_len, i) { |
630 | struct dw_desc *desc; | 639 | struct dw_desc *desc; |
631 | u32 len; | 640 | u32 len; |
@@ -633,7 +642,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
633 | 642 | ||
634 | desc = dwc_desc_get(dwc); | 643 | desc = dwc_desc_get(dwc); |
635 | if (!desc) { | 644 | if (!desc) { |
636 | dev_err(&chan->dev, | 645 | dev_err(chan2dev(chan), |
637 | "not enough descriptors available\n"); | 646 | "not enough descriptors available\n"); |
638 | goto err_desc_get; | 647 | goto err_desc_get; |
639 | } | 648 | } |
@@ -653,7 +662,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
653 | first = desc; | 662 | first = desc; |
654 | } else { | 663 | } else { |
655 | prev->lli.llp = desc->txd.phys; | 664 | prev->lli.llp = desc->txd.phys; |
656 | dma_sync_single_for_device(chan->dev.parent, | 665 | dma_sync_single_for_device(chan2parent(chan), |
657 | prev->txd.phys, | 666 | prev->txd.phys, |
658 | sizeof(prev->lli), | 667 | sizeof(prev->lli), |
659 | DMA_TO_DEVICE); | 668 | DMA_TO_DEVICE); |
@@ -673,7 +682,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
673 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 682 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
674 | 683 | ||
675 | prev->lli.llp = 0; | 684 | prev->lli.llp = 0; |
676 | dma_sync_single_for_device(chan->dev.parent, | 685 | dma_sync_single_for_device(chan2parent(chan), |
677 | prev->txd.phys, sizeof(prev->lli), | 686 | prev->txd.phys, sizeof(prev->lli), |
678 | DMA_TO_DEVICE); | 687 | DMA_TO_DEVICE); |
679 | 688 | ||
@@ -758,29 +767,21 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
758 | spin_unlock_bh(&dwc->lock); | 767 | spin_unlock_bh(&dwc->lock); |
759 | } | 768 | } |
760 | 769 | ||
761 | static int dwc_alloc_chan_resources(struct dma_chan *chan, | 770 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
762 | struct dma_client *client) | ||
763 | { | 771 | { |
764 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 772 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
765 | struct dw_dma *dw = to_dw_dma(chan->device); | 773 | struct dw_dma *dw = to_dw_dma(chan->device); |
766 | struct dw_desc *desc; | 774 | struct dw_desc *desc; |
767 | struct dma_slave *slave; | ||
768 | struct dw_dma_slave *dws; | 775 | struct dw_dma_slave *dws; |
769 | int i; | 776 | int i; |
770 | u32 cfghi; | 777 | u32 cfghi; |
771 | u32 cfglo; | 778 | u32 cfglo; |
772 | 779 | ||
773 | dev_vdbg(&chan->dev, "alloc_chan_resources\n"); | 780 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); |
774 | |||
775 | /* Channels doing slave DMA can only handle one client. */ | ||
776 | if (dwc->dws || client->slave) { | ||
777 | if (chan->client_count) | ||
778 | return -EBUSY; | ||
779 | } | ||
780 | 781 | ||
781 | /* ASSERT: channel is idle */ | 782 | /* ASSERT: channel is idle */ |
782 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 783 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
783 | dev_dbg(&chan->dev, "DMA channel not idle?\n"); | 784 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
784 | return -EIO; | 785 | return -EIO; |
785 | } | 786 | } |
786 | 787 | ||
@@ -789,23 +790,17 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan, | |||
789 | cfghi = DWC_CFGH_FIFO_MODE; | 790 | cfghi = DWC_CFGH_FIFO_MODE; |
790 | cfglo = 0; | 791 | cfglo = 0; |
791 | 792 | ||
792 | slave = client->slave; | 793 | dws = dwc->dws; |
793 | if (slave) { | 794 | if (dws) { |
794 | /* | 795 | /* |
795 | * We need controller-specific data to set up slave | 796 | * We need controller-specific data to set up slave |
796 | * transfers. | 797 | * transfers. |
797 | */ | 798 | */ |
798 | BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev); | 799 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); |
799 | |||
800 | dws = container_of(slave, struct dw_dma_slave, slave); | ||
801 | 800 | ||
802 | dwc->dws = dws; | ||
803 | cfghi = dws->cfg_hi; | 801 | cfghi = dws->cfg_hi; |
804 | cfglo = dws->cfg_lo; | 802 | cfglo = dws->cfg_lo; |
805 | } else { | ||
806 | dwc->dws = NULL; | ||
807 | } | 803 | } |
808 | |||
809 | channel_writel(dwc, CFG_LO, cfglo); | 804 | channel_writel(dwc, CFG_LO, cfglo); |
810 | channel_writel(dwc, CFG_HI, cfghi); | 805 | channel_writel(dwc, CFG_HI, cfghi); |
811 | 806 | ||
@@ -822,7 +817,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan, | |||
822 | 817 | ||
823 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | 818 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); |
824 | if (!desc) { | 819 | if (!desc) { |
825 | dev_info(&chan->dev, | 820 | dev_info(chan2dev(chan), |
826 | "only allocated %d descriptors\n", i); | 821 | "only allocated %d descriptors\n", i); |
827 | spin_lock_bh(&dwc->lock); | 822 | spin_lock_bh(&dwc->lock); |
828 | break; | 823 | break; |
@@ -832,7 +827,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan, | |||
832 | desc->txd.tx_submit = dwc_tx_submit; | 827 | desc->txd.tx_submit = dwc_tx_submit; |
833 | desc->txd.flags = DMA_CTRL_ACK; | 828 | desc->txd.flags = DMA_CTRL_ACK; |
834 | INIT_LIST_HEAD(&desc->txd.tx_list); | 829 | INIT_LIST_HEAD(&desc->txd.tx_list); |
835 | desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli, | 830 | desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, |
836 | sizeof(desc->lli), DMA_TO_DEVICE); | 831 | sizeof(desc->lli), DMA_TO_DEVICE); |
837 | dwc_desc_put(dwc, desc); | 832 | dwc_desc_put(dwc, desc); |
838 | 833 | ||
@@ -847,7 +842,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan, | |||
847 | 842 | ||
848 | spin_unlock_bh(&dwc->lock); | 843 | spin_unlock_bh(&dwc->lock); |
849 | 844 | ||
850 | dev_dbg(&chan->dev, | 845 | dev_dbg(chan2dev(chan), |
851 | "alloc_chan_resources allocated %d descriptors\n", i); | 846 | "alloc_chan_resources allocated %d descriptors\n", i); |
852 | 847 | ||
853 | return i; | 848 | return i; |
@@ -860,7 +855,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
860 | struct dw_desc *desc, *_desc; | 855 | struct dw_desc *desc, *_desc; |
861 | LIST_HEAD(list); | 856 | LIST_HEAD(list); |
862 | 857 | ||
863 | dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n", | 858 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", |
864 | dwc->descs_allocated); | 859 | dwc->descs_allocated); |
865 | 860 | ||
866 | /* ASSERT: channel is idle */ | 861 | /* ASSERT: channel is idle */ |
@@ -881,13 +876,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
881 | spin_unlock_bh(&dwc->lock); | 876 | spin_unlock_bh(&dwc->lock); |
882 | 877 | ||
883 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 878 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
884 | dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc); | 879 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
885 | dma_unmap_single(chan->dev.parent, desc->txd.phys, | 880 | dma_unmap_single(chan2parent(chan), desc->txd.phys, |
886 | sizeof(desc->lli), DMA_TO_DEVICE); | 881 | sizeof(desc->lli), DMA_TO_DEVICE); |
887 | kfree(desc); | 882 | kfree(desc); |
888 | } | 883 | } |
889 | 884 | ||
890 | dev_vdbg(&chan->dev, "free_chan_resources done\n"); | 885 | dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); |
891 | } | 886 | } |
892 | 887 | ||
893 | /*----------------------------------------------------------------------*/ | 888 | /*----------------------------------------------------------------------*/ |
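
This dw_dmac side of the slave conversion is what the atmel-mci hunk at the end of the patch relies on: instead of attaching a generic struct dma_slave to a dma_client, the peripheral driver now passes its controller-specific struct dw_dma_slave straight through dma_request_channel() as the filter parameter, and the channel's alloc path reads dwc->dws directly. A hedged sketch of that calling convention from the peripheral side; the header location and the request_slave_chan() wrapper are assumptions.

#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>	/* struct dw_dma_slave, assumed location */

/* match only channels that belong to the DMA controller named in the
 * slave data, the same test the atmel-mci conversion below uses */
static bool dw_filter(struct dma_chan *chan, void *slave)
{
	struct dw_dma_slave *dws = slave;

	return dws->dma_dev == chan->device->dev;
}

static struct dma_chan *request_slave_chan(struct dw_dma_slave *dws,
					   dma_addr_t tx_reg,
					   dma_addr_t rx_reg)
{
	dma_cap_mask_t mask;

	dws->tx_reg = tx_reg;	/* peripheral's transmit FIFO address */
	dws->rx_reg = rx_reg;	/* peripheral's receive FIFO address */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, dw_filter, dws);
}
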
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 0b95dcce447e..ca70a21afc68 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | |||
366 | * | 366 | * |
367 | * Return - The number of descriptors allocated. | 367 | * Return - The number of descriptors allocated. |
368 | */ | 368 | */ |
369 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan, | 369 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) |
370 | struct dma_client *client) | ||
371 | { | 370 | { |
372 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | 371 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); |
373 | 372 | ||
@@ -823,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, | |||
823 | */ | 822 | */ |
824 | WARN_ON(fdev->feature != new_fsl_chan->feature); | 823 | WARN_ON(fdev->feature != new_fsl_chan->feature); |
825 | 824 | ||
826 | new_fsl_chan->dev = &new_fsl_chan->common.dev; | 825 | new_fsl_chan->dev = &new_fsl_chan->common.dev->device; |
827 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, | 826 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, |
828 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); | 827 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); |
829 | 828 | ||
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c index 9b16a3af9a0a..4105d6575b64 100644 --- a/drivers/dma/ioat.c +++ b/drivers/dma/ioat.c | |||
@@ -75,60 +75,10 @@ static int ioat_dca_enabled = 1; | |||
75 | module_param(ioat_dca_enabled, int, 0644); | 75 | module_param(ioat_dca_enabled, int, 0644); |
76 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | 76 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); |
77 | 77 | ||
78 | static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase) | ||
79 | { | ||
80 | struct ioat_device *device = pci_get_drvdata(pdev); | ||
81 | u8 version; | ||
82 | int err = 0; | ||
83 | |||
84 | version = readb(iobase + IOAT_VER_OFFSET); | ||
85 | switch (version) { | ||
86 | case IOAT_VER_1_2: | ||
87 | device->dma = ioat_dma_probe(pdev, iobase); | ||
88 | if (device->dma && ioat_dca_enabled) | ||
89 | device->dca = ioat_dca_init(pdev, iobase); | ||
90 | break; | ||
91 | case IOAT_VER_2_0: | ||
92 | device->dma = ioat_dma_probe(pdev, iobase); | ||
93 | if (device->dma && ioat_dca_enabled) | ||
94 | device->dca = ioat2_dca_init(pdev, iobase); | ||
95 | break; | ||
96 | case IOAT_VER_3_0: | ||
97 | device->dma = ioat_dma_probe(pdev, iobase); | ||
98 | if (device->dma && ioat_dca_enabled) | ||
99 | device->dca = ioat3_dca_init(pdev, iobase); | ||
100 | break; | ||
101 | default: | ||
102 | err = -ENODEV; | ||
103 | break; | ||
104 | } | ||
105 | if (!device->dma) | ||
106 | err = -ENODEV; | ||
107 | return err; | ||
108 | } | ||
109 | |||
110 | static void ioat_shutdown_functionality(struct pci_dev *pdev) | ||
111 | { | ||
112 | struct ioat_device *device = pci_get_drvdata(pdev); | ||
113 | |||
114 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | ||
115 | if (device->dca) { | ||
116 | unregister_dca_provider(device->dca); | ||
117 | free_dca_provider(device->dca); | ||
118 | device->dca = NULL; | ||
119 | } | ||
120 | |||
121 | if (device->dma) { | ||
122 | ioat_dma_remove(device->dma); | ||
123 | device->dma = NULL; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | static struct pci_driver ioat_pci_driver = { | 78 | static struct pci_driver ioat_pci_driver = { |
128 | .name = "ioatdma", | 79 | .name = "ioatdma", |
129 | .id_table = ioat_pci_tbl, | 80 | .id_table = ioat_pci_tbl, |
130 | .probe = ioat_probe, | 81 | .probe = ioat_probe, |
131 | .shutdown = ioat_shutdown_functionality, | ||
132 | .remove = __devexit_p(ioat_remove), | 82 | .remove = __devexit_p(ioat_remove), |
133 | }; | 83 | }; |
134 | 84 | ||
@@ -179,7 +129,29 @@ static int __devinit ioat_probe(struct pci_dev *pdev, | |||
179 | 129 | ||
180 | pci_set_master(pdev); | 130 | pci_set_master(pdev); |
181 | 131 | ||
182 | err = ioat_setup_functionality(pdev, iobase); | 132 | switch (readb(iobase + IOAT_VER_OFFSET)) { |
133 | case IOAT_VER_1_2: | ||
134 | device->dma = ioat_dma_probe(pdev, iobase); | ||
135 | if (device->dma && ioat_dca_enabled) | ||
136 | device->dca = ioat_dca_init(pdev, iobase); | ||
137 | break; | ||
138 | case IOAT_VER_2_0: | ||
139 | device->dma = ioat_dma_probe(pdev, iobase); | ||
140 | if (device->dma && ioat_dca_enabled) | ||
141 | device->dca = ioat2_dca_init(pdev, iobase); | ||
142 | break; | ||
143 | case IOAT_VER_3_0: | ||
144 | device->dma = ioat_dma_probe(pdev, iobase); | ||
145 | if (device->dma && ioat_dca_enabled) | ||
146 | device->dca = ioat3_dca_init(pdev, iobase); | ||
147 | break; | ||
148 | default: | ||
149 | err = -ENODEV; | ||
150 | break; | ||
151 | } | ||
152 | if (!device->dma) | ||
153 | err = -ENODEV; | ||
154 | |||
183 | if (err) | 155 | if (err) |
184 | goto err_version; | 156 | goto err_version; |
185 | 157 | ||
@@ -198,17 +170,21 @@ err_enable_device: | |||
198 | return err; | 170 | return err; |
199 | } | 171 | } |
200 | 172 | ||
201 | /* | ||
202 | * It is unsafe to remove this module: if removed while a requested | ||
203 | * dma is outstanding, esp. from tcp, it is possible to hang while | ||
204 | * waiting for something that will never finish. However, if you're | ||
205 | * feeling lucky, this usually works just fine. | ||
206 | */ | ||
207 | static void __devexit ioat_remove(struct pci_dev *pdev) | 173 | static void __devexit ioat_remove(struct pci_dev *pdev) |
208 | { | 174 | { |
209 | struct ioat_device *device = pci_get_drvdata(pdev); | 175 | struct ioat_device *device = pci_get_drvdata(pdev); |
210 | 176 | ||
211 | ioat_shutdown_functionality(pdev); | 177 | dev_err(&pdev->dev, "Removing dma and dca services\n"); |
178 | if (device->dca) { | ||
179 | unregister_dca_provider(device->dca); | ||
180 | free_dca_provider(device->dca); | ||
181 | device->dca = NULL; | ||
182 | } | ||
183 | |||
184 | if (device->dma) { | ||
185 | ioat_dma_remove(device->dma); | ||
186 | device->dma = NULL; | ||
187 | } | ||
212 | 188 | ||
213 | kfree(device); | 189 | kfree(device); |
214 | } | 190 | } |
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index 6607fdd00b1c..b3759c4b6536 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c | |||
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) | |||
734 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors | 734 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors |
735 | * @chan: the channel to be filled out | 735 | * @chan: the channel to be filled out |
736 | */ | 736 | */ |
737 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan, | 737 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) |
738 | struct dma_client *client) | ||
739 | { | 738 | { |
740 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | 739 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); |
741 | struct ioat_desc_sw *desc; | 740 | struct ioat_desc_sw *desc; |
@@ -1341,12 +1340,11 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) | |||
1341 | */ | 1340 | */ |
1342 | #define IOAT_TEST_SIZE 2000 | 1341 | #define IOAT_TEST_SIZE 2000 |
1343 | 1342 | ||
1344 | DECLARE_COMPLETION(test_completion); | ||
1345 | static void ioat_dma_test_callback(void *dma_async_param) | 1343 | static void ioat_dma_test_callback(void *dma_async_param) |
1346 | { | 1344 | { |
1347 | printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n", | 1345 | struct completion *cmp = dma_async_param; |
1348 | dma_async_param); | 1346 | |
1349 | complete(&test_completion); | 1347 | complete(cmp); |
1350 | } | 1348 | } |
1351 | 1349 | ||
1352 | /** | 1350 | /** |
@@ -1363,6 +1361,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1363 | dma_addr_t dma_dest, dma_src; | 1361 | dma_addr_t dma_dest, dma_src; |
1364 | dma_cookie_t cookie; | 1362 | dma_cookie_t cookie; |
1365 | int err = 0; | 1363 | int err = 0; |
1364 | struct completion cmp; | ||
1366 | 1365 | ||
1367 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | 1366 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); |
1368 | if (!src) | 1367 | if (!src) |
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1381 | dma_chan = container_of(device->common.channels.next, | 1380 | dma_chan = container_of(device->common.channels.next, |
1382 | struct dma_chan, | 1381 | struct dma_chan, |
1383 | device_node); | 1382 | device_node); |
1384 | if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) { | 1383 | if (device->common.device_alloc_chan_resources(dma_chan) < 1) { |
1385 | dev_err(&device->pdev->dev, | 1384 | dev_err(&device->pdev->dev, |
1386 | "selftest cannot allocate chan resource\n"); | 1385 | "selftest cannot allocate chan resource\n"); |
1387 | err = -ENODEV; | 1386 | err = -ENODEV; |
@@ -1402,8 +1401,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1402 | } | 1401 | } |
1403 | 1402 | ||
1404 | async_tx_ack(tx); | 1403 | async_tx_ack(tx); |
1404 | init_completion(&cmp); | ||
1405 | tx->callback = ioat_dma_test_callback; | 1405 | tx->callback = ioat_dma_test_callback; |
1406 | tx->callback_param = (void *)0x8086; | 1406 | tx->callback_param = &cmp; |
1407 | cookie = tx->tx_submit(tx); | 1407 | cookie = tx->tx_submit(tx); |
1408 | if (cookie < 0) { | 1408 | if (cookie < 0) { |
1409 | dev_err(&device->pdev->dev, | 1409 | dev_err(&device->pdev->dev, |
@@ -1413,7 +1413,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1413 | } | 1413 | } |
1414 | device->common.device_issue_pending(dma_chan); | 1414 | device->common.device_issue_pending(dma_chan); |
1415 | 1415 | ||
1416 | wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000)); | 1416 | wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
1417 | 1417 | ||
1418 | if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) | 1418 | if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) |
1419 | != DMA_SUCCESS) { | 1419 | != DMA_SUCCESS) { |
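
The self-test change above swaps a file-scope DECLARE_COMPLETION for a completion on the caller's stack, the usual pattern when the waiter and the DMA callback share a lifetime. A generic, hedged sketch of that pattern outside any particular driver; issue_and_wait() and its 3-second timeout are illustrative only.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static void my_dma_done(void *param)
{
	complete(param);	/* param points at the on-stack completion */
}

/* submit an already-prepared descriptor and wait up to 3s for it */
static int issue_and_wait(struct dma_chan *chan,
			  struct dma_async_tx_descriptor *tx)
{
	struct completion cmp;
	dma_cookie_t cookie;

	init_completion(&cmp);
	tx->callback = my_dma_done;
	tx->callback_param = &cmp;

	cookie = tx->tx_submit(tx);
	if (cookie < 0)
		return -ENODEV;

	chan->device->device_issue_pending(chan);

	if (!wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)))
		return -ETIMEDOUT;

	return chan->device->device_is_tx_complete(chan, cookie, NULL, NULL)
		== DMA_SUCCESS ? 0 : -EIO;
}
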
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 6be317262200..ea5440dd10dc 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/async_tx.h> | ||
28 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
29 | #include <linux/dma-mapping.h> | 28 | #include <linux/dma-mapping.h> |
30 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
@@ -116,7 +115,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | |||
116 | } | 115 | } |
117 | 116 | ||
118 | /* run dependent operations */ | 117 | /* run dependent operations */ |
119 | async_tx_run_dependencies(&desc->async_tx); | 118 | dma_run_dependencies(&desc->async_tx); |
120 | 119 | ||
121 | return cookie; | 120 | return cookie; |
122 | } | 121 | } |
@@ -270,8 +269,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
270 | break; | 269 | break; |
271 | } | 270 | } |
272 | 271 | ||
273 | BUG_ON(!seen_current); | ||
274 | |||
275 | if (cookie > 0) { | 272 | if (cookie > 0) { |
276 | iop_chan->completed_cookie = cookie; | 273 | iop_chan->completed_cookie = cookie; |
277 | pr_debug("\tcompleted cookie %d\n", cookie); | 274 | pr_debug("\tcompleted cookie %d\n", cookie); |
@@ -471,8 +468,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); | |||
471 | * greater than 2x the number slots needed to satisfy a device->max_xor | 468 | * greater than 2x the number slots needed to satisfy a device->max_xor |
472 | * request. | 469 | * request. |
473 | * */ | 470 | * */ |
474 | static int iop_adma_alloc_chan_resources(struct dma_chan *chan, | 471 | static int iop_adma_alloc_chan_resources(struct dma_chan *chan) |
475 | struct dma_client *client) | ||
476 | { | 472 | { |
477 | char *hw_desc; | 473 | char *hw_desc; |
478 | int idx; | 474 | int idx; |
@@ -866,7 +862,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
866 | dma_chan = container_of(device->common.channels.next, | 862 | dma_chan = container_of(device->common.channels.next, |
867 | struct dma_chan, | 863 | struct dma_chan, |
868 | device_node); | 864 | device_node); |
869 | if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { | 865 | if (iop_adma_alloc_chan_resources(dma_chan) < 1) { |
870 | err = -ENODEV; | 866 | err = -ENODEV; |
871 | goto out; | 867 | goto out; |
872 | } | 868 | } |
@@ -964,7 +960,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
964 | dma_chan = container_of(device->common.channels.next, | 960 | dma_chan = container_of(device->common.channels.next, |
965 | struct dma_chan, | 961 | struct dma_chan, |
966 | device_node); | 962 | device_node); |
967 | if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { | 963 | if (iop_adma_alloc_chan_resources(dma_chan) < 1) { |
968 | err = -ENODEV; | 964 | err = -ENODEV; |
969 | goto out; | 965 | goto out; |
970 | } | 966 | } |
@@ -1115,26 +1111,13 @@ static int __devexit iop_adma_remove(struct platform_device *dev) | |||
1115 | struct iop_adma_device *device = platform_get_drvdata(dev); | 1111 | struct iop_adma_device *device = platform_get_drvdata(dev); |
1116 | struct dma_chan *chan, *_chan; | 1112 | struct dma_chan *chan, *_chan; |
1117 | struct iop_adma_chan *iop_chan; | 1113 | struct iop_adma_chan *iop_chan; |
1118 | int i; | ||
1119 | struct iop_adma_platform_data *plat_data = dev->dev.platform_data; | 1114 | struct iop_adma_platform_data *plat_data = dev->dev.platform_data; |
1120 | 1115 | ||
1121 | dma_async_device_unregister(&device->common); | 1116 | dma_async_device_unregister(&device->common); |
1122 | 1117 | ||
1123 | for (i = 0; i < 3; i++) { | ||
1124 | unsigned int irq; | ||
1125 | irq = platform_get_irq(dev, i); | ||
1126 | free_irq(irq, device); | ||
1127 | } | ||
1128 | |||
1129 | dma_free_coherent(&dev->dev, plat_data->pool_size, | 1118 | dma_free_coherent(&dev->dev, plat_data->pool_size, |
1130 | device->dma_desc_pool_virt, device->dma_desc_pool); | 1119 | device->dma_desc_pool_virt, device->dma_desc_pool); |
1131 | 1120 | ||
1132 | do { | ||
1133 | struct resource *res; | ||
1134 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
1135 | release_mem_region(res->start, res->end - res->start); | ||
1136 | } while (0); | ||
1137 | |||
1138 | list_for_each_entry_safe(chan, _chan, &device->common.channels, | 1121 | list_for_each_entry_safe(chan, _chan, &device->common.channels, |
1139 | device_node) { | 1122 | device_node) { |
1140 | iop_chan = to_iop_adma_chan(chan); | 1123 | iop_chan = to_iop_adma_chan(chan); |
@@ -1255,7 +1238,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1255 | spin_lock_init(&iop_chan->lock); | 1238 | spin_lock_init(&iop_chan->lock); |
1256 | INIT_LIST_HEAD(&iop_chan->chain); | 1239 | INIT_LIST_HEAD(&iop_chan->chain); |
1257 | INIT_LIST_HEAD(&iop_chan->all_slots); | 1240 | INIT_LIST_HEAD(&iop_chan->all_slots); |
1258 | INIT_RCU_HEAD(&iop_chan->common.rcu); | ||
1259 | iop_chan->common.device = dma_dev; | 1241 | iop_chan->common.device = dma_dev; |
1260 | list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); | 1242 | list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); |
1261 | 1243 | ||
@@ -1431,16 +1413,12 @@ static int __init iop_adma_init (void) | |||
1431 | return platform_driver_register(&iop_adma_driver); | 1413 | return platform_driver_register(&iop_adma_driver); |
1432 | } | 1414 | } |
1433 | 1415 | ||
1434 | /* it's currently unsafe to unload this module */ | ||
1435 | #if 0 | ||
1436 | static void __exit iop_adma_exit (void) | 1416 | static void __exit iop_adma_exit (void) |
1437 | { | 1417 | { |
1438 | platform_driver_unregister(&iop_adma_driver); | 1418 | platform_driver_unregister(&iop_adma_driver); |
1439 | return; | 1419 | return; |
1440 | } | 1420 | } |
1441 | module_exit(iop_adma_exit); | 1421 | module_exit(iop_adma_exit); |
1442 | #endif | ||
1443 | |||
1444 | module_init(iop_adma_init); | 1422 | module_init(iop_adma_init); |
1445 | 1423 | ||
1446 | MODULE_AUTHOR("Intel Corporation"); | 1424 | MODULE_AUTHOR("Intel Corporation"); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index bcda17426411..d35cbd1ff0b3 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -18,7 +18,6 @@ | |||
18 | 18 | ||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/async_tx.h> | ||
22 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
23 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
24 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
@@ -340,7 +339,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |||
340 | } | 339 | } |
341 | 340 | ||
342 | /* run dependent operations */ | 341 | /* run dependent operations */ |
343 | async_tx_run_dependencies(&desc->async_tx); | 342 | dma_run_dependencies(&desc->async_tx); |
344 | 343 | ||
345 | return cookie; | 344 | return cookie; |
346 | } | 345 | } |
@@ -607,8 +606,7 @@ submit_done: | |||
607 | } | 606 | } |
608 | 607 | ||
609 | /* returns the number of allocated descriptors */ | 608 | /* returns the number of allocated descriptors */ |
610 | static int mv_xor_alloc_chan_resources(struct dma_chan *chan, | 609 | static int mv_xor_alloc_chan_resources(struct dma_chan *chan) |
611 | struct dma_client *client) | ||
612 | { | 610 | { |
613 | char *hw_desc; | 611 | char *hw_desc; |
614 | int idx; | 612 | int idx; |
@@ -958,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) | |||
958 | dma_chan = container_of(device->common.channels.next, | 956 | dma_chan = container_of(device->common.channels.next, |
959 | struct dma_chan, | 957 | struct dma_chan, |
960 | device_node); | 958 | device_node); |
961 | if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { | 959 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { |
962 | err = -ENODEV; | 960 | err = -ENODEV; |
963 | goto out; | 961 | goto out; |
964 | } | 962 | } |
@@ -1053,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device) | |||
1053 | dma_chan = container_of(device->common.channels.next, | 1051 | dma_chan = container_of(device->common.channels.next, |
1054 | struct dma_chan, | 1052 | struct dma_chan, |
1055 | device_node); | 1053 | device_node); |
1056 | if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { | 1054 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { |
1057 | err = -ENODEV; | 1055 | err = -ENODEV; |
1058 | goto out; | 1056 | goto out; |
1059 | } | 1057 | } |
@@ -1221,7 +1219,6 @@ static int __devinit mv_xor_probe(struct platform_device *pdev) | |||
1221 | INIT_LIST_HEAD(&mv_chan->chain); | 1219 | INIT_LIST_HEAD(&mv_chan->chain); |
1222 | INIT_LIST_HEAD(&mv_chan->completed_slots); | 1220 | INIT_LIST_HEAD(&mv_chan->completed_slots); |
1223 | INIT_LIST_HEAD(&mv_chan->all_slots); | 1221 | INIT_LIST_HEAD(&mv_chan->all_slots); |
1224 | INIT_RCU_HEAD(&mv_chan->common.rcu); | ||
1225 | mv_chan->common.device = dma_dev; | 1222 | mv_chan->common.device = dma_dev; |
1226 | 1223 | ||
1227 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); | 1224 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 1e97916914ad..76bfe16c09b1 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -55,7 +55,6 @@ enum atmel_mci_state { | |||
55 | 55 | ||
56 | struct atmel_mci_dma { | 56 | struct atmel_mci_dma { |
57 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 57 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
58 | struct dma_client client; | ||
59 | struct dma_chan *chan; | 58 | struct dma_chan *chan; |
60 | struct dma_async_tx_descriptor *data_desc; | 59 | struct dma_async_tx_descriptor *data_desc; |
61 | #endif | 60 | #endif |
@@ -593,10 +592,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
593 | 592 | ||
594 | /* If we don't have a channel, we can't do DMA */ | 593 | /* If we don't have a channel, we can't do DMA */ |
595 | chan = host->dma.chan; | 594 | chan = host->dma.chan; |
596 | if (chan) { | 595 | if (chan) |
597 | dma_chan_get(chan); | ||
598 | host->data_chan = chan; | 596 | host->data_chan = chan; |
599 | } | ||
600 | 597 | ||
601 | if (!chan) | 598 | if (!chan) |
602 | return -ENODEV; | 599 | return -ENODEV; |
@@ -1443,60 +1440,6 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) | |||
1443 | return IRQ_HANDLED; | 1440 | return IRQ_HANDLED; |
1444 | } | 1441 | } |
1445 | 1442 | ||
1446 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
1447 | |||
1448 | static inline struct atmel_mci * | ||
1449 | dma_client_to_atmel_mci(struct dma_client *client) | ||
1450 | { | ||
1451 | return container_of(client, struct atmel_mci, dma.client); | ||
1452 | } | ||
1453 | |||
1454 | static enum dma_state_client atmci_dma_event(struct dma_client *client, | ||
1455 | struct dma_chan *chan, enum dma_state state) | ||
1456 | { | ||
1457 | struct atmel_mci *host; | ||
1458 | enum dma_state_client ret = DMA_NAK; | ||
1459 | |||
1460 | host = dma_client_to_atmel_mci(client); | ||
1461 | |||
1462 | switch (state) { | ||
1463 | case DMA_RESOURCE_AVAILABLE: | ||
1464 | spin_lock_bh(&host->lock); | ||
1465 | if (!host->dma.chan) { | ||
1466 | host->dma.chan = chan; | ||
1467 | ret = DMA_ACK; | ||
1468 | } | ||
1469 | spin_unlock_bh(&host->lock); | ||
1470 | |||
1471 | if (ret == DMA_ACK) | ||
1472 | dev_info(&host->pdev->dev, | ||
1473 | "Using %s for DMA transfers\n", | ||
1474 | chan->dev.bus_id); | ||
1475 | break; | ||
1476 | |||
1477 | case DMA_RESOURCE_REMOVED: | ||
1478 | spin_lock_bh(&host->lock); | ||
1479 | if (host->dma.chan == chan) { | ||
1480 | host->dma.chan = NULL; | ||
1481 | ret = DMA_ACK; | ||
1482 | } | ||
1483 | spin_unlock_bh(&host->lock); | ||
1484 | |||
1485 | if (ret == DMA_ACK) | ||
1486 | dev_info(&host->pdev->dev, | ||
1487 | "Lost %s, falling back to PIO\n", | ||
1488 | chan->dev.bus_id); | ||
1489 | break; | ||
1490 | |||
1491 | default: | ||
1492 | break; | ||
1493 | } | ||
1494 | |||
1495 | |||
1496 | return ret; | ||
1497 | } | ||
1498 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | ||
1499 | |||
1500 | static int __init atmci_init_slot(struct atmel_mci *host, | 1443 | static int __init atmci_init_slot(struct atmel_mci *host, |
1501 | struct mci_slot_pdata *slot_data, unsigned int id, | 1444 | struct mci_slot_pdata *slot_data, unsigned int id, |
1502 | u32 sdc_reg) | 1445 | u32 sdc_reg) |
@@ -1600,6 +1543,18 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, | |||
1600 | mmc_free_host(slot->mmc); | 1543 | mmc_free_host(slot->mmc); |
1601 | } | 1544 | } |
1602 | 1545 | ||
1546 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
1547 | static bool filter(struct dma_chan *chan, void *slave) | ||
1548 | { | ||
1549 | struct dw_dma_slave *dws = slave; | ||
1550 | |||
1551 | if (dws->dma_dev == chan->device->dev) | ||
1552 | return true; | ||
1553 | else | ||
1554 | return false; | ||
1555 | } | ||
1556 | #endif | ||
1557 | |||
1603 | static int __init atmci_probe(struct platform_device *pdev) | 1558 | static int __init atmci_probe(struct platform_device *pdev) |
1604 | { | 1559 | { |
1605 | struct mci_platform_data *pdata; | 1560 | struct mci_platform_data *pdata; |
@@ -1652,22 +1607,20 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
1652 | goto err_request_irq; | 1607 | goto err_request_irq; |
1653 | 1608 | ||
1654 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1609 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1655 | if (pdata->dma_slave) { | 1610 | if (pdata->dma_slave.dma_dev) { |
1656 | struct dma_slave *slave = pdata->dma_slave; | 1611 | struct dw_dma_slave *dws = &pdata->dma_slave; |
1612 | dma_cap_mask_t mask; | ||
1657 | 1613 | ||
1658 | slave->tx_reg = regs->start + MCI_TDR; | 1614 | dws->tx_reg = regs->start + MCI_TDR; |
1659 | slave->rx_reg = regs->start + MCI_RDR; | 1615 | dws->rx_reg = regs->start + MCI_RDR; |
1660 | 1616 | ||
1661 | /* Try to grab a DMA channel */ | 1617 | /* Try to grab a DMA channel */ |
1662 | host->dma.client.event_callback = atmci_dma_event; | 1618 | dma_cap_zero(mask); |
1663 | dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask); | 1619 | dma_cap_set(DMA_SLAVE, mask); |
1664 | host->dma.client.slave = slave; | 1620 | host->dma.chan = dma_request_channel(mask, filter, dws); |
1665 | |||
1666 | dma_async_client_register(&host->dma.client); | ||
1667 | dma_async_client_chan_request(&host->dma.client); | ||
1668 | } else { | ||
1669 | dev_notice(&pdev->dev, "DMA not available, using PIO\n"); | ||
1670 | } | 1621 | } |
1622 | if (!host->dma.chan) | ||
1623 | dev_notice(&pdev->dev, "DMA not available, using PIO\n"); | ||
1671 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | 1624 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ |
1672 | 1625 | ||
1673 | platform_set_drvdata(pdev, host); | 1626 | platform_set_drvdata(pdev, host); |
@@ -1699,8 +1652,8 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
1699 | 1652 | ||
1700 | err_init_slot: | 1653 | err_init_slot: |
1701 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1654 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1702 | if (pdata->dma_slave) | 1655 | if (host->dma.chan) |
1703 | dma_async_client_unregister(&host->dma.client); | 1656 | dma_release_channel(host->dma.chan); |
1704 | #endif | 1657 | #endif |
1705 | free_irq(irq, host); | 1658 | free_irq(irq, host); |
1706 | err_request_irq: | 1659 | err_request_irq: |
@@ -1731,8 +1684,8 @@ static int __exit atmci_remove(struct platform_device *pdev) | |||
1731 | clk_disable(host->mck); | 1684 | clk_disable(host->mck); |
1732 | 1685 | ||
1733 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1686 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1734 | if (host->dma.client.slave) | 1687 | if (host->dma.chan) |
1735 | dma_async_client_unregister(&host->dma.client); | 1688 | dma_release_channel(host->dma.chan); |
1736 | #endif | 1689 | #endif |
1737 | 1690 | ||
1738 | free_irq(platform_get_irq(pdev, 0), host); | 1691 | free_irq(platform_get_irq(pdev, 0), host); |
@@ -1761,7 +1714,7 @@ static void __exit atmci_exit(void) | |||
1761 | platform_driver_unregister(&atmci_driver); | 1714 | platform_driver_unregister(&atmci_driver); |
1762 | } | 1715 | } |
1763 | 1716 | ||
1764 | module_init(atmci_init); | 1717 | late_initcall(atmci_init); /* try to load after dma driver when built-in */ |
1765 | module_exit(atmci_exit); | 1718 | module_exit(atmci_exit); |
1766 | 1719 | ||
1767 | MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); | 1720 | MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); |
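The atmel-mci conversion above is the typical consumer of the new exclusive-channel interface: build a capability mask, pass a filter that matches the DMA master named in the platform data, and release the channel on the error and remove paths. Condensed into a hedged sketch (the "foo" helpers are illustrative, not part of the driver):

    #include <linux/dmaengine.h>
    #include <linux/dw_dmac.h>

    static bool foo_filter(struct dma_chan *chan, void *slave)
    {
            struct dw_dma_slave *dws = slave;

            /* only accept channels provided by the requested DMA master */
            return dws->dma_dev == chan->device->dev;
    }

    static struct dma_chan *foo_grab_channel(struct dw_dma_slave *dws)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* may return NULL; the caller falls back to PIO in that case */
            return dma_request_channel(mask, foo_filter, dws);
    }

The matching teardown is a single dma_release_channel(chan), as seen in atmci_remove() and the probe error path.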
diff --git a/drivers/parisc/asp.c b/drivers/parisc/asp.c index 821369135369..7931133526c4 100644 --- a/drivers/parisc/asp.c +++ b/drivers/parisc/asp.c | |||
@@ -71,8 +71,7 @@ static void asp_choose_irq(struct parisc_device *dev, void *ctrl) | |||
71 | */ | 71 | */ |
72 | #define ASP_INTERRUPT_ADDR 0xf0800000 | 72 | #define ASP_INTERRUPT_ADDR 0xf0800000 |
73 | 73 | ||
74 | int __init | 74 | static int __init asp_init_chip(struct parisc_device *dev) |
75 | asp_init_chip(struct parisc_device *dev) | ||
76 | { | 75 | { |
77 | struct gsc_irq gsc_irq; | 76 | struct gsc_irq gsc_irq; |
78 | int ret; | 77 | int ret; |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index dcc1e9958d2f..cd4dd7ed2c06 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -555,7 +555,7 @@ static u32 hint_lookup[] = { | |||
555 | * (Load Coherence Index) instruction. The 8 bits used for the virtual | 555 | * (Load Coherence Index) instruction. The 8 bits used for the virtual |
556 | * index are bits 12:19 of the value returned by LCI. | 556 | * index are bits 12:19 of the value returned by LCI. |
557 | */ | 557 | */ |
558 | void CCIO_INLINE | 558 | static void CCIO_INLINE |
559 | ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, | 559 | ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, |
560 | unsigned long hints) | 560 | unsigned long hints) |
561 | { | 561 | { |
@@ -1578,8 +1578,6 @@ static int __init ccio_probe(struct parisc_device *dev) | |||
1578 | 1578 | ||
1579 | ioc_count++; | 1579 | ioc_count++; |
1580 | 1580 | ||
1581 | parisc_vmerge_boundary = IOVP_SIZE; | ||
1582 | parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE; | ||
1583 | parisc_has_iommu(); | 1581 | parisc_has_iommu(); |
1584 | return 0; | 1582 | return 0; |
1585 | } | 1583 | } |
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 77cc8bfef8c9..d539d9df88e7 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c | |||
@@ -287,7 +287,7 @@ DINO_PORT_OUT(b, 8, 3) | |||
287 | DINO_PORT_OUT(w, 16, 2) | 287 | DINO_PORT_OUT(w, 16, 2) |
288 | DINO_PORT_OUT(l, 32, 0) | 288 | DINO_PORT_OUT(l, 32, 0) |
289 | 289 | ||
290 | struct pci_port_ops dino_port_ops = { | 290 | static struct pci_port_ops dino_port_ops = { |
291 | .inb = dino_in8, | 291 | .inb = dino_in8, |
292 | .inw = dino_in16, | 292 | .inw = dino_in16, |
293 | .inl = dino_in32, | 293 | .inl = dino_in32, |
@@ -690,7 +690,7 @@ dino_fixup_bus(struct pci_bus *bus) | |||
690 | } | 690 | } |
691 | 691 | ||
692 | 692 | ||
693 | struct pci_bios_ops dino_bios_ops = { | 693 | static struct pci_bios_ops dino_bios_ops = { |
694 | .init = dino_bios_init, | 694 | .init = dino_bios_init, |
695 | .fixup_bus = dino_fixup_bus | 695 | .fixup_bus = dino_fixup_bus |
696 | }; | 696 | }; |
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c index 65eee67aa2ae..13856415b432 100644 --- a/drivers/parisc/hppb.c +++ b/drivers/parisc/hppb.c | |||
@@ -29,7 +29,7 @@ struct hppb_card { | |||
29 | struct hppb_card *next; | 29 | struct hppb_card *next; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | struct hppb_card hppb_card_head = { | 32 | static struct hppb_card hppb_card_head = { |
33 | .hpa = 0, | 33 | .hpa = 0, |
34 | .next = NULL, | 34 | .next = NULL, |
35 | }; | 35 | }; |
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c index bee510098ce8..e65727ca9fc0 100644 --- a/drivers/parisc/lasi.c +++ b/drivers/parisc/lasi.c | |||
@@ -107,7 +107,7 @@ lasi_init_irq(struct gsc_asic *this_lasi) | |||
107 | 107 | ||
108 | #else | 108 | #else |
109 | 109 | ||
110 | void __init lasi_led_init(unsigned long lasi_hpa) | 110 | static void __init lasi_led_init(unsigned long lasi_hpa) |
111 | { | 111 | { |
112 | unsigned long datareg; | 112 | unsigned long datareg; |
113 | 113 | ||
@@ -163,8 +163,7 @@ static void lasi_power_off(void) | |||
163 | gsc_writel(0x02, datareg); | 163 | gsc_writel(0x02, datareg); |
164 | } | 164 | } |
165 | 165 | ||
166 | int __init | 166 | static int __init lasi_init_chip(struct parisc_device *dev) |
167 | lasi_init_chip(struct parisc_device *dev) | ||
168 | { | 167 | { |
169 | extern void (*chassis_power_off)(void); | 168 | extern void (*chassis_power_off)(void); |
170 | struct gsc_asic *lasi; | 169 | struct gsc_asic *lasi; |
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index a28c8946deaa..d8233de8c75d 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c | |||
@@ -824,7 +824,7 @@ lba_fixup_bus(struct pci_bus *bus) | |||
824 | } | 824 | } |
825 | 825 | ||
826 | 826 | ||
827 | struct pci_bios_ops lba_bios_ops = { | 827 | static struct pci_bios_ops lba_bios_ops = { |
828 | .init = lba_bios_init, | 828 | .init = lba_bios_init, |
829 | .fixup_bus = lba_fixup_bus, | 829 | .fixup_bus = lba_fixup_bus, |
830 | }; | 830 | }; |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index bc73b96346ff..3fac8f81d59d 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -561,7 +561,7 @@ typedef unsigned long space_t; | |||
561 | * IOMMU uses little endian for the pdir. | 561 | * IOMMU uses little endian for the pdir. |
562 | */ | 562 | */ |
563 | 563 | ||
564 | void SBA_INLINE | 564 | static void SBA_INLINE |
565 | sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, | 565 | sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, |
566 | unsigned long hint) | 566 | unsigned long hint) |
567 | { | 567 | { |
@@ -1874,7 +1874,7 @@ static struct parisc_device_id sba_tbl[] = { | |||
1874 | { 0, } | 1874 | { 0, } |
1875 | }; | 1875 | }; |
1876 | 1876 | ||
1877 | int sba_driver_callback(struct parisc_device *); | 1877 | static int sba_driver_callback(struct parisc_device *); |
1878 | 1878 | ||
1879 | static struct parisc_driver sba_driver = { | 1879 | static struct parisc_driver sba_driver = { |
1880 | .name = MODULE_NAME, | 1880 | .name = MODULE_NAME, |
@@ -1887,8 +1887,7 @@ static struct parisc_driver sba_driver = { | |||
1887 | ** If so, initialize the chip and tell other partners in crime they | 1887 | ** If so, initialize the chip and tell other partners in crime they |
1888 | ** have work to do. | 1888 | ** have work to do. |
1889 | */ | 1889 | */ |
1890 | int | 1890 | static int sba_driver_callback(struct parisc_device *dev) |
1891 | sba_driver_callback(struct parisc_device *dev) | ||
1892 | { | 1891 | { |
1893 | struct sba_device *sba_dev; | 1892 | struct sba_device *sba_dev; |
1894 | u32 func_class; | 1893 | u32 func_class; |
@@ -1979,8 +1978,6 @@ sba_driver_callback(struct parisc_device *dev) | |||
1979 | proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops); | 1978 | proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops); |
1980 | #endif | 1979 | #endif |
1981 | 1980 | ||
1982 | parisc_vmerge_boundary = IOVP_SIZE; | ||
1983 | parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG; | ||
1984 | parisc_has_iommu(); | 1981 | parisc_has_iommu(); |
1985 | return 0; | 1982 | return 0; |
1986 | } | 1983 | } |
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c index 892a83bbe73d..da9d5ad1353c 100644 --- a/drivers/parisc/wax.c +++ b/drivers/parisc/wax.c | |||
@@ -68,8 +68,7 @@ wax_init_irq(struct gsc_asic *wax) | |||
68 | // gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */ | 68 | // gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */ |
69 | } | 69 | } |
70 | 70 | ||
71 | int __init | 71 | static int __init wax_init_chip(struct parisc_device *dev) |
72 | wax_init_chip(struct parisc_device *dev) | ||
73 | { | 72 | { |
74 | struct gsc_asic *wax; | 73 | struct gsc_asic *wax; |
75 | struct parisc_device *parent; | 74 | struct parisc_device *parent; |
diff --git a/drivers/rtc/rtc-parisc.c b/drivers/rtc/rtc-parisc.c index 346d633655e7..c6bfa6fe1a2a 100644 --- a/drivers/rtc/rtc-parisc.c +++ b/drivers/rtc/rtc-parisc.c | |||
@@ -34,7 +34,8 @@ static int parisc_get_time(struct device *dev, struct rtc_time *tm) | |||
34 | static int parisc_set_time(struct device *dev, struct rtc_time *tm) | 34 | static int parisc_set_time(struct device *dev, struct rtc_time *tm) |
35 | { | 35 | { |
36 | struct parisc_rtc *p = dev_get_drvdata(dev); | 36 | struct parisc_rtc *p = dev_get_drvdata(dev); |
37 | unsigned long flags, ret; | 37 | unsigned long flags; |
38 | int ret; | ||
38 | 39 | ||
39 | spin_lock_irqsave(&p->lock, flags); | 40 | spin_lock_irqsave(&p->lock, flags); |
40 | ret = set_rtc_time(tm); | 41 | ret = set_rtc_time(tm); |
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 0f50d4cc4360..45f6297821bd 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h | |||
@@ -59,9 +59,7 @@ enum async_tx_flags { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | #ifdef CONFIG_DMA_ENGINE | 61 | #ifdef CONFIG_DMA_ENGINE |
62 | void async_tx_issue_pending_all(void); | 62 | #define async_tx_issue_pending_all dma_issue_pending_all |
63 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | ||
64 | void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx); | ||
65 | #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 63 | #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
66 | #include <asm/async_tx.h> | 64 | #include <asm/async_tx.h> |
67 | #else | 65 | #else |
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void) | |||
77 | do { } while (0); | 75 | do { } while (0); |
78 | } | 76 | } |
79 | 77 | ||
80 | static inline enum dma_status | ||
81 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
82 | { | ||
83 | return DMA_SUCCESS; | ||
84 | } | ||
85 | |||
86 | static inline void | ||
87 | async_tx_run_dependencies(struct dma_async_tx_descriptor *tx, | ||
88 | struct dma_chan *host_chan) | ||
89 | { | ||
90 | do { } while (0); | ||
91 | } | ||
92 | |||
93 | static inline struct dma_chan * | 78 | static inline struct dma_chan * |
94 | async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | 79 | async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, |
95 | enum dma_transaction_type tx_type, struct page **dst, int dst_count, | 80 | enum dma_transaction_type tx_type, struct page **dst, int dst_count, |
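With async_tx_issue_pending_all() now defined directly to dma_issue_pending_all() and the old stubs trimmed (dma_wait_for_async_tx() moves to dmaengine.h below), an async_tx user looks the same apart from linking against the dmaengine core. A minimal sketch, assuming the nine-argument async_memcpy() prototype of this kernel generation; the "foo" wrapper is an illustration only:

    #include <linux/async_tx.h>

    static void foo_copy_page(struct page *dst, struct page *src)
    {
            struct dma_async_tx_descriptor *tx;

            tx = async_memcpy(dst, src, 0, 0, PAGE_SIZE, ASYNC_TX_ACK,
                              NULL, NULL, NULL);
            async_tx_issue_pending_all();   /* alias for dma_issue_pending_all() */
            if (tx)                         /* NULL means the copy ran synchronously */
                    dma_wait_for_async_tx(tx);
    }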
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 2a2213eefd85..2f1f95737acb 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #define ATMEL_MCI_MAX_NR_SLOTS 2 | 4 | #define ATMEL_MCI_MAX_NR_SLOTS 2 |
5 | 5 | ||
6 | struct dma_slave; | 6 | #include <linux/dw_dmac.h> |
7 | 7 | ||
8 | /** | 8 | /** |
9 | * struct mci_slot_pdata - board-specific per-slot configuration | 9 | * struct mci_slot_pdata - board-specific per-slot configuration |
@@ -28,11 +28,11 @@ struct mci_slot_pdata { | |||
28 | 28 | ||
29 | /** | 29 | /** |
30 | * struct mci_platform_data - board-specific MMC/SDcard configuration | 30 | * struct mci_platform_data - board-specific MMC/SDcard configuration |
31 | * @dma_slave: DMA slave interface to use in data transfers, or NULL. | 31 | * @dma_slave: DMA slave interface to use in data transfers. |
32 | * @slot: Per-slot configuration data. | 32 | * @slot: Per-slot configuration data. |
33 | */ | 33 | */ |
34 | struct mci_platform_data { | 34 | struct mci_platform_data { |
35 | struct dma_slave *dma_slave; | 35 | struct dw_dma_slave dma_slave; |
36 | struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; | 36 | struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; |
37 | }; | 37 | }; |
38 | 38 | ||
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index adb0b084eb5a..64dea2ab326c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -29,32 +29,6 @@ | |||
29 | #include <linux/dma-mapping.h> | 29 | #include <linux/dma-mapping.h> |
30 | 30 | ||
31 | /** | 31 | /** |
32 | * enum dma_state - resource PNP/power management state | ||
33 | * @DMA_RESOURCE_SUSPEND: DMA device going into low power state | ||
34 | * @DMA_RESOURCE_RESUME: DMA device returning to full power | ||
35 | * @DMA_RESOURCE_AVAILABLE: DMA device available to the system | ||
36 | * @DMA_RESOURCE_REMOVED: DMA device removed from the system | ||
37 | */ | ||
38 | enum dma_state { | ||
39 | DMA_RESOURCE_SUSPEND, | ||
40 | DMA_RESOURCE_RESUME, | ||
41 | DMA_RESOURCE_AVAILABLE, | ||
42 | DMA_RESOURCE_REMOVED, | ||
43 | }; | ||
44 | |||
45 | /** | ||
46 | * enum dma_state_client - state of the channel in the client | ||
47 | * @DMA_ACK: client would like to use, or was using this channel | ||
48 | * @DMA_DUP: client has already seen this channel, or is not using this channel | ||
49 | * @DMA_NAK: client does not want to see any more channels | ||
50 | */ | ||
51 | enum dma_state_client { | ||
52 | DMA_ACK, | ||
53 | DMA_DUP, | ||
54 | DMA_NAK, | ||
55 | }; | ||
56 | |||
57 | /** | ||
58 | * typedef dma_cookie_t - an opaque DMA cookie | 32 | * typedef dma_cookie_t - an opaque DMA cookie |
59 | * | 33 | * |
60 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code | 34 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code |
@@ -89,23 +63,13 @@ enum dma_transaction_type { | |||
89 | DMA_MEMSET, | 63 | DMA_MEMSET, |
90 | DMA_MEMCPY_CRC32C, | 64 | DMA_MEMCPY_CRC32C, |
91 | DMA_INTERRUPT, | 65 | DMA_INTERRUPT, |
66 | DMA_PRIVATE, | ||
92 | DMA_SLAVE, | 67 | DMA_SLAVE, |
93 | }; | 68 | }; |
94 | 69 | ||
95 | /* last transaction type for creation of the capabilities mask */ | 70 | /* last transaction type for creation of the capabilities mask */ |
96 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) | 71 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) |
97 | 72 | ||
98 | /** | ||
99 | * enum dma_slave_width - DMA slave register access width. | ||
100 | * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses | ||
101 | * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses | ||
102 | * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses | ||
103 | */ | ||
104 | enum dma_slave_width { | ||
105 | DMA_SLAVE_WIDTH_8BIT, | ||
106 | DMA_SLAVE_WIDTH_16BIT, | ||
107 | DMA_SLAVE_WIDTH_32BIT, | ||
108 | }; | ||
109 | 73 | ||
110 | /** | 74 | /** |
111 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, | 75 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, |
@@ -132,32 +96,6 @@ enum dma_ctrl_flags { | |||
132 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; | 96 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; |
133 | 97 | ||
134 | /** | 98 | /** |
135 | * struct dma_slave - Information about a DMA slave | ||
136 | * @dev: device acting as DMA slave | ||
137 | * @dma_dev: required DMA master device. If non-NULL, the client can not be | ||
138 | * bound to other masters than this. | ||
139 | * @tx_reg: physical address of data register used for | ||
140 | * memory-to-peripheral transfers | ||
141 | * @rx_reg: physical address of data register used for | ||
142 | * peripheral-to-memory transfers | ||
143 | * @reg_width: peripheral register width | ||
144 | * | ||
145 | * If dma_dev is non-NULL, the client can not be bound to other DMA | ||
146 | * masters than the one corresponding to this device. The DMA master | ||
147 | * driver may use this to determine if there is controller-specific | ||
148 | * data wrapped around this struct. Drivers of platform code that sets | ||
149 | * the dma_dev field must therefore make sure to use an appropriate | ||
150 | * controller-specific dma slave structure wrapping this struct. | ||
151 | */ | ||
152 | struct dma_slave { | ||
153 | struct device *dev; | ||
154 | struct device *dma_dev; | ||
155 | dma_addr_t tx_reg; | ||
156 | dma_addr_t rx_reg; | ||
157 | enum dma_slave_width reg_width; | ||
158 | }; | ||
159 | |||
160 | /** | ||
161 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan | 99 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan |
162 | * @refcount: local_t used for open-coded "bigref" counting | 100 | * @refcount: local_t used for open-coded "bigref" counting |
163 | * @memcpy_count: transaction counter | 101 | * @memcpy_count: transaction counter |
@@ -165,7 +103,6 @@ struct dma_slave { | |||
165 | */ | 103 | */ |
166 | 104 | ||
167 | struct dma_chan_percpu { | 105 | struct dma_chan_percpu { |
168 | local_t refcount; | ||
169 | /* stats */ | 106 | /* stats */ |
170 | unsigned long memcpy_count; | 107 | unsigned long memcpy_count; |
171 | unsigned long bytes_transferred; | 108 | unsigned long bytes_transferred; |
@@ -176,13 +113,14 @@ struct dma_chan_percpu { | |||
176 | * @device: ptr to the dma device who supplies this channel, always !%NULL | 113 | * @device: ptr to the dma device who supplies this channel, always !%NULL |
177 | * @cookie: last cookie value returned to client | 114 | * @cookie: last cookie value returned to client |
178 | * @chan_id: channel ID for sysfs | 115 | * @chan_id: channel ID for sysfs |
179 | * @class_dev: class device for sysfs | 116 | * @dev: class device for sysfs |
180 | * @refcount: kref, used in "bigref" slow-mode | 117 | * @refcount: kref, used in "bigref" slow-mode |
181 | * @slow_ref: indicates that the DMA channel is free | 118 | * @slow_ref: indicates that the DMA channel is free |
182 | * @rcu: the DMA channel's RCU head | 119 | * @rcu: the DMA channel's RCU head |
183 | * @device_node: used to add this to the device chan list | 120 | * @device_node: used to add this to the device chan list |
184 | * @local: per-cpu pointer to a struct dma_chan_percpu | 121 | * @local: per-cpu pointer to a struct dma_chan_percpu |
185 | * @client-count: how many clients are using this channel | 122 | * @client-count: how many clients are using this channel |
123 | * @table_count: number of appearances in the mem-to-mem allocation table | ||
186 | */ | 124 | */ |
187 | struct dma_chan { | 125 | struct dma_chan { |
188 | struct dma_device *device; | 126 | struct dma_device *device; |
@@ -190,73 +128,47 @@ struct dma_chan { | |||
190 | 128 | ||
191 | /* sysfs */ | 129 | /* sysfs */ |
192 | int chan_id; | 130 | int chan_id; |
193 | struct device dev; | 131 | struct dma_chan_dev *dev; |
194 | |||
195 | struct kref refcount; | ||
196 | int slow_ref; | ||
197 | struct rcu_head rcu; | ||
198 | 132 | ||
199 | struct list_head device_node; | 133 | struct list_head device_node; |
200 | struct dma_chan_percpu *local; | 134 | struct dma_chan_percpu *local; |
201 | int client_count; | 135 | int client_count; |
136 | int table_count; | ||
202 | }; | 137 | }; |
203 | 138 | ||
204 | #define to_dma_chan(p) container_of(p, struct dma_chan, dev) | 139 | /** |
205 | 140 | * struct dma_chan_dev - relate sysfs device node to backing channel device | |
206 | void dma_chan_cleanup(struct kref *kref); | 141 | * @chan - driver channel device |
207 | 142 | * @device - sysfs device | |
208 | static inline void dma_chan_get(struct dma_chan *chan) | 143 | * @dev_id - parent dma_device dev_id |
209 | { | 144 | * @idr_ref - reference count to gate release of dma_device dev_id |
210 | if (unlikely(chan->slow_ref)) | 145 | */ |
211 | kref_get(&chan->refcount); | 146 | struct dma_chan_dev { |
212 | else { | 147 | struct dma_chan *chan; |
213 | local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); | 148 | struct device device; |
214 | put_cpu(); | 149 | int dev_id; |
215 | } | 150 | atomic_t *idr_ref; |
216 | } | 151 | }; |
217 | 152 | ||
218 | static inline void dma_chan_put(struct dma_chan *chan) | 153 | static inline const char *dma_chan_name(struct dma_chan *chan) |
219 | { | 154 | { |
220 | if (unlikely(chan->slow_ref)) | 155 | return dev_name(&chan->dev->device); |
221 | kref_put(&chan->refcount, dma_chan_cleanup); | ||
222 | else { | ||
223 | local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); | ||
224 | put_cpu(); | ||
225 | } | ||
226 | } | 156 | } |
227 | 157 | ||
228 | /* | 158 | void dma_chan_cleanup(struct kref *kref); |
229 | * typedef dma_event_callback - function pointer to a DMA event callback | ||
230 | * For each channel added to the system this routine is called for each client. | ||
231 | * If the client would like to use the channel it returns '1' to signal (ack) | ||
232 | * the dmaengine core to take out a reference on the channel and its | ||
233 | * corresponding device. A client must not 'ack' an available channel more | ||
234 | * than once. When a channel is removed all clients are notified. If a client | ||
235 | * is using the channel it must 'ack' the removal. A client must not 'ack' a | ||
236 | * removed channel more than once. | ||
237 | * @client - 'this' pointer for the client context | ||
238 | * @chan - channel to be acted upon | ||
239 | * @state - available or removed | ||
240 | */ | ||
241 | struct dma_client; | ||
242 | typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, | ||
243 | struct dma_chan *chan, enum dma_state state); | ||
244 | 159 | ||
245 | /** | 160 | /** |
246 | * struct dma_client - info on the entity making use of DMA services | 161 | * typedef dma_filter_fn - callback filter for dma_request_channel |
247 | * @event_callback: func ptr to call when something happens | 162 | * @chan: channel to be reviewed |
248 | * @cap_mask: only return channels that satisfy the requested capabilities | 163 | * @filter_param: opaque parameter passed through dma_request_channel |
249 | * a value of zero corresponds to any capability | 164 | * |
250 | * @slave: data for preparing slave transfer. Must be non-NULL iff the | 165 | * When this optional parameter is specified in a call to dma_request_channel a |
251 | * DMA_SLAVE capability is requested. | 166 | * suitable channel is passed to this routine for further dispositioning before |
252 | * @global_node: list_head for global dma_client_list | 167 | * being returned. Where 'suitable' indicates a non-busy channel that |
168 | * satisfies the given capability mask. It returns 'true' to indicate that the | ||
169 | * channel is suitable. | ||
253 | */ | 170 | */ |
254 | struct dma_client { | 171 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); |
255 | dma_event_callback event_callback; | ||
256 | dma_cap_mask_t cap_mask; | ||
257 | struct dma_slave *slave; | ||
258 | struct list_head global_node; | ||
259 | }; | ||
260 | 172 | ||
261 | typedef void (*dma_async_tx_callback)(void *dma_async_param); | 173 | typedef void (*dma_async_tx_callback)(void *dma_async_param); |
262 | /** | 174 | /** |
@@ -323,14 +235,10 @@ struct dma_device { | |||
323 | dma_cap_mask_t cap_mask; | 235 | dma_cap_mask_t cap_mask; |
324 | int max_xor; | 236 | int max_xor; |
325 | 237 | ||
326 | struct kref refcount; | ||
327 | struct completion done; | ||
328 | |||
329 | int dev_id; | 238 | int dev_id; |
330 | struct device *dev; | 239 | struct device *dev; |
331 | 240 | ||
332 | int (*device_alloc_chan_resources)(struct dma_chan *chan, | 241 | int (*device_alloc_chan_resources)(struct dma_chan *chan); |
333 | struct dma_client *client); | ||
334 | void (*device_free_chan_resources)(struct dma_chan *chan); | 242 | void (*device_free_chan_resources)(struct dma_chan *chan); |
335 | 243 | ||
336 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( | 244 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( |
@@ -362,9 +270,8 @@ struct dma_device { | |||
362 | 270 | ||
363 | /* --- public DMA engine API --- */ | 271 | /* --- public DMA engine API --- */ |
364 | 272 | ||
365 | void dma_async_client_register(struct dma_client *client); | 273 | void dmaengine_get(void); |
366 | void dma_async_client_unregister(struct dma_client *client); | 274 | void dmaengine_put(void); |
367 | void dma_async_client_chan_request(struct dma_client *client); | ||
368 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, | 275 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, |
369 | void *dest, void *src, size_t len); | 276 | void *dest, void *src, size_t len); |
370 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, | 277 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, |
@@ -406,6 +313,12 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | |||
406 | set_bit(tx_type, dstp->bits); | 313 | set_bit(tx_type, dstp->bits); |
407 | } | 314 | } |
408 | 315 | ||
316 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) | ||
317 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) | ||
318 | { | ||
319 | bitmap_zero(dstp->bits, DMA_TX_TYPE_END); | ||
320 | } | ||
321 | |||
409 | #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) | 322 | #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) |
410 | static inline int | 323 | static inline int |
411 | __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) | 324 | __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) |
@@ -475,11 +388,25 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, | |||
475 | } | 388 | } |
476 | 389 | ||
477 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | 390 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); |
391 | #ifdef CONFIG_DMA_ENGINE | ||
392 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | ||
393 | #else | ||
394 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
395 | { | ||
396 | return DMA_SUCCESS; | ||
397 | } | ||
398 | #endif | ||
478 | 399 | ||
479 | /* --- DMA device --- */ | 400 | /* --- DMA device --- */ |
480 | 401 | ||
481 | int dma_async_device_register(struct dma_device *device); | 402 | int dma_async_device_register(struct dma_device *device); |
482 | void dma_async_device_unregister(struct dma_device *device); | 403 | void dma_async_device_unregister(struct dma_device *device); |
404 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | ||
405 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); | ||
406 | void dma_issue_pending_all(void); | ||
407 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) | ||
408 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); | ||
409 | void dma_release_channel(struct dma_chan *chan); | ||
483 | 410 | ||
484 | /* --- Helper iov-locking functions --- */ | 411 | /* --- Helper iov-locking functions --- */ |
485 | 412 | ||
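Taken together, the header now offers two consumer models: exclusive ("private") channels via dma_request_channel()/dma_release_channel(), and opportunistic mem-to-mem offload via dmaengine_get(), dma_find_channel() and dma_issue_pending_all(). A hedged sketch of the latter follows; the "foo" wrapper and its fallback policy are assumptions, and a real subsystem (like the networking core later in this series) takes the dmaengine_get() reference once at init rather than per call:

    #include <linux/dmaengine.h>
    #include <linux/string.h>

    static void foo_offload_copy(void *dst, void *src, size_t len)
    {
            struct dma_chan *chan;
            dma_cookie_t cookie;

            dmaengine_get();                        /* pin the channel tables */
            chan = dma_find_channel(DMA_MEMCPY);    /* may be NULL */
            if (!chan) {
                    memcpy(dst, src, len);
                    goto out;
            }

            cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
            if (cookie < 0) {                       /* negative cookie is an error */
                    memcpy(dst, src, len);
                    goto out;
            }

            dma_issue_pending_all();
            dma_sync_wait(chan, cookie);            /* poll until complete */
    out:
            dmaengine_put();
    }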
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 04d217b442bf..d797dde247f7 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
@@ -22,14 +22,34 @@ struct dw_dma_platform_data { | |||
22 | }; | 22 | }; |
23 | 23 | ||
24 | /** | 24 | /** |
25 | * enum dw_dma_slave_width - DMA slave register access width. | ||
26 | * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses | ||
27 | * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses | ||
28 | * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses | ||
29 | */ | ||
30 | enum dw_dma_slave_width { | ||
31 | DW_DMA_SLAVE_WIDTH_8BIT, | ||
32 | DW_DMA_SLAVE_WIDTH_16BIT, | ||
33 | DW_DMA_SLAVE_WIDTH_32BIT, | ||
34 | }; | ||
35 | |||
36 | /** | ||
25 | * struct dw_dma_slave - Controller-specific information about a slave | 37 | * struct dw_dma_slave - Controller-specific information about a slave |
26 | * @slave: Generic information about the slave | 38 | * |
27 | * @ctl_lo: Platform-specific initializer for the CTL_LO register | 39 | * @dma_dev: required DMA master device |
40 | * @tx_reg: physical address of data register used for | ||
41 | * memory-to-peripheral transfers | ||
42 | * @rx_reg: physical address of data register used for | ||
43 | * peripheral-to-memory transfers | ||
44 | * @reg_width: peripheral register width | ||
28 | * @cfg_hi: Platform-specific initializer for the CFG_HI register | 45 | * @cfg_hi: Platform-specific initializer for the CFG_HI register |
29 | * @cfg_lo: Platform-specific initializer for the CFG_LO register | 46 | * @cfg_lo: Platform-specific initializer for the CFG_LO register |
30 | */ | 47 | */ |
31 | struct dw_dma_slave { | 48 | struct dw_dma_slave { |
32 | struct dma_slave slave; | 49 | struct device *dma_dev; |
50 | dma_addr_t tx_reg; | ||
51 | dma_addr_t rx_reg; | ||
52 | enum dw_dma_slave_width reg_width; | ||
33 | u32 cfg_hi; | 53 | u32 cfg_hi; |
34 | u32 cfg_lo; | 54 | u32 cfg_lo; |
35 | }; | 55 | }; |
@@ -54,9 +74,4 @@ struct dw_dma_slave { | |||
54 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ | 74 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ |
55 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ | 75 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ |
56 | 76 | ||
57 | static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave) | ||
58 | { | ||
59 | return container_of(slave, struct dw_dma_slave, slave); | ||
60 | } | ||
61 | |||
62 | #endif /* DW_DMAC_H */ | 77 | #endif /* DW_DMAC_H */ |
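Since the generic struct dma_slave is gone, dw_dma_slave now carries the master device, data registers and register width itself, and atmel-mci's platform data embeds it by value. A sketch of how board code might fill it in; the dw_dmac0_device name is an assumption for illustration, and tx_reg/rx_reg are set by the MCI driver at probe time as shown above:

    #include <linux/atmel-mci.h>
    #include <linux/dw_dmac.h>

    static struct mci_platform_data mci0_data = {
            .dma_slave = {
                    .dma_dev   = &dw_dmac0_device.dev,      /* assumed DMA master device */
                    .reg_width = DW_DMA_SLAVE_WIDTH_32BIT,
                    /* tx_reg/rx_reg are filled in by the MCI driver at probe */
            },
    };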
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 114091be8872..f24556813375 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1125,9 +1125,6 @@ struct softnet_data | |||
1125 | struct sk_buff *completion_queue; | 1125 | struct sk_buff *completion_queue; |
1126 | 1126 | ||
1127 | struct napi_struct backlog; | 1127 | struct napi_struct backlog; |
1128 | #ifdef CONFIG_NET_DMA | ||
1129 | struct dma_chan *net_dma; | ||
1130 | #endif | ||
1131 | }; | 1128 | }; |
1132 | 1129 | ||
1133 | DECLARE_PER_CPU(struct softnet_data,softnet_data); | 1130 | DECLARE_PER_CPU(struct softnet_data,softnet_data); |
diff --git a/include/net/netdma.h b/include/net/netdma.h index f28c6e064e8f..8ba8ce284eeb 100644 --- a/include/net/netdma.h +++ b/include/net/netdma.h | |||
@@ -24,17 +24,6 @@ | |||
24 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
25 | #include <linux/skbuff.h> | 25 | #include <linux/skbuff.h> |
26 | 26 | ||
27 | static inline struct dma_chan *get_softnet_dma(void) | ||
28 | { | ||
29 | struct dma_chan *chan; | ||
30 | rcu_read_lock(); | ||
31 | chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma); | ||
32 | if (chan) | ||
33 | dma_chan_get(chan); | ||
34 | rcu_read_unlock(); | ||
35 | return chan; | ||
36 | } | ||
37 | |||
38 | int dma_skb_copy_datagram_iovec(struct dma_chan* chan, | 27 | int dma_skb_copy_datagram_iovec(struct dma_chan* chan, |
39 | struct sk_buff *skb, int offset, struct iovec *to, | 28 | struct sk_buff *skb, int offset, struct iovec *to, |
40 | size_t len, struct dma_pinned_list *pinned_list); | 29 | size_t len, struct dma_pinned_list *pinned_list); |
diff --git a/kernel/cred.c b/kernel/cred.c index ff7bc071991c..043f78c133c4 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -506,6 +506,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) | |||
506 | else | 506 | else |
507 | old = get_cred(&init_cred); | 507 | old = get_cred(&init_cred); |
508 | 508 | ||
509 | *new = *old; | ||
509 | get_uid(new->user); | 510 | get_uid(new->user); |
510 | get_group_info(new->group_info); | 511 | get_group_info(new->group_info); |
511 | 512 | ||
@@ -529,6 +530,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) | |||
529 | 530 | ||
530 | error: | 531 | error: |
531 | put_cred(new); | 532 | put_cred(new); |
533 | put_cred(old); | ||
532 | return NULL; | 534 | return NULL; |
533 | } | 535 | } |
534 | EXPORT_SYMBOL(prepare_kernel_cred); | 536 | EXPORT_SYMBOL(prepare_kernel_cred); |
diff --git a/net/core/dev.c b/net/core/dev.c index bab8bcedd62e..5f736f1ceeae 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock); | |||
170 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; | 170 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
171 | static struct list_head ptype_all __read_mostly; /* Taps */ | 171 | static struct list_head ptype_all __read_mostly; /* Taps */ |
172 | 172 | ||
173 | #ifdef CONFIG_NET_DMA | ||
174 | struct net_dma { | ||
175 | struct dma_client client; | ||
176 | spinlock_t lock; | ||
177 | cpumask_t channel_mask; | ||
178 | struct dma_chan **channels; | ||
179 | }; | ||
180 | |||
181 | static enum dma_state_client | ||
182 | netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | ||
183 | enum dma_state state); | ||
184 | |||
185 | static struct net_dma net_dma = { | ||
186 | .client = { | ||
187 | .event_callback = netdev_dma_event, | ||
188 | }, | ||
189 | }; | ||
190 | #endif | ||
191 | |||
192 | /* | 173 | /* |
193 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl | 174 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl |
194 | * semaphore. | 175 | * semaphore. |
@@ -2754,14 +2735,7 @@ out: | |||
2754 | * There may not be any more sk_buffs coming right now, so push | 2735 | * There may not be any more sk_buffs coming right now, so push |
2755 | * any pending DMA copies to hardware | 2736 | * any pending DMA copies to hardware |
2756 | */ | 2737 | */ |
2757 | if (!cpus_empty(net_dma.channel_mask)) { | 2738 | dma_issue_pending_all(); |
2758 | int chan_idx; | ||
2759 | for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) { | ||
2760 | struct dma_chan *chan = net_dma.channels[chan_idx]; | ||
2761 | if (chan) | ||
2762 | dma_async_memcpy_issue_pending(chan); | ||
2763 | } | ||
2764 | } | ||
2765 | #endif | 2739 | #endif |
2766 | 2740 | ||
2767 | return; | 2741 | return; |
@@ -4952,122 +4926,6 @@ static int dev_cpu_callback(struct notifier_block *nfb, | |||
4952 | return NOTIFY_OK; | 4926 | return NOTIFY_OK; |
4953 | } | 4927 | } |
4954 | 4928 | ||
4955 | #ifdef CONFIG_NET_DMA | ||
4956 | /** | ||
4957 | * net_dma_rebalance - try to maintain one DMA channel per CPU | ||
4958 | * @net_dma: DMA client and associated data (lock, channels, channel_mask) | ||
4959 | * | ||
4960 | * This is called when the number of channels allocated to the net_dma client | ||
4961 | * changes. The net_dma client tries to have one DMA channel per CPU. | ||
4962 | */ | ||
4963 | |||
4964 | static void net_dma_rebalance(struct net_dma *net_dma) | ||
4965 | { | ||
4966 | unsigned int cpu, i, n, chan_idx; | ||
4967 | struct dma_chan *chan; | ||
4968 | |||
4969 | if (cpus_empty(net_dma->channel_mask)) { | ||
4970 | for_each_online_cpu(cpu) | ||
4971 | rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL); | ||
4972 | return; | ||
4973 | } | ||
4974 | |||
4975 | i = 0; | ||
4976 | cpu = first_cpu(cpu_online_map); | ||
4977 | |||
4978 | for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) { | ||
4979 | chan = net_dma->channels[chan_idx]; | ||
4980 | |||
4981 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) | ||
4982 | + (i < (num_online_cpus() % | ||
4983 | cpus_weight(net_dma->channel_mask)) ? 1 : 0)); | ||
4984 | |||
4985 | while(n) { | ||
4986 | per_cpu(softnet_data, cpu).net_dma = chan; | ||
4987 | cpu = next_cpu(cpu, cpu_online_map); | ||
4988 | n--; | ||
4989 | } | ||
4990 | i++; | ||
4991 | } | ||
4992 | } | ||
4993 | |||
4994 | /** | ||
4995 | * netdev_dma_event - event callback for the net_dma_client | ||
4996 | * @client: should always be net_dma_client | ||
4997 | * @chan: DMA channel for the event | ||
4998 | * @state: DMA state to be handled | ||
4999 | */ | ||
5000 | static enum dma_state_client | ||
5001 | netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | ||
5002 | enum dma_state state) | ||
5003 | { | ||
5004 | int i, found = 0, pos = -1; | ||
5005 | struct net_dma *net_dma = | ||
5006 | container_of(client, struct net_dma, client); | ||
5007 | enum dma_state_client ack = DMA_DUP; /* default: take no action */ | ||
5008 | |||
5009 | spin_lock(&net_dma->lock); | ||
5010 | switch (state) { | ||
5011 | case DMA_RESOURCE_AVAILABLE: | ||
5012 | for (i = 0; i < nr_cpu_ids; i++) | ||
5013 | if (net_dma->channels[i] == chan) { | ||
5014 | found = 1; | ||
5015 | break; | ||
5016 | } else if (net_dma->channels[i] == NULL && pos < 0) | ||
5017 | pos = i; | ||
5018 | |||
5019 | if (!found && pos >= 0) { | ||
5020 | ack = DMA_ACK; | ||
5021 | net_dma->channels[pos] = chan; | ||
5022 | cpu_set(pos, net_dma->channel_mask); | ||
5023 | net_dma_rebalance(net_dma); | ||
5024 | } | ||
5025 | break; | ||
5026 | case DMA_RESOURCE_REMOVED: | ||
5027 | for (i = 0; i < nr_cpu_ids; i++) | ||
5028 | if (net_dma->channels[i] == chan) { | ||
5029 | found = 1; | ||
5030 | pos = i; | ||
5031 | break; | ||
5032 | } | ||
5033 | |||
5034 | if (found) { | ||
5035 | ack = DMA_ACK; | ||
5036 | cpu_clear(pos, net_dma->channel_mask); | ||
5037 | net_dma->channels[i] = NULL; | ||
5038 | net_dma_rebalance(net_dma); | ||
5039 | } | ||
5040 | break; | ||
5041 | default: | ||
5042 | break; | ||
5043 | } | ||
5044 | spin_unlock(&net_dma->lock); | ||
5045 | |||
5046 | return ack; | ||
5047 | } | ||
5048 | |||
5049 | /** | ||
5050 | * netdev_dma_register - register the networking subsystem as a DMA client | ||
5051 | */ | ||
5052 | static int __init netdev_dma_register(void) | ||
5053 | { | ||
5054 | net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma), | ||
5055 | GFP_KERNEL); | ||
5056 | if (unlikely(!net_dma.channels)) { | ||
5057 | printk(KERN_NOTICE | ||
5058 | "netdev_dma: no memory for net_dma.channels\n"); | ||
5059 | return -ENOMEM; | ||
5060 | } | ||
5061 | spin_lock_init(&net_dma.lock); | ||
5062 | dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask); | ||
5063 | dma_async_client_register(&net_dma.client); | ||
5064 | dma_async_client_chan_request(&net_dma.client); | ||
5065 | return 0; | ||
5066 | } | ||
5067 | |||
5068 | #else | ||
5069 | static int __init netdev_dma_register(void) { return -ENODEV; } | ||
5070 | #endif /* CONFIG_NET_DMA */ | ||
5071 | 4929 | ||
5072 | /** | 4930 | /** |
5073 | * netdev_increment_features - increment feature set by one | 4931 | * netdev_increment_features - increment feature set by one |
@@ -5287,14 +5145,15 @@ static int __init net_dev_init(void) | |||
5287 | if (register_pernet_device(&default_device_ops)) | 5145 | if (register_pernet_device(&default_device_ops)) |
5288 | goto out; | 5146 | goto out; |
5289 | 5147 | ||
5290 | netdev_dma_register(); | ||
5291 | |||
5292 | open_softirq(NET_TX_SOFTIRQ, net_tx_action); | 5148 | open_softirq(NET_TX_SOFTIRQ, net_tx_action); |
5293 | open_softirq(NET_RX_SOFTIRQ, net_rx_action); | 5149 | open_softirq(NET_RX_SOFTIRQ, net_rx_action); |
5294 | 5150 | ||
5295 | hotcpu_notifier(dev_cpu_callback, 0); | 5151 | hotcpu_notifier(dev_cpu_callback, 0); |
5296 | dst_init(); | 5152 | dst_init(); |
5297 | dev_mcast_init(); | 5153 | dev_mcast_init(); |
5154 | #ifdef CONFIG_NET_DMA | ||
5155 | dmaengine_get(); | ||
5156 | #endif | ||
5298 | rc = 0; | 5157 | rc = 0; |
5299 | out: | 5158 | out: |
5300 | return rc; | 5159 | return rc; |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bd6ff907d9e4..ce572f9dff02 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1313,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1313 | if ((available < target) && | 1313 | if ((available < target) && |
1314 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && | 1314 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && |
1315 | !sysctl_tcp_low_latency && | 1315 | !sysctl_tcp_low_latency && |
1316 | __get_cpu_var(softnet_data).net_dma) { | 1316 | dma_find_channel(DMA_MEMCPY)) { |
1317 | preempt_enable_no_resched(); | 1317 | preempt_enable_no_resched(); |
1318 | tp->ucopy.pinned_list = | 1318 | tp->ucopy.pinned_list = |
1319 | dma_pin_iovec_pages(msg->msg_iov, len); | 1319 | dma_pin_iovec_pages(msg->msg_iov, len); |
@@ -1523,7 +1523,7 @@ do_prequeue: | |||
1523 | if (!(flags & MSG_TRUNC)) { | 1523 | if (!(flags & MSG_TRUNC)) { |
1524 | #ifdef CONFIG_NET_DMA | 1524 | #ifdef CONFIG_NET_DMA |
1525 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1525 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1526 | tp->ucopy.dma_chan = get_softnet_dma(); | 1526 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1527 | 1527 | ||
1528 | if (tp->ucopy.dma_chan) { | 1528 | if (tp->ucopy.dma_chan) { |
1529 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( | 1529 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( |
@@ -1628,7 +1628,6 @@ skip_copy: | |||
1628 | 1628 | ||
1629 | /* Safe to free early-copied skbs now */ | 1629 | /* Safe to free early-copied skbs now */ |
1630 | __skb_queue_purge(&sk->sk_async_wait_queue); | 1630 | __skb_queue_purge(&sk->sk_async_wait_queue); |
1631 | dma_chan_put(tp->ucopy.dma_chan); | ||
1632 | tp->ucopy.dma_chan = NULL; | 1631 | tp->ucopy.dma_chan = NULL; |
1633 | } | 1632 | } |
1634 | if (tp->ucopy.pinned_list) { | 1633 | if (tp->ucopy.pinned_list) { |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 99b7ecbe8893..a6961d75c7ea 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, | |||
5005 | return 0; | 5005 | return 0; |
5006 | 5006 | ||
5007 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 5007 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
5008 | tp->ucopy.dma_chan = get_softnet_dma(); | 5008 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
5009 | 5009 | ||
5010 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { | 5010 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { |
5011 | 5011 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9d839fa9331e..19d7b429a262 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1594,7 +1594,7 @@ process: | |||
1594 | #ifdef CONFIG_NET_DMA | 1594 | #ifdef CONFIG_NET_DMA |
1595 | struct tcp_sock *tp = tcp_sk(sk); | 1595 | struct tcp_sock *tp = tcp_sk(sk); |
1596 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1596 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1597 | tp->ucopy.dma_chan = get_softnet_dma(); | 1597 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1598 | if (tp->ucopy.dma_chan) | 1598 | if (tp->ucopy.dma_chan) |
1599 | ret = tcp_v4_do_rcv(sk, skb); | 1599 | ret = tcp_v4_do_rcv(sk, skb); |
1600 | else | 1600 | else |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 1297306d729c..e5b85d45bee8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1675,7 +1675,7 @@ process: | |||
1675 | #ifdef CONFIG_NET_DMA | 1675 | #ifdef CONFIG_NET_DMA |
1676 | struct tcp_sock *tp = tcp_sk(sk); | 1676 | struct tcp_sock *tp = tcp_sk(sk); |
1677 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1677 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1678 | tp->ucopy.dma_chan = get_softnet_dma(); | 1678 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1679 | if (tp->ucopy.dma_chan) | 1679 | if (tp->ucopy.dma_chan) |
1680 | ret = tcp_v6_do_rcv(sk, skb); | 1680 | ret = tcp_v6_do_rcv(sk, skb); |
1681 | else | 1681 | else |