author     David S. Miller <davem@sunset.davemloft.net>   2007-06-13 02:53:03 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-06-13 03:01:24 -0400
commit     f467b998eeae933029a83db8ad860da3879acd63
tree       af63ddd67c6e1d17f0ff52a23ff99e244cf505e2   /include/asm-sparc64/dma-mapping.h
parent     56f5c0bd50e948408ac0fd587b5c89fa7e2a1b6e
[SPARC64]: Really fix parport.
We were passing a "struct pci_dev *" instead of a
"struct device *" to the parport registry routines.
No wonder things exploded.
The ebus_bus_type hacks can be backed out from
asm-sparc64/dma-mapping.h; those were wrong.
Signed-off-by: David S. Miller <davem@davemloft.net>
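
The bug the message describes is a type mix-up rather than anything parport-specific. Below is a minimal sketch of that bug class, not the actual code changed by this commit (the parport call site lies outside the diffstat that follows); register_with_device() is a hypothetical stand-in for a registration routine that expects a generic struct device *.

```c
#include <linux/pci.h>

/* Hypothetical stand-in for a routine that takes a generic device. */
extern void register_with_device(struct device *dev);

static void broken(struct pci_dev *pdev)
{
	/* WRONG: a struct pci_dev * is not a struct device *, so the
	 * callee ends up poking at the wrong structure ("things exploded"). */
	register_with_device((struct device *) pdev);
}

static void fixed(struct pci_dev *pdev)
{
	/* RIGHT: hand over the struct device embedded inside the pci_dev. */
	register_with_device(&pdev->dev);
}
```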
Diffstat (limited to 'include/asm-sparc64/dma-mapping.h')
 -rw-r--r--  include/asm-sparc64/dma-mapping.h | 42
 1 file changed, 14 insertions(+), 28 deletions(-)
```diff
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 4e21c2f3065c..c58ec1661df8 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -15,8 +15,7 @@
 static inline int
 dma_supported(struct device *dev, u64 mask)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         return pci_dma_supported(to_pci_dev(dev), mask);
 }
@@ -24,8 +23,7 @@ dma_supported(struct device *dev, u64 mask)
 static inline int
 dma_set_mask(struct device *dev, u64 dma_mask)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
 }
@@ -34,8 +32,7 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                    gfp_t flag)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
 }
@@ -44,8 +41,7 @@ static inline void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                   dma_addr_t dma_handle)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
 }
@@ -54,8 +50,7 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
                enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
 }
@@ -64,8 +59,7 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                  enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
 }
@@ -75,8 +69,7 @@ dma_map_page(struct device *dev, struct page *page,
              unsigned long offset, size_t size,
              enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
 }
@@ -85,8 +78,7 @@ static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
                enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
 }
@@ -95,8 +87,7 @@ static inline int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
            enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
 }
@@ -105,8 +96,7 @@ static inline void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
              enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
 }
@@ -115,8 +105,7 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                         enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
                                     size, (int)direction);
@@ -126,8 +115,7 @@ static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                            enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
                                        size, (int)direction);
@@ -137,8 +125,7 @@ static inline void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                     enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
 }
@@ -147,8 +134,7 @@ static inline void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                        enum dma_data_direction direction)
 {
-        BUG_ON(dev->bus != &pci_bus_type &&
-               dev->bus != &ebus_bus_type);
+        BUG_ON(dev->bus != &pci_bus_type);
 
         pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
 }
```
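
For context, here is a minimal sketch (not part of this commit) of how a caller is expected to use these wrappers after the change: the struct device handed in must be the &pdev->dev of a PCI device, so the restored BUG_ON(dev->bus != &pci_bus_type) holds and to_pci_dev() can recover the pci_dev via container_of(). The probe routine and buffer size below are purely illustrative.

```c
#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical PCI driver probe; the device passed to the DMA wrappers is
 * &pdev->dev, which sits on pci_bus_type, so the checks above are satisfied. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	dma_addr_t handle;
	void *buf;

	/* Allocates a coherent buffer; internally this becomes a
	 * pci_iommu_ops->alloc_consistent() call on to_pci_dev(&pdev->dev). */
	buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program the device with 'handle', touch 'buf' from the CPU ... */

	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
	return 0;
}
```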