author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-30 14:06:55 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-30 14:06:55 -0400
commit | 9c837fb692b005203765d8a569a2fe43fdff9df1 (patch)
tree | 914146eb36c92c929bf32af69052e7d9fa28beb1 /include
parent | 63332a9d16e7dede26d252af3f9c4304b51e7974 (diff)
parent | c1f193a7aed1b468617bb26075777c0c2f4f597a (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
[SPARC64]: Fix show_stack() when stack argument is NULL.
[SPARC]: Fix serial console node string creation.
[SPARC]: Mark SBUS framebuffer ioctls as IGNORE in compat_ioctl.c
[SPARC64]: asm-sparc64/floppy.h needs linux/pci.h
[SPARC64]: Fix conflicts in SBUS/PCI/EBUS/ISA DMA handling.
[VIDEO]: Fix OOPS in all SBUS framebuffer drivers.
[SPARC64]: Handle mostek clock type in mini_rtc driver.
[PARTITION]: Sun/Solaris VTOC table corrections
[SPARC]: Fix floppy on some sun4c systems.
[SPARC64]: Fix sun4u PCI config space accesses on sun4u.
[PARTITION] MSDOS: Fix Sun num_partitions handling.
[SPARC]: Update defconfig.
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-sparc/device.h | 4
-rw-r--r-- | include/asm-sparc/floppy.h | 80
-rw-r--r-- | include/asm-sparc64/dma-mapping.h | 337
-rw-r--r-- | include/asm-sparc64/fbio.h | 28
-rw-r--r-- | include/asm-sparc64/floppy.h | 6
-rw-r--r-- | include/asm-sparc64/iommu.h | 11
-rw-r--r-- | include/asm-sparc64/parport.h | 2
-rw-r--r-- | include/asm-sparc64/pci.h | 152
-rw-r--r-- | include/asm-sparc64/sbus.h | 86
-rw-r--r-- | include/linux/genhd.h | 2
10 files changed, 289 insertions, 419 deletions
diff --git a/include/asm-sparc/device.h b/include/asm-sparc/device.h
index 4a56d84d69..c0a7786d65 100644
--- a/include/asm-sparc/device.h
+++ b/include/asm-sparc/device.h
@@ -10,6 +10,10 @@ struct device_node;
10 | struct of_device; | 10 | struct of_device; |
11 | 11 | ||
12 | struct dev_archdata { | 12 | struct dev_archdata { |
13 | void *iommu; | ||
14 | void *stc; | ||
15 | void *host_controller; | ||
16 | |||
13 | struct device_node *prom_node; | 17 | struct device_node *prom_node; |
14 | struct of_device *op; | 18 | struct of_device *op; |
15 | }; | 19 | }; |
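The three new dev_archdata cookies are what the reworked DMA code keys off: per-device IOMMU state now travels with struct device instead of a bus-specific structure. Below is a minimal, invented sketch of the idea — the function names are placeholders, and the cookies stay opaque void pointers exactly as in the header above.

#include <linux/device.h>

/* Invented illustration: a bus layer stashes its per-device IOMMU state
 * in the new cookies at probe time, and the shared DMA code later reads
 * them back through struct device alone, without knowing the bus type.
 */
static void example_attach_iommu(struct device *dev, void *iommu, void *stc,
                                 void *host_controller)
{
    dev->archdata.iommu = iommu;                     /* translation state */
    dev->archdata.stc = stc;                         /* streaming cache */
    dev->archdata.host_controller = host_controller; /* bus cookie */
}

static void *example_iommu_of(struct device *dev)
{
    return dev->archdata.iommu;
}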
diff --git a/include/asm-sparc/floppy.h b/include/asm-sparc/floppy.h
index 9073c84218..28ce2b9c3d 100644
--- a/include/asm-sparc/floppy.h
+++ b/include/asm-sparc/floppy.h
@@ -101,6 +101,29 @@ static struct sun_floppy_ops sun_fdops;
101 | #define CROSS_64KB(a,s) (0) | 101 | #define CROSS_64KB(a,s) (0) |
102 | 102 | ||
103 | /* Routines unique to each controller type on a Sun. */ | 103 | /* Routines unique to each controller type on a Sun. */ |
104 | static void sun_set_dor(unsigned char value, int fdc_82077) | ||
105 | { | ||
106 | if (sparc_cpu_model == sun4c) { | ||
107 | unsigned int bits = 0; | ||
108 | if (value & 0x10) | ||
109 | bits |= AUXIO_FLPY_DSEL; | ||
110 | if ((value & 0x80) == 0) | ||
111 | bits |= AUXIO_FLPY_EJCT; | ||
112 | set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT)); | ||
113 | } | ||
114 | if (fdc_82077) { | ||
115 | sun_fdc->dor_82077 = value; | ||
116 | } | ||
117 | } | ||
118 | |||
119 | static unsigned char sun_read_dir(void) | ||
120 | { | ||
121 | if (sparc_cpu_model == sun4c) | ||
122 | return (get_auxio() & AUXIO_FLPY_DCHG) ? 0x80 : 0; | ||
123 | else | ||
124 | return sun_fdc->dir_82077; | ||
125 | } | ||
126 | |||
104 | static unsigned char sun_82072_fd_inb(int port) | 127 | static unsigned char sun_82072_fd_inb(int port) |
105 | { | 128 | { |
106 | udelay(5); | 129 | udelay(5); |
@@ -113,7 +136,7 @@ static unsigned char sun_82072_fd_inb(int port)
113 | case 5: /* FD_DATA */ | 136 | case 5: /* FD_DATA */ |
114 | return sun_fdc->data_82072; | 137 | return sun_fdc->data_82072; |
115 | case 7: /* FD_DIR */ | 138 | case 7: /* FD_DIR */ |
116 | return (get_auxio() & AUXIO_FLPY_DCHG)? 0x80: 0; | 139 | return sun_read_dir(); |
117 | }; | 140 | }; |
118 | panic("sun_82072_fd_inb: How did I get here?"); | 141 | panic("sun_82072_fd_inb: How did I get here?"); |
119 | } | 142 | } |
@@ -126,20 +149,7 @@ static void sun_82072_fd_outb(unsigned char value, int port)
126 | printk("floppy: Asked to write to unknown port %d\n", port); | 149 | printk("floppy: Asked to write to unknown port %d\n", port); |
127 | panic("floppy: Port bolixed."); | 150 | panic("floppy: Port bolixed."); |
128 | case 2: /* FD_DOR */ | 151 | case 2: /* FD_DOR */ |
129 | /* Oh geese, 82072 on the Sun has no DOR register, | 152 | sun_set_dor(value, 0); |
130 | * the functionality is implemented via the AUXIO | ||
131 | * I/O register. So we must emulate the behavior. | ||
132 | * | ||
133 | * ASSUMPTIONS: There will only ever be one floppy | ||
134 | * drive attached to a Sun controller | ||
135 | * and it will be at drive zero. | ||
136 | */ | ||
137 | { | ||
138 | unsigned bits = 0; | ||
139 | if (value & 0x10) bits |= AUXIO_FLPY_DSEL; | ||
140 | if ((value & 0x80) == 0) bits |= AUXIO_FLPY_EJCT; | ||
141 | set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT)); | ||
142 | } | ||
143 | break; | 153 | break; |
144 | case 5: /* FD_DATA */ | 154 | case 5: /* FD_DATA */ |
145 | sun_fdc->data_82072 = value; | 155 | sun_fdc->data_82072 = value; |
@@ -161,15 +171,22 @@ static unsigned char sun_82077_fd_inb(int port)
161 | default: | 171 | default: |
162 | printk("floppy: Asked to read unknown port %d\n", port); | 172 | printk("floppy: Asked to read unknown port %d\n", port); |
163 | panic("floppy: Port bolixed."); | 173 | panic("floppy: Port bolixed."); |
174 | case 0: /* FD_STATUS_0 */ | ||
175 | return sun_fdc->status1_82077; | ||
176 | case 1: /* FD_STATUS_1 */ | ||
177 | return sun_fdc->status2_82077; | ||
178 | case 2: /* FD_DOR */ | ||
179 | return sun_fdc->dor_82077; | ||
180 | case 3: /* FD_TDR */ | ||
181 | return sun_fdc->tapectl_82077; | ||
164 | case 4: /* FD_STATUS */ | 182 | case 4: /* FD_STATUS */ |
165 | return sun_fdc->status_82077 & ~STATUS_DMA; | 183 | return sun_fdc->status_82077 & ~STATUS_DMA; |
166 | case 5: /* FD_DATA */ | 184 | case 5: /* FD_DATA */ |
167 | return sun_fdc->data_82077; | 185 | return sun_fdc->data_82077; |
168 | case 7: /* FD_DIR */ | 186 | case 7: /* FD_DIR */ |
169 | /* XXX: Is DCL on 0x80 in sun4m? */ | 187 | return sun_read_dir(); |
170 | return sun_fdc->dir_82077; | ||
171 | }; | 188 | }; |
172 | panic("sun_82072_fd_inb: How did I get here?"); | 189 | panic("sun_82077_fd_inb: How did I get here?"); |
173 | } | 190 | } |
174 | 191 | ||
175 | static void sun_82077_fd_outb(unsigned char value, int port) | 192 | static void sun_82077_fd_outb(unsigned char value, int port) |
@@ -180,8 +197,7 @@ static void sun_82077_fd_outb(unsigned char value, int port)
180 | printk("floppy: Asked to write to unknown port %d\n", port); | 197 | printk("floppy: Asked to write to unknown port %d\n", port); |
181 | panic("floppy: Port bolixed."); | 198 | panic("floppy: Port bolixed."); |
182 | case 2: /* FD_DOR */ | 199 | case 2: /* FD_DOR */ |
183 | /* Happily, the 82077 has a real DOR register. */ | 200 | sun_set_dor(value, 1); |
184 | sun_fdc->dor_82077 = value; | ||
185 | break; | 201 | break; |
186 | case 5: /* FD_DATA */ | 202 | case 5: /* FD_DATA */ |
187 | sun_fdc->data_82077 = value; | 203 | sun_fdc->data_82077 = value; |
@@ -192,6 +208,9 @@ static void sun_82077_fd_outb(unsigned char value, int port)
192 | case 4: /* FD_STATUS */ | 208 | case 4: /* FD_STATUS */ |
193 | sun_fdc->status_82077 = value; | 209 | sun_fdc->status_82077 = value; |
194 | break; | 210 | break; |
211 | case 3: /* FD_TDR */ | ||
212 | sun_fdc->tapectl_82077 = value; | ||
213 | break; | ||
195 | }; | 214 | }; |
196 | return; | 215 | return; |
197 | } | 216 | } |
@@ -332,16 +351,17 @@ static int sun_floppy_init(void)
332 | goto no_sun_fdc; | 351 | goto no_sun_fdc; |
333 | } | 352 | } |
334 | 353 | ||
335 | if(sparc_cpu_model == sun4c) { | 354 | sun_fdops.fd_inb = sun_82077_fd_inb; |
336 | sun_fdops.fd_inb = sun_82072_fd_inb; | 355 | sun_fdops.fd_outb = sun_82077_fd_outb; |
337 | sun_fdops.fd_outb = sun_82072_fd_outb; | 356 | fdc_status = &sun_fdc->status_82077; |
338 | fdc_status = &sun_fdc->status_82072; | 357 | |
339 | /* printk("AUXIO @0x%lx\n", auxio_register); */ /* P3 */ | 358 | if (sun_fdc->dor_82077 == 0x80) { |
340 | } else { | 359 | sun_fdc->dor_82077 = 0x02; |
341 | sun_fdops.fd_inb = sun_82077_fd_inb; | 360 | if (sun_fdc->dor_82077 == 0x80) { |
342 | sun_fdops.fd_outb = sun_82077_fd_outb; | 361 | sun_fdops.fd_inb = sun_82072_fd_inb; |
343 | fdc_status = &sun_fdc->status_82077; | 362 | sun_fdops.fd_outb = sun_82072_fd_outb; |
344 | /* printk("DOR @0x%p\n", &sun_fdc->dor_82077); */ /* P3 */ | 363 | fdc_status = &sun_fdc->status_82072; |
364 | } | ||
345 | } | 365 | } |
346 | 366 | ||
347 | /* Success... */ | 367 | /* Success... */ |
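The floppy change replaces the old sparc_cpu_model switch with the sun_set_dor()/sun_read_dir() helpers plus a run-time probe of the DOR register in sun_floppy_init(), which is what fixes sun4c machines that actually carry an 82077. The patch treats a DOR that still reads 0x80 after writing 0x02 as absent, i.e. an 82072. The fragment below only restates that check in one place; the wrapper function name is invented, and it assumes the same sun_fdc register layout as this header.

/* Restatement of the detection added to sun_floppy_init() above: an
 * 82072 has no DOR, so a written value does not stick and the register
 * keeps reading 0x80, while an 82077 latches the write.
 */
static int example_fdc_is_82077(void)
{
    if (sun_fdc->dor_82077 != 0x80)
        return 1;                   /* reads back like a real register */

    sun_fdc->dor_82077 = 0x02;      /* try to latch a value */
    return sun_fdc->dor_82077 != 0x80;
}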
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index c58ec1661d..0a1006692b 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -1,307 +1,134 @@
1 | #ifndef _ASM_SPARC64_DMA_MAPPING_H | 1 | #ifndef _ASM_SPARC64_DMA_MAPPING_H |
2 | #define _ASM_SPARC64_DMA_MAPPING_H | 2 | #define _ASM_SPARC64_DMA_MAPPING_H |
3 | 3 | ||
4 | 4 | #include <linux/scatterlist.h> | |
5 | #ifdef CONFIG_PCI | ||
6 | |||
7 | /* we implement the API below in terms of the existing PCI one, | ||
8 | * so include it */ | ||
9 | #include <linux/pci.h> | ||
10 | /* need struct page definitions */ | ||
11 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
12 | 6 | ||
13 | #include <asm/of_device.h> | 7 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) |
14 | 8 | ||
15 | static inline int | 9 | struct dma_ops { |
16 | dma_supported(struct device *dev, u64 mask) | 10 | void *(*alloc_coherent)(struct device *dev, size_t size, |
17 | { | 11 | dma_addr_t *dma_handle, gfp_t flag); |
18 | BUG_ON(dev->bus != &pci_bus_type); | 12 | void (*free_coherent)(struct device *dev, size_t size, |
19 | 13 | void *cpu_addr, dma_addr_t dma_handle); | |
20 | return pci_dma_supported(to_pci_dev(dev), mask); | 14 | dma_addr_t (*map_single)(struct device *dev, void *cpu_addr, |
21 | } | 15 | size_t size, |
22 | 16 | enum dma_data_direction direction); | |
23 | static inline int | 17 | void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, |
24 | dma_set_mask(struct device *dev, u64 dma_mask) | 18 | size_t size, |
25 | { | 19 | enum dma_data_direction direction); |
26 | BUG_ON(dev->bus != &pci_bus_type); | 20 | int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, |
27 | 21 | enum dma_data_direction direction); | |
28 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | 22 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, |
29 | } | 23 | int nhwentries, |
30 | 24 | enum dma_data_direction direction); | |
31 | static inline void * | 25 | void (*sync_single_for_cpu)(struct device *dev, |
32 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | 26 | dma_addr_t dma_handle, size_t size, |
33 | gfp_t flag) | 27 | enum dma_data_direction direction); |
34 | { | 28 | void (*sync_single_for_device)(struct device *dev, |
35 | BUG_ON(dev->bus != &pci_bus_type); | 29 | dma_addr_t dma_handle, size_t size, |
36 | 30 | enum dma_data_direction direction); | |
37 | return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag); | 31 | void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, |
38 | } | 32 | int nelems, |
39 | 33 | enum dma_data_direction direction); | |
40 | static inline void | 34 | void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg, |
41 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 35 | int nelems, |
42 | dma_addr_t dma_handle) | 36 | enum dma_data_direction direction); |
43 | { | 37 | }; |
44 | BUG_ON(dev->bus != &pci_bus_type); | 38 | extern const struct dma_ops *dma_ops; |
45 | 39 | ||
46 | pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle); | 40 | extern int dma_supported(struct device *dev, u64 mask); |
47 | } | 41 | extern int dma_set_mask(struct device *dev, u64 dma_mask); |
48 | |||
49 | static inline dma_addr_t | ||
50 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, | ||
51 | enum dma_data_direction direction) | ||
52 | { | ||
53 | BUG_ON(dev->bus != &pci_bus_type); | ||
54 | |||
55 | return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction); | ||
56 | } | ||
57 | |||
58 | static inline void | ||
59 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
60 | enum dma_data_direction direction) | ||
61 | { | ||
62 | BUG_ON(dev->bus != &pci_bus_type); | ||
63 | |||
64 | pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction); | ||
65 | } | ||
66 | |||
67 | static inline dma_addr_t | ||
68 | dma_map_page(struct device *dev, struct page *page, | ||
69 | unsigned long offset, size_t size, | ||
70 | enum dma_data_direction direction) | ||
71 | { | ||
72 | BUG_ON(dev->bus != &pci_bus_type); | ||
73 | |||
74 | return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction); | ||
75 | } | ||
76 | |||
77 | static inline void | ||
78 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
79 | enum dma_data_direction direction) | ||
80 | { | ||
81 | BUG_ON(dev->bus != &pci_bus_type); | ||
82 | |||
83 | pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction); | ||
84 | } | ||
85 | |||
86 | static inline int | ||
87 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
88 | enum dma_data_direction direction) | ||
89 | { | ||
90 | BUG_ON(dev->bus != &pci_bus_type); | ||
91 | |||
92 | return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); | ||
93 | } | ||
94 | |||
95 | static inline void | ||
96 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
97 | enum dma_data_direction direction) | ||
98 | { | ||
99 | BUG_ON(dev->bus != &pci_bus_type); | ||
100 | |||
101 | pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction); | ||
102 | } | ||
103 | |||
104 | static inline void | ||
105 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
106 | enum dma_data_direction direction) | ||
107 | { | ||
108 | BUG_ON(dev->bus != &pci_bus_type); | ||
109 | |||
110 | pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, | ||
111 | size, (int)direction); | ||
112 | } | ||
113 | |||
114 | static inline void | ||
115 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
116 | enum dma_data_direction direction) | ||
117 | { | ||
118 | BUG_ON(dev->bus != &pci_bus_type); | ||
119 | |||
120 | pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, | ||
121 | size, (int)direction); | ||
122 | } | ||
123 | |||
124 | static inline void | ||
125 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
126 | enum dma_data_direction direction) | ||
127 | { | ||
128 | BUG_ON(dev->bus != &pci_bus_type); | ||
129 | |||
130 | pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction); | ||
131 | } | ||
132 | |||
133 | static inline void | ||
134 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
135 | enum dma_data_direction direction) | ||
136 | { | ||
137 | BUG_ON(dev->bus != &pci_bus_type); | ||
138 | |||
139 | pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction); | ||
140 | } | ||
141 | |||
142 | static inline int | ||
143 | dma_mapping_error(dma_addr_t dma_addr) | ||
144 | { | ||
145 | return pci_dma_mapping_error(dma_addr); | ||
146 | } | ||
147 | |||
148 | #else | ||
149 | |||
150 | struct device; | ||
151 | struct page; | ||
152 | struct scatterlist; | ||
153 | |||
154 | static inline int | ||
155 | dma_supported(struct device *dev, u64 mask) | ||
156 | { | ||
157 | BUG(); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static inline int | ||
162 | dma_set_mask(struct device *dev, u64 dma_mask) | ||
163 | { | ||
164 | BUG(); | ||
165 | return 0; | ||
166 | } | ||
167 | 42 | ||
168 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 43 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
169 | dma_addr_t *dma_handle, gfp_t flag) | 44 | dma_addr_t *dma_handle, gfp_t flag) |
170 | { | 45 | { |
171 | BUG(); | 46 | return dma_ops->alloc_coherent(dev, size, dma_handle, flag); |
172 | return NULL; | ||
173 | } | 47 | } |
174 | 48 | ||
175 | static inline void dma_free_coherent(struct device *dev, size_t size, | 49 | static inline void dma_free_coherent(struct device *dev, size_t size, |
176 | void *vaddr, dma_addr_t dma_handle) | 50 | void *cpu_addr, dma_addr_t dma_handle) |
177 | { | 51 | { |
178 | BUG(); | 52 | dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); |
179 | } | 53 | } |
180 | 54 | ||
181 | static inline dma_addr_t | 55 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, |
182 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, | 56 | size_t size, |
183 | enum dma_data_direction direction) | 57 | enum dma_data_direction direction) |
184 | { | 58 | { |
185 | BUG(); | 59 | return dma_ops->map_single(dev, cpu_addr, size, direction); |
186 | return 0; | ||
187 | } | 60 | } |
188 | 61 | ||
189 | static inline void | 62 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, |
190 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | 63 | size_t size, |
191 | enum dma_data_direction direction) | 64 | enum dma_data_direction direction) |
192 | { | 65 | { |
193 | BUG(); | 66 | dma_ops->unmap_single(dev, dma_addr, size, direction); |
194 | } | 67 | } |
195 | 68 | ||
196 | static inline dma_addr_t | 69 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, |
197 | dma_map_page(struct device *dev, struct page *page, | 70 | unsigned long offset, size_t size, |
198 | unsigned long offset, size_t size, | 71 | enum dma_data_direction direction) |
199 | enum dma_data_direction direction) | ||
200 | { | 72 | { |
201 | BUG(); | 73 | return dma_ops->map_single(dev, page_address(page) + offset, |
202 | return 0; | 74 | size, direction); |
203 | } | 75 | } |
204 | 76 | ||
205 | static inline void | 77 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
206 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | 78 | size_t size, |
207 | enum dma_data_direction direction) | 79 | enum dma_data_direction direction) |
208 | { | 80 | { |
209 | BUG(); | 81 | dma_ops->unmap_single(dev, dma_address, size, direction); |
210 | } | 82 | } |
211 | 83 | ||
212 | static inline int | 84 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, |
213 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 85 | int nents, enum dma_data_direction direction) |
214 | enum dma_data_direction direction) | ||
215 | { | 86 | { |
216 | BUG(); | 87 | return dma_ops->map_sg(dev, sg, nents, direction); |
217 | return 0; | ||
218 | } | 88 | } |
219 | 89 | ||
220 | static inline void | 90 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
221 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | 91 | int nents, enum dma_data_direction direction) |
222 | enum dma_data_direction direction) | ||
223 | { | 92 | { |
224 | BUG(); | 93 | dma_ops->unmap_sg(dev, sg, nents, direction); |
225 | } | 94 | } |
226 | 95 | ||
227 | static inline void | 96 | static inline void dma_sync_single_for_cpu(struct device *dev, |
228 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | 97 | dma_addr_t dma_handle, size_t size, |
229 | enum dma_data_direction direction) | 98 | enum dma_data_direction direction) |
230 | { | 99 | { |
231 | BUG(); | 100 | dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); |
232 | } | 101 | } |
233 | 102 | ||
234 | static inline void | 103 | static inline void dma_sync_single_for_device(struct device *dev, |
235 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | 104 | dma_addr_t dma_handle, |
236 | enum dma_data_direction direction) | 105 | size_t size, |
106 | enum dma_data_direction direction) | ||
237 | { | 107 | { |
238 | BUG(); | 108 | dma_ops->sync_single_for_device(dev, dma_handle, size, direction); |
239 | } | 109 | } |
240 | 110 | ||
241 | static inline void | 111 | static inline void dma_sync_sg_for_cpu(struct device *dev, |
242 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | 112 | struct scatterlist *sg, int nelems, |
243 | enum dma_data_direction direction) | 113 | enum dma_data_direction direction) |
244 | { | 114 | { |
245 | BUG(); | 115 | dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); |
246 | } | 116 | } |
247 | 117 | ||
248 | static inline void | 118 | static inline void dma_sync_sg_for_device(struct device *dev, |
249 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | 119 | struct scatterlist *sg, int nelems, |
250 | enum dma_data_direction direction) | 120 | enum dma_data_direction direction) |
251 | { | 121 | { |
252 | BUG(); | 122 | dma_ops->sync_sg_for_device(dev, sg, nelems, direction); |
253 | } | 123 | } |
254 | 124 | ||
255 | static inline int | 125 | static inline int dma_mapping_error(dma_addr_t dma_addr) |
256 | dma_mapping_error(dma_addr_t dma_addr) | ||
257 | { | 126 | { |
258 | BUG(); | 127 | return (dma_addr == DMA_ERROR_CODE); |
259 | return 0; | ||
260 | } | 128 | } |
261 | 129 | ||
262 | #endif /* PCI */ | ||
263 | |||
264 | |||
265 | /* Now for the API extensions over the pci_ one */ | ||
266 | |||
267 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 130 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
268 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 131 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
269 | #define dma_is_consistent(d, h) (1) | 132 | #define dma_is_consistent(d, h) (1) |
270 | 133 | ||
271 | static inline int | ||
272 | dma_get_cache_alignment(void) | ||
273 | { | ||
274 | /* no easy way to get cache size on all processors, so return | ||
275 | * the maximum possible, to be safe */ | ||
276 | return (1 << INTERNODE_CACHE_SHIFT); | ||
277 | } | ||
278 | |||
279 | static inline void | ||
280 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
281 | unsigned long offset, size_t size, | ||
282 | enum dma_data_direction direction) | ||
283 | { | ||
284 | /* just sync everything, that's all the pci API can do */ | ||
285 | dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction); | ||
286 | } | ||
287 | |||
288 | static inline void | ||
289 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
290 | unsigned long offset, size_t size, | ||
291 | enum dma_data_direction direction) | ||
292 | { | ||
293 | /* just sync everything, that's all the pci API can do */ | ||
294 | dma_sync_single_for_device(dev, dma_handle, offset+size, direction); | ||
295 | } | ||
296 | |||
297 | static inline void | ||
298 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
299 | enum dma_data_direction direction) | ||
300 | { | ||
301 | /* could define this in terms of the dma_cache ... operations, | ||
302 | * but if you get this on a platform, you should convert the platform | ||
303 | * to using the generic device DMA API */ | ||
304 | BUG(); | ||
305 | } | ||
306 | |||
307 | #endif /* _ASM_SPARC64_DMA_MAPPING_H */ | 134 | #endif /* _ASM_SPARC64_DMA_MAPPING_H */ |
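This is the core of the merge: the sparc64 dma_* entry points stop being PCI-only wrappers (with BUG() stubs when CONFIG_PCI is off) and instead dispatch through a dma_ops vector installed by the platform IOMMU code, so SBUS/EBUS/ISA devices share the same paths. Below is a hedged driver-side sketch of how the API is consumed; the function, buffer names and sizes are invented, only the dma_* calls and the DMA_ERROR_CODE error convention come from the header above.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Invented driver fragment: the same calls work whether "dev" is a PCI
 * function or an SBUS/EBUS device, because everything funnels into the
 * platform-installed dma_ops table.
 */
static int example_setup_dma(struct device *dev, void *data, size_t len,
                             void **ring, dma_addr_t *ring_dvma,
                             dma_addr_t *data_dvma)
{
    /* Coherent descriptor ring. */
    *ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dvma, GFP_KERNEL);
    if (!*ring)
        return -ENOMEM;

    /* Streaming mapping of a data buffer; failures come back as
     * DMA_ERROR_CODE and are caught by dma_mapping_error(). */
    *data_dvma = dma_map_single(dev, data, len, DMA_TO_DEVICE);
    if (dma_mapping_error(*data_dvma)) {
        dma_free_coherent(dev, PAGE_SIZE, *ring, *ring_dvma);
        return -ENOMEM;
    }
    return 0;
}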
diff --git a/include/asm-sparc64/fbio.h b/include/asm-sparc64/fbio.h
index 500026d9f6..b9215a0907 100644
--- a/include/asm-sparc64/fbio.h
+++ b/include/asm-sparc64/fbio.h
@@ -2,6 +2,7 @@
2 | #define __LINUX_FBIO_H | 2 | #define __LINUX_FBIO_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <linux/types.h> | ||
5 | 6 | ||
6 | /* Constants used for fbio SunOS compatibility */ | 7 | /* Constants used for fbio SunOS compatibility */ |
7 | /* (C) 1996 Miguel de Icaza */ | 8 | /* (C) 1996 Miguel de Icaza */ |
@@ -299,4 +300,31 @@ struct fb_clut32 {
299 | #define LEO_LD_GBL_MAP 0x01009000 | 300 | #define LEO_LD_GBL_MAP 0x01009000 |
300 | #define LEO_UNK2_MAP 0x0100a000 | 301 | #define LEO_UNK2_MAP 0x0100a000 |
301 | 302 | ||
303 | #ifdef __KERNEL__ | ||
304 | struct fbcmap32 { | ||
305 | int index; /* first element (0 origin) */ | ||
306 | int count; | ||
307 | u32 red; | ||
308 | u32 green; | ||
309 | u32 blue; | ||
310 | }; | ||
311 | |||
312 | #define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32) | ||
313 | #define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32) | ||
314 | |||
315 | struct fbcursor32 { | ||
316 | short set; /* what to set, choose from the list above */ | ||
317 | short enable; /* cursor on/off */ | ||
318 | struct fbcurpos pos; /* cursor position */ | ||
319 | struct fbcurpos hot; /* cursor hot spot */ | ||
320 | struct fbcmap32 cmap; /* color map info */ | ||
321 | struct fbcurpos size; /* cursor bit map size */ | ||
322 | u32 image; /* cursor image bits */ | ||
323 | u32 mask; /* cursor mask bits */ | ||
324 | }; | ||
325 | |||
326 | #define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32) | ||
327 | #define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32) | ||
328 | #endif | ||
329 | |||
302 | #endif /* __LINUX_FBIO_H */ | 330 | #endif /* __LINUX_FBIO_H */ |
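The fbcmap32/fbcursor32 structures exist because 32-bit userland passes pointers that a 64-bit kernel sees as u32 values; with the SBUS framebuffer ioctls marked IGNORE in compat_ioctl.c, the drivers themselves now do the widening. The fragment below is only a rough sketch of that translation for FBIOPUTCMAP32 — both the function name and the native do_fbioputcmap() handler are invented stand-ins for the driver's own code paths.

#include <linux/compat.h>
#include <linux/fb.h>
#include <linux/uaccess.h>
#include <asm/fbio.h>

/* Placeholder compat fragment: the u32 members of fbcmap32 are really
 * 32-bit user pointers and must be widened with compat_ptr() before the
 * native struct fbcmap can be handed to the normal ioctl path.
 */
static int example_compat_putcmap(struct fb_info *info,
                                  struct fbcmap32 __user *argp)
{
    struct fbcmap32 c32;
    struct fbcmap c;

    if (copy_from_user(&c32, argp, sizeof(c32)))
        return -EFAULT;

    c.index = c32.index;
    c.count = c32.count;
    c.red   = compat_ptr(c32.red);
    c.green = compat_ptr(c32.green);
    c.blue  = compat_ptr(c32.blue);

    return do_fbioputcmap(info, &c);    /* invented native handler */
}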
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h
index 4aa0925e1b..1783239c7b 100644
--- a/include/asm-sparc64/floppy.h
+++ b/include/asm-sparc64/floppy.h
@@ -1,7 +1,6 @@
1 | /* $Id: floppy.h,v 1.32 2001/10/26 17:59:36 davem Exp $ | 1 | /* floppy.h: Sparc specific parts of the Floppy driver. |
2 | * asm-sparc64/floppy.h: Sparc specific parts of the Floppy driver. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 4 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
6 | * | 5 | * |
7 | * Ultra/PCI support added: Sep 1997 Eddie C. Dost (ecd@skynet.be) | 6 | * Ultra/PCI support added: Sep 1997 Eddie C. Dost (ecd@skynet.be) |
@@ -11,6 +10,7 @@
11 | #define __ASM_SPARC64_FLOPPY_H | 10 | #define __ASM_SPARC64_FLOPPY_H |
12 | 11 | ||
13 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/pci.h> | ||
14 | 14 | ||
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | #include <asm/pgtable.h> | 16 | #include <asm/pgtable.h> |
diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h
index 0b1813f410..9eac6676ca 100644
--- a/include/asm-sparc64/iommu.h
+++ b/include/asm-sparc64/iommu.h
@@ -1,7 +1,6 @@
1 | /* $Id: iommu.h,v 1.10 2001/03/08 09:55:56 davem Exp $ | 1 | /* iommu.h: Definitions for the sun5 IOMMU. |
2 | * iommu.h: Definitions for the sun5 IOMMU. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1996, 1999 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | #ifndef _SPARC64_IOMMU_H | 5 | #ifndef _SPARC64_IOMMU_H |
7 | #define _SPARC64_IOMMU_H | 6 | #define _SPARC64_IOMMU_H |
@@ -33,6 +32,7 @@ struct iommu {
33 | unsigned long iommu_tsbbase; | 32 | unsigned long iommu_tsbbase; |
34 | unsigned long iommu_flush; | 33 | unsigned long iommu_flush; |
35 | unsigned long iommu_flushinv; | 34 | unsigned long iommu_flushinv; |
35 | unsigned long iommu_tags; | ||
36 | unsigned long iommu_ctxflush; | 36 | unsigned long iommu_ctxflush; |
37 | unsigned long write_complete_reg; | 37 | unsigned long write_complete_reg; |
38 | unsigned long dummy_page; | 38 | unsigned long dummy_page; |
@@ -54,4 +54,7 @@ struct strbuf {
54 | volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)]; | 54 | volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)]; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | #endif /* !(_SPARC_IOMMU_H) */ | 57 | extern int iommu_table_init(struct iommu *iommu, int tsbsize, |
58 | u32 dma_offset, u32 dma_addr_mask); | ||
59 | |||
60 | #endif /* !(_SPARC64_IOMMU_H) */ | ||
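Exporting iommu_table_init() is what lets the PCI, SBUS and other controller code build their translation tables through one shared routine instead of private copies. The call site below is illustrative only: the probe function and the numeric arguments are invented, while the argument order (TSB size, DVMA offset, DMA address mask) matches the declaration above.

#include <asm/iommu.h>

/* Invented controller-probe fragment; real callers supply their own TSB
 * size, DVMA window base and DMA address mask.
 */
static int example_controller_probe(struct iommu *iommu)
{
    /* e.g. a 1MB TSB, DVMA window at 0xc0000000, 32-bit mask. */
    return iommu_table_init(iommu, 128 * 1024 * 8, 0xc0000000, 0xffffffff);
}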
diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h
index 600afe5ae2..8116e8f606 100644
--- a/include/asm-sparc64/parport.h
+++ b/include/asm-sparc64/parport.h
@@ -117,7 +117,7 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
117 | if (!strcmp(parent->name, "dma")) { | 117 | if (!strcmp(parent->name, "dma")) { |
118 | p = parport_pc_probe_port(base, base + 0x400, | 118 | p = parport_pc_probe_port(base, base + 0x400, |
119 | op->irqs[0], PARPORT_DMA_NOFIFO, | 119 | op->irqs[0], PARPORT_DMA_NOFIFO, |
120 | op->dev.parent); | 120 | op->dev.parent->parent); |
121 | if (!p) | 121 | if (!p) |
122 | return -ENOMEM; | 122 | return -ENOMEM; |
123 | dev_set_drvdata(&op->dev, p); | 123 | dev_set_drvdata(&op->dev, p); |
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index e11ac100f0..1393e57d50 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -3,8 +3,7 @@
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | #include <linux/fs.h> | 6 | #include <linux/dma-mapping.h> |
7 | #include <linux/mm.h> | ||
8 | 7 | ||
9 | /* Can be used to override the logic in pci_scan_bus for skipping | 8 | /* Can be used to override the logic in pci_scan_bus for skipping |
10 | * already-configured bus numbers - to be used for buggy BIOSes | 9 | * already-configured bus numbers - to be used for buggy BIOSes |
@@ -30,80 +29,42 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
30 | /* We don't do dynamic PCI IRQ allocation */ | 29 | /* We don't do dynamic PCI IRQ allocation */ |
31 | } | 30 | } |
32 | 31 | ||
33 | /* Dynamic DMA mapping stuff. | ||
34 | */ | ||
35 | |||
36 | /* The PCI address space does not equal the physical memory | 32 | /* The PCI address space does not equal the physical memory |
37 | * address space. The networking and block device layers use | 33 | * address space. The networking and block device layers use |
38 | * this boolean for bounce buffer decisions. | 34 | * this boolean for bounce buffer decisions. |
39 | */ | 35 | */ |
40 | #define PCI_DMA_BUS_IS_PHYS (0) | 36 | #define PCI_DMA_BUS_IS_PHYS (0) |
41 | 37 | ||
42 | #include <asm/scatterlist.h> | 38 | static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, |
43 | 39 | dma_addr_t *dma_handle) | |
44 | struct pci_dev; | ||
45 | |||
46 | struct pci_iommu_ops { | ||
47 | void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *, gfp_t); | ||
48 | void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t); | ||
49 | dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int); | ||
50 | void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int); | ||
51 | int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int); | ||
52 | void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int); | ||
53 | void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int); | ||
54 | void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int); | ||
55 | }; | ||
56 | |||
57 | extern const struct pci_iommu_ops *pci_iommu_ops; | ||
58 | |||
59 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | ||
60 | * hwdev should be valid struct pci_dev pointer for PCI devices. | ||
61 | */ | ||
62 | static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) | ||
63 | { | 40 | { |
64 | return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle, GFP_ATOMIC); | 41 | return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC); |
65 | } | 42 | } |
66 | 43 | ||
67 | /* Free and unmap a consistent DMA buffer. | 44 | static inline void pci_free_consistent(struct pci_dev *pdev, size_t size, |
68 | * cpu_addr is what was returned from pci_alloc_consistent, | 45 | void *vaddr, dma_addr_t dma_handle) |
69 | * size must be the same as what as passed into pci_alloc_consistent, | ||
70 | * and likewise dma_addr must be the same as what *dma_addrp was set to. | ||
71 | * | ||
72 | * References to the memory and mappings associated with cpu_addr/dma_addr | ||
73 | * past this call are illegal. | ||
74 | */ | ||
75 | static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) | ||
76 | { | 46 | { |
77 | return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle); | 47 | return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle); |
78 | } | 48 | } |
79 | 49 | ||
80 | /* Map a single buffer of the indicated size for DMA in streaming mode. | 50 | static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, |
81 | * The 32-bit bus address to use is returned. | 51 | size_t size, int direction) |
82 | * | ||
83 | * Once the device is given the dma address, the device owns this memory | ||
84 | * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. | ||
85 | */ | ||
86 | static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) | ||
87 | { | 52 | { |
88 | return pci_iommu_ops->map_single(hwdev, ptr, size, direction); | 53 | return dma_map_single(&pdev->dev, ptr, size, |
54 | (enum dma_data_direction) direction); | ||
89 | } | 55 | } |
90 | 56 | ||
91 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | 57 | static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, |
92 | * must match what was provided for in a previous pci_map_single call. All | 58 | size_t size, int direction) |
93 | * other usages are undefined. | ||
94 | * | ||
95 | * After this call, reads by the cpu to the buffer are guaranteed to see | ||
96 | * whatever the device wrote there. | ||
97 | */ | ||
98 | static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction) | ||
99 | { | 59 | { |
100 | pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction); | 60 | dma_unmap_single(&pdev->dev, dma_addr, size, |
61 | (enum dma_data_direction) direction); | ||
101 | } | 62 | } |
102 | 63 | ||
103 | /* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */ | ||
104 | #define pci_map_page(dev, page, off, size, dir) \ | 64 | #define pci_map_page(dev, page, off, size, dir) \ |
105 | pci_map_single(dev, (page_address(page) + (off)), size, dir) | 65 | pci_map_single(dev, (page_address(page) + (off)), size, dir) |
106 | #define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir) | 66 | #define pci_unmap_page(dev,addr,sz,dir) \ |
67 | pci_unmap_single(dev,addr,sz,dir) | ||
107 | 68 | ||
108 | /* pci_unmap_{single,page} is not a nop, thus... */ | 69 | /* pci_unmap_{single,page} is not a nop, thus... */ |
109 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 70 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ |
@@ -119,75 +80,48 @@ static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
119 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | 80 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ |
120 | (((PTR)->LEN_NAME) = (VAL)) | 81 | (((PTR)->LEN_NAME) = (VAL)) |
121 | 82 | ||
122 | /* Map a set of buffers described by scatterlist in streaming | 83 | static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, |
123 | * mode for DMA. This is the scatter-gather version of the | 84 | int nents, int direction) |
124 | * above pci_map_single interface. Here the scatter gather list | ||
125 | * elements are each tagged with the appropriate dma address | ||
126 | * and length. They are obtained via sg_dma_{address,length}(SG). | ||
127 | * | ||
128 | * NOTE: An implementation may be able to use a smaller number of | ||
129 | * DMA address/length pairs than there are SG table elements. | ||
130 | * (for example via virtual mapping capabilities) | ||
131 | * The routine returns the number of addr/length pairs actually | ||
132 | * used, at most nents. | ||
133 | * | ||
134 | * Device ownership issues as mentioned above for pci_map_single are | ||
135 | * the same here. | ||
136 | */ | ||
137 | static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) | ||
138 | { | 85 | { |
139 | return pci_iommu_ops->map_sg(hwdev, sg, nents, direction); | 86 | return dma_map_sg(&pdev->dev, sg, nents, |
87 | (enum dma_data_direction) direction); | ||
140 | } | 88 | } |
141 | 89 | ||
142 | /* Unmap a set of streaming mode DMA translations. | 90 | static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, |
143 | * Again, cpu read rules concerning calls here are the same as for | 91 | int nents, int direction) |
144 | * pci_unmap_single() above. | ||
145 | */ | ||
146 | static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction) | ||
147 | { | 92 | { |
148 | pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction); | 93 | dma_unmap_sg(&pdev->dev, sg, nents, |
94 | (enum dma_data_direction) direction); | ||
149 | } | 95 | } |
150 | 96 | ||
151 | /* Make physical memory consistent for a single | 97 | static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, |
152 | * streaming mode DMA translation after a transfer. | 98 | dma_addr_t dma_handle, |
153 | * | 99 | size_t size, int direction) |
154 | * If you perform a pci_map_single() but wish to interrogate the | ||
155 | * buffer using the cpu, yet do not wish to teardown the PCI dma | ||
156 | * mapping, you must call this function before doing so. At the | ||
157 | * next point you give the PCI dma address back to the card, you | ||
158 | * must first perform a pci_dma_sync_for_device, and then the | ||
159 | * device again owns the buffer. | ||
160 | */ | ||
161 | static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) | ||
162 | { | 100 | { |
163 | pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction); | 101 | dma_sync_single_for_cpu(&pdev->dev, dma_handle, size, |
102 | (enum dma_data_direction) direction); | ||
164 | } | 103 | } |
165 | 104 | ||
166 | static inline void | 105 | static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev, |
167 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, | 106 | dma_addr_t dma_handle, |
168 | size_t size, int direction) | 107 | size_t size, int direction) |
169 | { | 108 | { |
170 | /* No flushing needed to sync cpu writes to the device. */ | 109 | /* No flushing needed to sync cpu writes to the device. */ |
171 | BUG_ON(direction == PCI_DMA_NONE); | ||
172 | } | 110 | } |
173 | 111 | ||
174 | /* Make physical memory consistent for a set of streaming | 112 | static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, |
175 | * mode DMA translations after a transfer. | 113 | struct scatterlist *sg, |
176 | * | 114 | int nents, int direction) |
177 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | ||
178 | * same rules and usage. | ||
179 | */ | ||
180 | static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) | ||
181 | { | 115 | { |
182 | pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction); | 116 | dma_sync_sg_for_cpu(&pdev->dev, sg, nents, |
117 | (enum dma_data_direction) direction); | ||
183 | } | 118 | } |
184 | 119 | ||
185 | static inline void | 120 | static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev, |
186 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, | 121 | struct scatterlist *sg, |
187 | int nelems, int direction) | 122 | int nelems, int direction) |
188 | { | 123 | { |
189 | /* No flushing needed to sync cpu writes to the device. */ | 124 | /* No flushing needed to sync cpu writes to the device. */ |
190 | BUG_ON(direction == PCI_DMA_NONE); | ||
191 | } | 125 | } |
192 | 126 | ||
193 | /* Return whether the given PCI device DMA address mask can | 127 | /* Return whether the given PCI device DMA address mask can |
@@ -206,11 +140,9 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
206 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) | 140 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) |
207 | #define PCI64_ADDR_BASE 0xfffc000000000000UL | 141 | #define PCI64_ADDR_BASE 0xfffc000000000000UL |
208 | 142 | ||
209 | #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) | ||
210 | |||
211 | static inline int pci_dma_mapping_error(dma_addr_t dma_addr) | 143 | static inline int pci_dma_mapping_error(dma_addr_t dma_addr) |
212 | { | 144 | { |
213 | return (dma_addr == PCI_DMA_ERROR_CODE); | 145 | return dma_mapping_error(dma_addr); |
214 | } | 146 | } |
215 | 147 | ||
216 | #ifdef CONFIG_PCI | 148 | #ifdef CONFIG_PCI |
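After this rewrite the pci_* DMA helpers are thin inline shims over the generic calls, so existing PCI drivers keep compiling unchanged while new code can target struct device directly. A small invented sketch of that equivalence follows; the function and its arguments are placeholders.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Invented fragment: both pairs of calls reach the same dma_ops
 * handlers; the pci_* versions only add &pdev->dev and cast the
 * direction flag.
 */
static void example_equivalent_mappings(struct pci_dev *pdev, void *buf,
                                        size_t len)
{
    dma_addr_t a, b;

    a = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
    if (!pci_dma_mapping_error(a))
        pci_unmap_single(pdev, a, len, PCI_DMA_FROMDEVICE);

    b = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
    if (!dma_mapping_error(b))
        dma_unmap_single(&pdev->dev, b, len, DMA_FROM_DEVICE);
}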
diff --git a/include/asm-sparc64/sbus.h b/include/asm-sparc64/sbus.h
index 7efd49d31b..0151cad486 100644
--- a/include/asm-sparc64/sbus.h
+++ b/include/asm-sparc64/sbus.h
@@ -1,7 +1,6 @@
1 | /* $Id: sbus.h,v 1.14 2000/02/18 13:50:55 davem Exp $ | 1 | /* sbus.h: Defines for the Sun SBus. |
2 | * sbus.h: Defines for the Sun SBus. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #ifndef _SPARC64_SBUS_H | 6 | #ifndef _SPARC64_SBUS_H |
@@ -69,7 +68,6 @@ struct sbus_dev {
69 | /* This struct describes the SBus(s) found on this machine. */ | 68 | /* This struct describes the SBus(s) found on this machine. */ |
70 | struct sbus_bus { | 69 | struct sbus_bus { |
71 | struct of_device ofdev; | 70 | struct of_device ofdev; |
72 | void *iommu; /* Opaque IOMMU cookie */ | ||
73 | struct sbus_dev *devices; /* Tree of SBUS devices */ | 71 | struct sbus_dev *devices; /* Tree of SBUS devices */ |
74 | struct sbus_bus *next; /* Next SBUS in system */ | 72 | struct sbus_bus *next; /* Next SBUS in system */ |
75 | int prom_node; /* OBP node of SBUS */ | 73 | int prom_node; /* OBP node of SBUS */ |
@@ -102,9 +100,18 @@ extern struct sbus_bus *sbus_root;
102 | extern void sbus_set_sbus64(struct sbus_dev *, int); | 100 | extern void sbus_set_sbus64(struct sbus_dev *, int); |
103 | extern void sbus_fill_device_irq(struct sbus_dev *); | 101 | extern void sbus_fill_device_irq(struct sbus_dev *); |
104 | 102 | ||
105 | /* These yield IOMMU mappings in consistent mode. */ | 103 | static inline void *sbus_alloc_consistent(struct sbus_dev *sdev , size_t size, |
106 | extern void *sbus_alloc_consistent(struct sbus_dev *, size_t, dma_addr_t *dma_addrp); | 104 | dma_addr_t *dma_handle) |
107 | extern void sbus_free_consistent(struct sbus_dev *, size_t, void *, dma_addr_t); | 105 | { |
106 | return dma_alloc_coherent(&sdev->ofdev.dev, size, | ||
107 | dma_handle, GFP_ATOMIC); | ||
108 | } | ||
109 | |||
110 | static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size, | ||
111 | void *vaddr, dma_addr_t dma_handle) | ||
112 | { | ||
113 | return dma_free_coherent(&sdev->ofdev.dev, size, vaddr, dma_handle); | ||
114 | } | ||
108 | 115 | ||
109 | #define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL | 116 | #define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL |
110 | #define SBUS_DMA_TODEVICE DMA_TO_DEVICE | 117 | #define SBUS_DMA_TODEVICE DMA_TO_DEVICE |
@@ -112,18 +119,67 @@ extern void sbus_free_consistent(struct sbus_dev *, size_t, void *, dma_addr_t);
112 | #define SBUS_DMA_NONE DMA_NONE | 119 | #define SBUS_DMA_NONE DMA_NONE |
113 | 120 | ||
114 | /* All the rest use streaming mode mappings. */ | 121 | /* All the rest use streaming mode mappings. */ |
115 | extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int); | 122 | static inline dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, |
116 | extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int); | 123 | size_t size, int direction) |
117 | extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int); | 124 | { |
118 | extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int); | 125 | return dma_map_single(&sdev->ofdev.dev, ptr, size, |
126 | (enum dma_data_direction) direction); | ||
127 | } | ||
128 | |||
129 | static inline void sbus_unmap_single(struct sbus_dev *sdev, | ||
130 | dma_addr_t dma_addr, size_t size, | ||
131 | int direction) | ||
132 | { | ||
133 | dma_unmap_single(&sdev->ofdev.dev, dma_addr, size, | ||
134 | (enum dma_data_direction) direction); | ||
135 | } | ||
136 | |||
137 | static inline int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, | ||
138 | int nents, int direction) | ||
139 | { | ||
140 | return dma_map_sg(&sdev->ofdev.dev, sg, nents, | ||
141 | (enum dma_data_direction) direction); | ||
142 | } | ||
143 | |||
144 | static inline void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, | ||
145 | int nents, int direction) | ||
146 | { | ||
147 | dma_unmap_sg(&sdev->ofdev.dev, sg, nents, | ||
148 | (enum dma_data_direction) direction); | ||
149 | } | ||
119 | 150 | ||
120 | /* Finally, allow explicit synchronization of streamable mappings. */ | 151 | /* Finally, allow explicit synchronization of streamable mappings. */ |
121 | extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int); | 152 | static inline void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, |
153 | dma_addr_t dma_handle, | ||
154 | size_t size, int direction) | ||
155 | { | ||
156 | dma_sync_single_for_cpu(&sdev->ofdev.dev, dma_handle, size, | ||
157 | (enum dma_data_direction) direction); | ||
158 | } | ||
122 | #define sbus_dma_sync_single sbus_dma_sync_single_for_cpu | 159 | #define sbus_dma_sync_single sbus_dma_sync_single_for_cpu |
123 | extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int); | 160 | |
124 | extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int); | 161 | static inline void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, |
162 | dma_addr_t dma_handle, | ||
163 | size_t size, int direction) | ||
164 | { | ||
165 | /* No flushing needed to sync cpu writes to the device. */ | ||
166 | } | ||
167 | |||
168 | static inline void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, | ||
169 | struct scatterlist *sg, | ||
170 | int nents, int direction) | ||
171 | { | ||
172 | dma_sync_sg_for_cpu(&sdev->ofdev.dev, sg, nents, | ||
173 | (enum dma_data_direction) direction); | ||
174 | } | ||
125 | #define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu | 175 | #define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu |
126 | extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int); | 176 | |
177 | static inline void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, | ||
178 | struct scatterlist *sg, | ||
179 | int nents, int direction) | ||
180 | { | ||
181 | /* No flushing needed to sync cpu writes to the device. */ | ||
182 | } | ||
127 | 183 | ||
128 | extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *); | 184 | extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *); |
129 | extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *); | 185 | extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *); |
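The SBUS helpers get the same treatment: each one becomes an inline forwarder to the corresponding dma_* call on &sdev->ofdev.dev, so SBUS drivers now ride the common dma_ops table without changing their call sites. The fragment below is an invented driver sketch showing that unchanged usage.

#include <asm/sbus.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Invented SBUS driver fragment: the calls look exactly like the old
 * extern-based API, but the work is now done by the generic DMA layer
 * on &sdev->ofdev.dev.
 */
static int example_sbus_rx_setup(struct sbus_dev *sdev, void *buf,
                                 size_t len, dma_addr_t *mapping)
{
    *mapping = sbus_map_single(sdev, buf, len, SBUS_DMA_FROMDEVICE);
    if (dma_mapping_error(*mapping))
        return -ENOMEM;

    /* After the device has filled the buffer: */
    sbus_dma_sync_single_for_cpu(sdev, *mapping, len, SBUS_DMA_FROMDEVICE);
    return 0;
}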
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9756fc102a..a47b8025d3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -264,7 +264,7 @@ static inline void set_capacity(struct gendisk *disk, sector_t size)
264 | 264 | ||
265 | #ifdef CONFIG_SOLARIS_X86_PARTITION | 265 | #ifdef CONFIG_SOLARIS_X86_PARTITION |
266 | 266 | ||
267 | #define SOLARIS_X86_NUMSLICE 8 | 267 | #define SOLARIS_X86_NUMSLICE 16 |
268 | #define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL) | 268 | #define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL) |
269 | 269 | ||
270 | struct solaris_x86_slice { | 270 | struct solaris_x86_slice { |
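Bumping SOLARIS_X86_NUMSLICE from 8 to 16 matters because the partition code walks slice numbers up to that constant, so the old value silently dropped the upper half of a 16-slice Solaris x86 VTOC. The sketch below is loose and illustrative, not the actual msdos.c parser: the function name and the printk are stand-ins, while the field names follow the solaris_x86_vtoc/solaris_x86_slice definitions in this header.

#include <linux/genhd.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Illustrative walk over the VTOC slice table, bounded by the constant
 * raised above; real partition parsing does more validation before
 * registering each slice.
 */
static void example_walk_solaris_slices(struct solaris_x86_vtoc *v)
{
    int i;

    for (i = 0; i < SOLARIS_X86_NUMSLICE; i++) {
        struct solaris_x86_slice *s = &v->v_slice[i];

        if (s->s_size == 0)
            continue;       /* unused slice */

        printk(KERN_INFO "slice %d: start %u, %u blocks\n", i,
               le32_to_cpu(s->s_start), le32_to_cpu(s->s_size));
    }
}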