author    David S. Miller <davem@davemloft.net>  2009-09-11 23:35:13 -0400
committer David S. Miller <davem@davemloft.net>  2009-09-11 23:35:13 -0400
commit    cabc5c0f7fa1342049042d6e147db5a73773955b (patch)
tree      2be09ae1777d580c7dfe05d6d5b76e57281ec447 /arch/sparc/kernel
parent    b73d884756303316ead4cd7dad51236b2a515a1a (diff)
parent    86d710146fb9975f04c505ec78caa43d227c1018 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/

Conflicts:
	arch/sparc/Kconfig
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/Makefile     |   2
-rw-r--r--  arch/sparc/kernel/dma.c        | 175
-rw-r--r--  arch/sparc/kernel/dma.h        |  14
-rw-r--r--  arch/sparc/kernel/iommu.c      |  20
-rw-r--r--  arch/sparc/kernel/ioport.c     | 190
-rw-r--r--  arch/sparc/kernel/irq_64.c     |   2
-rw-r--r--  arch/sparc/kernel/nmi.c        |   2
-rw-r--r--  arch/sparc/kernel/pci.c        |   2
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c  |  30
-rw-r--r--  arch/sparc/kernel/process_64.c |   4
-rw-r--r--  arch/sparc/kernel/signal_32.c  |   2
-rw-r--r--  arch/sparc/kernel/signal_64.c  |   3
12 files changed, 143 insertions(+), 303 deletions(-)
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index f96dc5761f74..247cc620cee5 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -63,7 +63,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
 obj-$(CONFIG_SPARC32) += devres.o
 devres-y := ../../../kernel/irq/devres.o
 
-obj-$(CONFIG_SPARC32) += dma.o
+obj-y += dma.o
 
 obj-$(CONFIG_SPARC32_PCI) += pcic.o
 
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index 524c32f97c55..e1ba8ee21b9a 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -1,178 +1,13 @@
-/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc.
- *
- * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
- */
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#endif
+#include <linux/dma-debug.h>
 
-#include "dma.h"
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)
 
-int dma_supported(struct device *dev, u64 mask)
+static int __init dma_init(void)
 {
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_dma_supported(to_pci_dev(dev), mask);
-#endif
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
-EXPORT_SYMBOL(dma_supported);
+fs_initcall(dma_init);
-
-int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-	return -EOPNOTSUPP;
-}
-EXPORT_SYMBOL(dma_set_mask);
-
-static void *dma32_alloc_coherent(struct device *dev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flag)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
-#endif
-	return sbus_alloc_consistent(dev, size, dma_handle);
-}
-
-static void dma32_free_coherent(struct device *dev, size_t size,
-				void *cpu_addr, dma_addr_t dma_handle)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_free_consistent(to_pci_dev(dev), size,
-				    cpu_addr, dma_handle);
-		return;
-	}
-#endif
-	sbus_free_consistent(dev, size, cpu_addr, dma_handle);
-}
-
-static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_map_page(to_pci_dev(dev), page, offset,
-				    size, (int)direction);
-#endif
-	return sbus_map_single(dev, page_address(page) + offset,
-			       size, (int)direction);
-}
-
-static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
-			     size_t size, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_unmap_page(to_pci_dev(dev), dma_address,
-			       size, (int)direction);
-		return;
-	}
-#endif
-	sbus_unmap_single(dev, dma_address, size, (int)direction);
-}
-
-static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
-			int nents, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-#endif
-	return sbus_map_sg(dev, sg, nents, direction);
-}
-
-void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
-		    int nents, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
-		return;
-	}
-#endif
-	sbus_unmap_sg(dev, sg, nents, (int)direction);
-}
-
-static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				      size_t size,
-				      enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
-					    size, (int)direction);
-		return;
-	}
-#endif
-	sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
-}
-
-static void dma32_sync_single_for_device(struct device *dev,
-					 dma_addr_t dma_handle, size_t size,
-					 enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
-					       size, (int)direction);
-		return;
-	}
-#endif
-	sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
-}
-
-static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				  int nelems, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
-					nelems, (int)direction);
-		return;
-	}
-#endif
-	BUG();
-}
-
-static void dma32_sync_sg_for_device(struct device *dev,
-				     struct scatterlist *sg, int nelems,
-				     enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
-					   nelems, (int)direction);
-		return;
-	}
-#endif
-	BUG();
-}
-
-static const struct dma_ops dma32_dma_ops = {
-	.alloc_coherent = dma32_alloc_coherent,
-	.free_coherent = dma32_free_coherent,
-	.map_page = dma32_map_page,
-	.unmap_page = dma32_unmap_page,
-	.map_sg = dma32_map_sg,
-	.unmap_sg = dma32_unmap_sg,
-	.sync_single_for_cpu = dma32_sync_single_for_cpu,
-	.sync_single_for_device = dma32_sync_single_for_device,
-	.sync_sg_for_cpu = dma32_sync_sg_for_cpu,
-	.sync_sg_for_device = dma32_sync_sg_for_device,
-};
-
-const struct dma_ops *dma_ops = &dma32_dma_ops;
-EXPORT_SYMBOL(dma_ops);
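
The replacement dma.c above keeps only the DMA-debug registration because the PCI/SBUS dispatch it used to do by hand now happens behind the common struct dma_map_ops indirection. As a rough sketch of that scheme (an assumption based on the asm-generic dma-mapping-common.h approach this merge adopts, not code from this commit), a wrapper such as dma_map_single() reduces to:

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);	/* per-device ops table */

	/* map_page is the one primitive; map_single is layered on it */
	return ops->map_page(dev, virt_to_page(cpu_addr),
			     offset_in_page(cpu_addr), size, dir, NULL);
}

With the ops table doing the bus dispatch, a single dma.c can be built on both sparc32 and sparc64, which is what the Makefile hunk's obj-y change reflects.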
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h
deleted file mode 100644
index f8d8951adb53..000000000000
--- a/arch/sparc/kernel/dma.h
+++ /dev/null
@@ -1,14 +0,0 @@
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp);
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba);
-dma_addr_t sbus_map_single(struct device *dev, void *va,
-			   size_t len, int direction);
-void sbus_unmap_single(struct device *dev, dma_addr_t ba,
-		       size_t n, int direction);
-int sbus_map_sg(struct device *dev, struct scatterlist *sg,
-		int n, int direction);
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int n, int direction);
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
-				  size_t size, int direction);
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba,
-				     size_t size, int direction);
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 0aeaefe696b9..7690cc219ecc 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 
 static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction)
+				  enum dma_data_direction direction,
+				  struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -474,7 +475,8 @@ do_flush_sync:
 }
 
 static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
-			      size_t sz, enum dma_data_direction direction)
+			      size_t sz, enum dma_data_direction direction,
+			      struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 }
 
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
-			 int nelems, enum dma_data_direction direction)
+			 int nelems, enum dma_data_direction direction,
+			 struct dma_attrs *attrs)
 {
 	struct scatterlist *s, *outs, *segstart;
 	unsigned long flags, handle, prot, ctx;
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
 }
 
 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			    int nelems, enum dma_data_direction direction)
+			    int nelems, enum dma_data_direction direction,
+			    struct dma_attrs *attrs)
 {
 	unsigned long flags, ctx;
 	struct scatterlist *sg;
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static const struct dma_ops sun4u_dma_ops = {
+static struct dma_map_ops sun4u_dma_ops = {
 	.alloc_coherent = dma_4u_alloc_coherent,
 	.free_coherent = dma_4u_free_coherent,
 	.map_page = dma_4u_map_page,
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = {
 	.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
 };
 
-const struct dma_ops *dma_ops = &sun4u_dma_ops;
+struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
+extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+
 int dma_supported(struct device *dev, u64 device_mask)
 {
 	struct iommu *iommu = dev->archdata.iommu;
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask)
 
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type)
-		return pci_dma_supported(to_pci_dev(dev), device_mask);
+		return pci64_dma_supported(to_pci_dev(dev), device_mask);
 #endif
 
 	return 0;
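
Note the new struct dma_attrs * parameter threaded through every sun4u map/unmap callback above. As a hypothetical illustration (not from this diff) of why that argument exists, assuming the linux/dma-attrs.h helpers of this era, an implementation could consult optional per-mapping attributes like so:

#include <linux/dma-attrs.h>

/* Illustration only: a mapping callback may test optional attributes;
 * plain dma_map_page()-style calls simply pass attrs == NULL. */
static bool want_weak_ordering(struct dma_attrs *attrs)
{
	return attrs && dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs);
}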
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index e71ce79d8c15..9f61fd8cbb7b 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -49,8 +49,6 @@
 #include <asm/iommu.h>
 #include <asm/io-unit.h>
 
-#include "dma.h"
-
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -247,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
  * Typically devices use them for control blocks.
  * CPU may access them without any explicit flushing.
  */
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+static void *sbus_alloc_coherent(struct device *dev, size_t len,
+				 dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct of_device *op = to_of_device(dev);
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -300,7 +299,8 @@ err_nopages:
 	return NULL;
 }
 
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+			       dma_addr_t ba)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -318,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 
 	n = (n + PAGE_SIZE-1) & PAGE_MASK;
 	if ((res->end-res->start)+1 != n) {
-		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 			(long)((res->end-res->start)+1), n);
 		return;
 	}
@@ -338,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
  * CPU view of this memory may be inconsistent with
  * a device view and explicit flushing is necessary.
  */
-dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
+static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+				unsigned long offset, size_t len,
+				enum dma_data_direction dir,
+				struct dma_attrs *attrs)
 {
+	void *va = page_address(page) + offset;
+
 	/* XXX why are some lengths signed, others unsigned? */
 	if (len <= 0) {
 		return 0;
@@ -351,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
 	return mmu_get_scsi_one(dev, va, len);
 }
 
-void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
+static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_one(dev, ba, n);
 }
 
-int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_get_scsi_sgl(dev, sg, n);
 
@@ -367,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
 	return n;
 }
 
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_sgl(dev, sg, n);
 }
 
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				 int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				    int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
+struct dma_map_ops sbus_dma_ops = {
+	.alloc_coherent = sbus_alloc_coherent,
+	.free_coherent = sbus_free_coherent,
+	.map_page = sbus_map_page,
+	.unmap_page = sbus_unmap_page,
+	.map_sg = sbus_map_sg,
+	.unmap_sg = sbus_unmap_sg,
+	.sync_sg_for_cpu = sbus_sync_sg_for_cpu,
+	.sync_sg_for_device = sbus_sync_sg_for_device,
+};
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -396,7 +422,8 @@ arch_initcall(sparc_register_ioport);
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
  */
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+static void *pci32_alloc_coherent(struct device *dev, size_t len,
+				  dma_addr_t *pba, gfp_t gfp)
 {
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
 	unsigned long va;
@@ -440,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
 }
-EXPORT_SYMBOL(pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.
  * cpu_addr is what was returned from pci_alloc_consistent,
@@ -450,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
 * References to the memory and mappings associated with cpu_addr/dma_addr
  * past this call are illegal.
  */
-void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+				dma_addr_t ba)
 {
 	struct resource *res;
 	unsigned long pgp;
@@ -482,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
 
 	free_pages(pgp, get_order(n));
 }
-EXPORT_SYMBOL(pci_free_consistent);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-			  int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* IIep is write-through, not flushing. */
-	return virt_to_phys(ptr);
-}
-EXPORT_SYMBOL(pci_map_single);
-
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
-		      int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-				   (size + PAGE_SIZE-1) & PAGE_MASK);
-	}
-}
-EXPORT_SYMBOL(pci_unmap_single);
 
 /*
  * Same as pci_map_single, but with pages.
  */
-dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-			unsigned long offset, size_t size, int direction)
+static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
-EXPORT_SYMBOL(pci_map_page);
-
-void pci_unmap_page(struct pci_dev *hwdev,
-		    dma_addr_t dma_address, size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* mmu_inval_dma_area XXX */
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA. This is the scather-gather version of the
@@ -552,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page);
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-	       int direction)
+static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
 		BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -567,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	}
 	return nents;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-		  int direction)
+static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -589,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_unmap_sg);
 
 /* Make physical memory consistent for a single
  * streaming mode DMA translation before or after a transfer.
@@ -601,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
  * must first perform a pci_dma_sync_for_device, and then the
  * device again owns the buffer.
  */
-void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+				      size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 				   (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
 
-void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+					 size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
 				   (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 
 /* Make physical memory consistent for a set of streaming
  * mode DMA translations after a transfer.
@@ -627,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+				  int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -642,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
+				     int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -659,9 +639,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+struct dma_map_ops pci32_dma_ops = {
+	.alloc_coherent = pci32_alloc_coherent,
+	.free_coherent = pci32_free_coherent,
+	.map_page = pci32_map_page,
+	.map_sg = pci32_map_sg,
+	.unmap_sg = pci32_unmap_sg,
+	.sync_single_for_cpu = pci32_sync_single_for_cpu,
+	.sync_single_for_device = pci32_sync_single_for_device,
+	.sync_sg_for_cpu = pci32_sync_sg_for_cpu,
+	.sync_sg_for_device = pci32_sync_sg_for_device,
+};
+EXPORT_SYMBOL(pci32_dma_ops);
+
 #endif /* CONFIG_PCI */
 
+/*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly.  For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return 1;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
+
 #ifdef CONFIG_PROC_FS
 
 static int sparc_io_proc_show(struct seq_file *m, void *v)
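
With sbus_dma_ops exported as the default dma_ops and pci32_dma_ops alongside it, sparc32 ends up with two ops tables selected per bus. A minimal sketch of the selector (an assumption about arch/sparc's dma-mapping.h, which this page does not show):

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_PCI
	/* PCI devices on sparc32 use the write-through pci32 ops */
	if (dev->bus == &pci_bus_type)
		return &pci32_dma_ops;
#endif
	/* everything else (SBUS) goes through the exported dma_ops */
	return dma_ops;
}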
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index f0ee79055409..8daab33fc17d 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void)
  * Therefore you cannot make any OBP calls, not even prom_printf,
  * from these two routines.
  */
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
 	unsigned long num_entries = (qmask + 1) / 64;
 	unsigned long status;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 391a6ed9a184..378eb53e0776 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -113,7 +113,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 	}
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
 		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+		if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
 			die_nmi("BUG: NMI Watchdog detected LOCKUP",
 				regs, panic_on_timeout);
 	} else {
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 57859ad23547..c68648662802 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
 	pci_dev_put(ali_isa_bridge);
 }
 
-int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
 {
 	u64 dma_addr_mask;
 
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 2485eaa23101..23c33ff9c31e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 
 static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction)
+				  enum dma_data_direction direction,
+				  struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	unsigned long flags, npages, oaddr;
@@ -296,7 +297,8 @@ iommu_map_fail:
 }
 
 static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
-			      size_t sz, enum dma_data_direction direction)
+			      size_t sz, enum dma_data_direction direction,
+			      struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
-			 int nelems, enum dma_data_direction direction)
+			 int nelems, enum dma_data_direction direction,
+			 struct dma_attrs *attrs)
 {
 	struct scatterlist *s, *outs, *segstart;
 	unsigned long flags, handle, prot;
@@ -478,7 +481,8 @@ iommu_map_failed:
 }
 
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			    int nelems, enum dma_data_direction direction)
+			    int nelems, enum dma_data_direction direction,
+			    struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct scatterlist *sg;
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void dma_4v_sync_single_for_cpu(struct device *dev,
-				       dma_addr_t bus_addr, size_t sz,
-				       enum dma_data_direction direction)
-{
-	/* Nothing to do... */
-}
-
-static void dma_4v_sync_sg_for_cpu(struct device *dev,
-				   struct scatterlist *sglist, int nelems,
-				   enum dma_data_direction direction)
-{
-	/* Nothing to do... */
-}
-
-static const struct dma_ops sun4v_dma_ops = {
+static struct dma_map_ops sun4v_dma_ops = {
 	.alloc_coherent = dma_4v_alloc_coherent,
 	.free_coherent = dma_4v_free_coherent,
 	.map_page = dma_4v_map_page,
 	.unmap_page = dma_4v_unmap_page,
 	.map_sg = dma_4v_map_sg,
 	.unmap_sg = dma_4v_unmap_sg,
-	.sync_single_for_cpu = dma_4v_sync_single_for_cpu,
-	.sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
 };
 
 static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
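
sun4v can delete its empty dma_4v_sync_*() stubs because the generic wrappers skip callbacks left NULL in the ops table. A sketch of that dispatch (an assumption modeled on dma-mapping-common.h, not taken from this commit):

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t addr, size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* a NULL hook means the platform needs no CPU-side sync */
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
}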
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94e7724..18d67854a1b8 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-	__trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 181d069a2d44..7ce1a1005b1d 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index ec82d76dc6f2..647afbda7ae1 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 }
+