author		John Keller <jpk@sgi.com>	2008-11-24 17:47:17 -0500
committer	Tony Luck <tony.luck@intel.com>	2009-01-15 13:42:16 -0500
commit		175add1981e53d22caba8f42d5f924a4de507b6c (patch)
tree		71ddbf7f46d18222f4f5867d7261b335677c6af6 /arch/ia64
parent		a6a3bb5c88d706c5efe0c86b3b669ac9ee012b3f (diff)
[IA64] SN specific version of dma_get_required_mask()
Create a platform specific version of dma_get_required_mask() for ia64 SN Altix. All SN Altix platforms support 64 bit DMA addressing regardless of the size of system memory. Create an ia64 machvec for dma_get_required_mask, with the SN version unconditionally returning DMA_64BIT_MASK.

Signed-off-by: John Keller <jpk@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
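For context (not part of this patch): the value returned by dma_get_required_mask() is what a driver typically feeds into dma_set_mask() when deciding how wide a DMA mask to request. A minimal sketch of that consumer pattern, using a hypothetical probe helper, might look like:

#include <linux/dma-mapping.h>

/* Hypothetical consumer of dma_get_required_mask(), for illustration only. */
static int example_dma_setup(struct device *dev)
{
	u64 required = dma_get_required_mask(dev);

	/* Try the mask the platform says is needed to reach all of RAM... */
	if (dma_set_mask(dev, required) == 0)
		return 0;

	/* ...otherwise fall back to 32-bit DMA and rely on bounce buffering. */
	return dma_set_mask(dev, DMA_32BIT_MASK);
}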
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/dma-mapping.h	 2
-rw-r--r--	arch/ia64/include/asm/machvec.h		 7
-rw-r--r--	arch/ia64/include/asm/machvec_init.h	 1
-rw-r--r--	arch/ia64/include/asm/machvec_sn2.h	 2
-rw-r--r--	arch/ia64/pci/pci.c			27
-rw-r--r--	arch/ia64/sn/pci/pci_dma.c		 6
6 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index bbab7e2b0fc9..1f912d927585 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -9,6 +9,8 @@
 #include <linux/scatterlist.h>
 #include <asm/swiotlb.h>
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 struct dma_mapping_ops {
 	int (*mapping_error)(struct device *dev,
 			     dma_addr_t dma_addr);
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 59c17e446683..fe87b2121707 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -62,6 +62,7 @@ typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t
 typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
 typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef u64 ia64_mv_dma_get_required_mask (struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_. Platforms are
@@ -159,6 +160,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
 # define platform_dma_mapping_error		ia64_mv.dma_mapping_error
 # define platform_dma_supported		ia64_mv.dma_supported
+# define platform_dma_get_required_mask	ia64_mv.dma_get_required_mask
 # define platform_irq_to_vector		ia64_mv.irq_to_vector
 # define platform_local_vector_to_irq		ia64_mv.local_vector_to_irq
 # define platform_pci_get_legacy_mem		ia64_mv.pci_get_legacy_mem
@@ -213,6 +215,7 @@ struct ia64_machine_vector {
 	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
 	ia64_mv_dma_mapping_error *dma_mapping_error;
 	ia64_mv_dma_supported *dma_supported;
+	ia64_mv_dma_get_required_mask *dma_get_required_mask;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -263,6 +266,7 @@ struct ia64_machine_vector {
 	platform_dma_sync_sg_for_device,	\
 	platform_dma_mapping_error,		\
 	platform_dma_supported,			\
+	platform_dma_get_required_mask,		\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -366,6 +370,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 #ifndef platform_dma_supported
 # define platform_dma_supported		swiotlb_dma_supported
 #endif
+#ifndef platform_dma_get_required_mask
+# define platform_dma_get_required_mask	ia64_dma_get_required_mask
+#endif
 #ifndef platform_irq_to_vector
 # define platform_irq_to_vector		__ia64_irq_to_vector
 #endif
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index ef964b286842..37a469849ab9 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -3,6 +3,7 @@
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
 extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index 781308ea7b88..f1a6e0d6dfa5 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -67,6 +67,7 @@ extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
 extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
 extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
 extern ia64_mv_dma_supported sn_dma_supported;
+extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
 extern ia64_mv_migrate_t sn_migrate;
 extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
@@ -123,6 +124,7 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
 #define platform_dma_mapping_error		sn_dma_mapping_error
 #define platform_dma_supported			sn_dma_supported
+#define platform_dma_get_required_mask		sn_dma_get_required_mask
 #define platform_migrate			sn_migrate
 #define platform_kernel_launch_event		sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 211fcfd115f9..61f1af5c23c1 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -19,6 +19,7 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/bootmem.h>
 
 #include <asm/machvec.h>
 #include <asm/page.h>
@@ -748,6 +749,32 @@ static void __init set_pci_cacheline_size(void)
 	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
+u64 ia64_dma_get_required_mask(struct device *dev)
+{
+	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
+	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
+	u64 mask;
+
+	if (!high_totalram) {
+		/* convert to mask just covering totalram */
+		low_totalram = (1 << (fls(low_totalram) - 1));
+		low_totalram += low_totalram - 1;
+		mask = low_totalram;
+	} else {
+		high_totalram = (1 << (fls(high_totalram) - 1));
+		high_totalram += high_totalram - 1;
+		mask = (((u64)high_totalram) << 32) + 0xffffffff;
+	}
+	return mask;
+}
+EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
+
+u64 dma_get_required_mask(struct device *dev)
+{
+	return platform_dma_get_required_mask(dev);
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+
 static int __init pcibios_init(void)
 {
 	set_pci_cacheline_size();
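The generic ia64_dma_get_required_mask() above rounds the start address of the last page of RAM up to an all-ones mask via fls(). A stand-alone user-space sketch of the same rounding (assumptions: a 16 KiB page size and the hypothetical helper required_mask_for(); this illustrates the arithmetic, it is not kernel code) could look like:

#include <stdint.h>
#include <stdio.h>

/* Smear the highest set bit downwards so the result is all ones up to
 * the top of RAM, mirroring the fls()-based rounding in the fallback. */
static uint64_t required_mask_for(uint64_t max_pfn, unsigned int page_shift)
{
	uint64_t mask = (max_pfn - 1) << page_shift;

	mask |= mask >> 1;  mask |= mask >> 2;  mask |= mask >> 4;
	mask |= mask >> 8;  mask |= mask >> 16; mask |= mask >> 32;
	return mask;
}

int main(void)
{
	/* 2 GiB of RAM, 16 KiB pages -> 0x7fffffff (32-bit DMA suffices) */
	printf("%#llx\n", (unsigned long long)required_mask_for(1ULL << 17, 14));
	/* 8 GiB of RAM, 16 KiB pages -> 0x1ffffffff (needs more than 32 bits) */
	printf("%#llx\n", (unsigned long long)required_mask_for(1ULL << 19, 14));
	return 0;
}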
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 53ebb6484495..863f5017baae 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -356,6 +356,12 @@ int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 }
 EXPORT_SYMBOL(sn_dma_mapping_error);
 
+u64 sn_dma_get_required_mask(struct device *dev)
+{
+	return DMA_64BIT_MASK;
+}
+EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
+
 char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
 	if (!SN_PCIBUS_BUSSOFT(bus))