Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h | 145
-rw-r--r--  arch/sparc/include/asm/irq_64.h      |   4
-rw-r--r--  arch/sparc/include/asm/pci.h         |   3
-rw-r--r--  arch/sparc/include/asm/pci_32.h      | 105
-rw-r--r--  arch/sparc/include/asm/pci_64.h      |  88
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h |  12
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h |  28
7 files changed, 47 insertions(+), 338 deletions(-)
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 204e4bf64438..5a8c308e2b5c 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
 
 #include <linux/scatterlist.h>
 #include <linux/mm.h>
+#include <linux/dma-debug.h>
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h)	(1)
 
-struct dma_ops {
-	void *(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-	dma_addr_t (*map_page)(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction direction);
-	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
-			   size_t size,
-			   enum dma_data_direction direction);
-	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction direction);
-	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-			 int nhwentries,
-			 enum dma_data_direction direction);
-	void (*sync_single_for_cpu)(struct device *dev,
-				    dma_addr_t dma_handle, size_t size,
-				    enum dma_data_direction direction);
-	void (*sync_single_for_device)(struct device *dev,
-				       dma_addr_t dma_handle, size_t size,
-				       enum dma_data_direction direction);
-	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
-				int nelems,
-				enum dma_data_direction direction);
-	void (*sync_sg_for_device)(struct device *dev,
-				   struct scatterlist *sg, int nents,
-				   enum dma_data_direction dir);
-};
-extern const struct dma_ops *dma_ops;
+extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct bus_type pci_bus_type;
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
-{
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					 size_t size,
-					 enum dma_data_direction direction)
-{
-	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
-				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
-				 direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	dma_ops->unmap_page(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	return dma_ops->map_page(dev, page, offset, size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	dma_ops->unmap_page(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
-{
-	return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction)
-{
-	dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t dma_handle, size_t size,
-					   enum dma_data_direction direction)
-{
-	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t dma_handle,
-					      size_t size,
-					      enum dma_data_direction direction)
-{
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(dev, dma_handle, size,
-						direction);
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg, int nelems,
-				       enum dma_data_direction direction)
-{
-	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg, int nelems,
-					  enum dma_data_direction direction)
-{
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t dma_handle,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t dma_handle,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
-}
-
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+	if (dev->bus == &pci_bus_type)
+		return &pci32_dma_ops;
+#endif
+	return dma_ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
+
+	cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
 
-
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return (dma_addr == DMA_ERROR_CODE);
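Note (not part of the patch): after this change dma_map_single(), dma_map_sg() and the dma_sync_* helpers are no longer open-coded in this header; they come from asm-generic/dma-mapping-common.h and dispatch through get_dma_ops(). A minimal sketch of what the generic dma_map_single() path roughly expands to — the *_sketch name is hypothetical, and the real header additionally threads a dma_attrs pointer through and calls the dma-debug hooks:

/* Sketch only: how a driver's dma_map_single() reaches the backend ops
 * selected by get_dma_ops() (pci32_dma_ops for sparc32 PCI, else dma_ops). */
static inline dma_addr_t dma_map_single_sketch(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size, dir, NULL);
}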
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 1934f2cbf513..a0b443cb3c1f 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index 6e14fd179335..d9c031f9910f 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -5,4 +5,7 @@
 #else
 #include <asm/pci_32.h>
 #endif
+
+#include <asm-generic/pci-dma-compat.h>
+
 #endif
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index b41c4c198159..ac0e8369fd97 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-#include <asm/scatterlist.h>
-
 struct pci_dev;
 
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings assosciated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
- */
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-/*
- * Same as above, only with pages instead of mapped addresses.
- */
-extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-			unsigned long offset, size_t size, int direction);
-extern void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction);
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the device
- * again owns the buffer.
- */
-extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
-	return 1;
-}
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 }
 #endif
 
-#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
-					dma_addr_t dma_addr)
-{
-	return (dma_addr == PCI_DMA_ERROR_CODE);
-}
-
 struct device_node;
 extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
 
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 7a1e3566e59c..5cc9f6aa5494 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
-					 dma_addr_t *dma_handle)
-{
-	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
-				       void *vaddr, dma_addr_t dma_handle)
-{
-	return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
-					size_t size, int direction)
-{
-	return dma_map_single(&pdev->dev, ptr, size,
-			      (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
-				    size_t size, int direction)
-{
-	dma_unmap_single(&pdev->dev, dma_addr, size,
-			 (enum dma_data_direction) direction);
-}
-
-#define pci_map_page(dev, page, off, size, dir) \
-	pci_map_single(dev, (page_address(page) + (off)), size, dir)
-#define pci_unmap_page(dev,addr,sz,dir) \
-	pci_unmap_single(dev,addr,sz,dir)
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
-			     int nents, int direction)
-{
-	return dma_map_sg(&pdev->dev, sg, nents,
-			  (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
-				int nents, int direction)
-{
-	dma_unmap_sg(&pdev->dev, sg, nents,
-		     (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
-					       dma_addr_t dma_handle,
-					       size_t size, int direction)
-{
-	dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
-				(enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
-						  dma_addr_t dma_handle,
-						  size_t size, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device.  */
-}
-
-static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
-					   struct scatterlist *sg,
-					   int nents, int direction)
-{
-	dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
-			    (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
-					      struct scatterlist *sg,
-					      int nelems, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device.  */
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
-
 /* PCI IOMMU mapping bypass support. */
 
 /* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
 #define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
 #define PCI64_ADDR_BASE		0xfffc000000000000UL
 
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
-					dma_addr_t dma_addr)
-{
-	return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
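Note (not part of the patch): the pci_map_single()/pci_unmap_single()/pci_dma_sync_* wrappers deleted from pci_32.h and pci_64.h are now provided generically by asm-generic/pci-dma-compat.h, which asm/pci.h pulls in above. Those wrappers are thin inlines over the dma_* API, roughly of this shape — a sketch with a hypothetical *_sketch name, not the verbatim header (the real one also tolerates a NULL hwdev):

/* Sketch: generic PCI DMA compat wrapper forwarding to the dma_* API. */
static inline dma_addr_t pci_map_single_sketch(struct pci_dev *hwdev, void *ptr,
					       size_t size, int direction)
{
	return dma_map_single(&hwdev->dev, ptr, size,
			      (enum dma_data_direction) direction);
}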
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 46f91ab66a50..857630cff636 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void __read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw)
 #define __raw_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__read_lock(lock); \
+	arch_read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void __read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw)
 #define __raw_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__read_unlock(lock); \
+	arch_read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (val == 0);
 }
 
-static inline int __read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	register int res asm("o0");
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw)
 ({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = __read_trylock(lock); \
+	res = arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
 })
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index f6b2b92ad8d2..43e514783582 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline __read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline __read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(raw_rwlock_t *lock)
 {
 	int tmp1, tmp2;
 
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock)
 	return tmp1;
 }
 
-static void inline __read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(raw_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stw		%%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline __write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock)
 	return result;
 }
 
-#define __raw_read_lock(p)	__read_lock(p)
-#define __raw_read_lock_flags(p, f) __read_lock(p)
-#define __raw_read_trylock(p)	__read_trylock(p)
-#define __raw_read_unlock(p)	__read_unlock(p)
-#define __raw_write_lock(p)	__write_lock(p)
-#define __raw_write_lock_flags(p, f) __write_lock(p)
-#define __raw_write_unlock(p)	__write_unlock(p)
-#define __raw_write_trylock(p)	__write_trylock(p)
+#define __raw_read_lock(p)	arch_read_lock(p)
+#define __raw_read_lock_flags(p, f) arch_read_lock(p)
+#define __raw_read_trylock(p)	arch_read_trylock(p)
+#define __raw_read_unlock(p)	arch_read_unlock(p)
+#define __raw_write_lock(p)	arch_write_lock(p)
+#define __raw_write_lock_flags(p, f) arch_write_lock(p)
+#define __raw_write_unlock(p)	arch_write_unlock(p)
+#define __raw_write_trylock(p)	arch_write_trylock(p)
 
 #define __raw_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
 #define __raw_write_can_lock(rw)	(!(rw)->lock)