author		Arnd Bergmann <arnd@arndb.de>	2009-06-12 03:53:47 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2009-06-12 05:32:58 -0400
commit		5b02ee3d219f9e01b6e9146e25613822cfc2e5ce (patch)
tree		7ce9126738c3cf4b37d67170d0e4b34818c057a9 /arch/frv/include/asm
parent		26a28fa4fea5b8c65713aa50c124f76a88c7924d (diff)
parent		8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
asm-generic: merge branch 'master' of torvalds/linux-2.6
Fixes a merge conflict against the x86 tree caused by a fix to atomic.h
which I renamed to atomic_long.h.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/frv/include/asm')
-rw-r--r--	arch/frv/include/asm/bitops.h		 29
-rw-r--r--	arch/frv/include/asm/elf.h		  1
-rw-r--r--	arch/frv/include/asm/pci.h		  7
-rw-r--r--	arch/frv/include/asm/ptrace.h		 11
-rw-r--r--	arch/frv/include/asm/syscall.h		123
-rw-r--r--	arch/frv/include/asm/thread_info.h	 10
6 files changed, 156 insertions, 25 deletions
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 287f6f697ce2..50ae91b29674 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -112,7 +112,7 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig
 #define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
 #define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
 
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *ptr = addr;
 	unsigned long mask = 1UL << (nr & 31);
@@ -120,7 +120,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 	return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
 }
 
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *ptr = addr;
 	unsigned long mask = 1UL << (nr & 31);
@@ -128,7 +128,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 	return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
 }
 
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *ptr = addr;
 	unsigned long mask = 1UL << (nr & 31);
@@ -136,22 +136,22 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
 }
 
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(unsigned long nr, volatile void *addr)
 {
 	test_and_clear_bit(nr, addr);
 }
 
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(unsigned long nr, volatile void *addr)
 {
 	test_and_set_bit(nr, addr);
 }
 
-static inline void change_bit(int nr, volatile void * addr)
+static inline void change_bit(unsigned long nr, volatile void *addr)
 {
 	test_and_change_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile void * addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask;
@@ -161,7 +161,7 @@ static inline void __clear_bit(int nr, volatile void * addr)
 	*a &= ~mask;
 }
 
-static inline void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask;
@@ -171,7 +171,7 @@ static inline void __set_bit(int nr, volatile void * addr)
 	*a |= mask;
 }
 
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask;
@@ -181,7 +181,7 @@ static inline void __change_bit(int nr, volatile void *addr)
 	*a ^= mask;
 }
 
-static inline int __test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -193,7 +193,7 @@ static inline int __test_and_clear_bit(int nr, volatile void * addr)
 	return retval;
 }
 
-static inline int __test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -205,7 +205,7 @@ static inline int __test_and_set_bit(int nr, volatile void * addr)
 	return retval;
 }
 
-static inline int __test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -220,12 +220,13 @@ static inline int __test_and_change_bit(int nr, volatile void * addr)
 /*
  * This routine doesn't need to be atomic.
  */
-static inline int __constant_test_bit(int nr, const volatile void * addr)
+static inline int
+__constant_test_bit(unsigned long nr, const volatile void *addr)
 {
 	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
 }
 
-static inline int __test_bit(int nr, const volatile void * addr)
+static inline int __test_bit(unsigned long nr, const volatile void *addr)
 {
 	int * a = (int *) addr;
 	int mask;
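
The hunks above widen the bit-number parameter of the frv bit helpers from int to unsigned long, matching the prototypes the generic bitops code expects. A minimal user-space sketch of the same word/mask arithmetic follows; the name demo_test_and_set_bit and the 128-bit array are illustrative only, not part of the patch.

#include <stdio.h>

/* Models the frv helpers: bit nr lives in 32-bit word nr >> 5,
 * at position nr & 31 within that word. */
static int demo_test_and_set_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned int *a = addr;
	unsigned int mask = 1U << (nr & 31);
	unsigned int old = a[nr >> 5];

	a[nr >> 5] = old | mask;
	return (old & mask) != 0;
}

int main(void)
{
	unsigned int bitmap[4] = { 0 };	/* 128 bits */

	printf("%d\n", demo_test_and_set_bit(70, bitmap));	/* 0: bit was clear */
	printf("%d\n", demo_test_and_set_bit(70, bitmap));	/* 1: bit already set */
	return 0;
}
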
diff --git a/arch/frv/include/asm/elf.h b/arch/frv/include/asm/elf.h
index 7279ec07d62e..7bbf6e47f8c8 100644
--- a/arch/frv/include/asm/elf.h
+++ b/arch/frv/include/asm/elf.h
@@ -116,6 +116,7 @@ do { \
 } while(0)
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 #define ELF_FDPIC_CORE_EFLAGS	EF_FRV_FDPIC
 #define ELF_EXEC_PAGESIZE	16384
 
diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h
index 3ce227ba7744..492b5c4dfed6 100644
--- a/arch/frv/include/asm/pci.h
+++ b/arch/frv/include/asm/pci.h
@@ -81,8 +81,7 @@ static inline void pci_dma_sync_single(struct pci_dev *hwdev,
 					      dma_addr_t dma_handle,
 					      size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 
 	frv_cache_wback_inv((unsigned long)bus_to_virt(dma_handle),
 			    (unsigned long)bus_to_virt(dma_handle) + size);
@@ -99,9 +98,7 @@ static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
 				  int nelems, int direction)
 {
 	int i;
-
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 
 	for (i = 0; i < nelems; i++)
 		frv_cache_wback_inv(sg_dma_address(&sg[i]),
diff --git a/arch/frv/include/asm/ptrace.h b/arch/frv/include/asm/ptrace.h
index cf6934012b64..a54b535c9e49 100644
--- a/arch/frv/include/asm/ptrace.h
+++ b/arch/frv/include/asm/ptrace.h
@@ -65,6 +65,8 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
+struct task_struct;
+
 /*
  * we dedicate GR28 to keeping a pointer to the current exception frame
  * - gr28 is destroyed on entry to the kernel from userspace
@@ -73,11 +75,18 @@ register struct pt_regs *__frame asm("gr28");
 
 #define user_mode(regs)			(!((regs)->psr & PSR_S))
 #define instruction_pointer(regs)	((regs)->pc)
+#define user_stack_pointer(regs)	((regs)->sp)
 
 extern unsigned long user_stack(const struct pt_regs *);
 extern void show_regs(struct pt_regs *);
 #define profile_pc(regs) ((regs)->pc)
-#endif
+
+#define task_pt_regs(task) ((task)->thread.frame0)
+
+#define arch_has_single_step()	(1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
 
 #endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
 #endif /* _ASM_PTRACE_H */
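
The new arch_has_single_step()/user_enable_single_step()/user_disable_single_step() hooks let the generic ptrace layer service PTRACE_SINGLESTEP on frv. A self-contained user-space sketch of a tracer driving that request is below; it uses only the ordinary Linux ptrace API and nothing frv-specific, and the step count of five is arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	int status, i;
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);	/* let the parent trace us */
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}

	waitpid(child, &status, 0);			/* child stopped at exec */
	for (i = 0; i < 5 && WIFSTOPPED(status); i++) {
		if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) < 0)
			break;				/* fails if the arch lacks single-step */
		waitpid(child, &status, 0);		/* one instruction executed */
	}
	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}
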
diff --git a/arch/frv/include/asm/syscall.h b/arch/frv/include/asm/syscall.h
new file mode 100644
index 000000000000..70689eb29b98
--- /dev/null
+++ b/arch/frv/include/asm/syscall.h
@@ -0,0 +1,123 @@
+/* syscall parameter access functions
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+/*
+ * Get the system call number or -1
+ */
+static inline long syscall_get_nr(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	return regs->syscallno;
+}
+
+/*
+ * Restore the clobbered GR8 register
+ * (1st syscall arg was overwritten with syscall return or error)
+ */
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	regs->gr8 = regs->orig_gr8;
+}
+
+/*
+ * See if the syscall return value is an error, returning it if it is and 0 if
+ * not
+ */
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	return IS_ERR_VALUE(regs->gr8) ? regs->gr8 : 0;
+}
+
+/*
+ * Get the syscall return value
+ */
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->gr8;
+}
+
+/*
+ * Set the syscall return value
+ */
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	if (error)
+		regs->gr8 = -error;
+	else
+		regs->gr8 = val;
+}
+
+/*
+ * Retrieve the system call arguments
+ */
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	/*
+	 * Do this simply for now.  If we need to start supporting
+	 * fetching arguments from arbitrary indices, this will need some
+	 * extra logic.  Presently there are no in-tree users that depend
+	 * on this behaviour.
+	 */
+	BUG_ON(i);
+
+	/* Argument pattern is: GR8, GR9, GR10, GR11, GR12, GR13 */
+	switch (n) {
+	case 6: args[5] = regs->gr13;
+	case 5: args[4] = regs->gr12;
+	case 4: args[3] = regs->gr11;
+	case 3: args[2] = regs->gr10;
+	case 2: args[1] = regs->gr9;
+	case 1: args[0] = regs->gr8;
+		break;
+	default:
+		BUG();
+	}
+}
+
+/*
+ * Alter the system call arguments
+ */
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+	/* Same note as above applies */
+	BUG_ON(i);
+
+	switch (n) {
+	case 6: regs->gr13 = args[5];
+	case 5: regs->gr12 = args[4];
+	case 4: regs->gr11 = args[3];
+	case 3: regs->gr10 = args[2];
+	case 2: regs->gr9 = args[1];
+	case 1: regs->gr8 = args[0];
+		break;
+	default:
+		BUG();
+	}
+}
+
+#endif /* _ASM_SYSCALL_H */
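
The new header gives the generic tracing code a uniform way to read and modify syscall state on frv. A hypothetical in-kernel caller of these accessors is sketched below; the function name and the printk are illustrative only and are not part of this patch.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* Hypothetical tracing hook: fetch the syscall number and all six
 * arguments at syscall entry. */
static void sketch_trace_syscall_entry(struct task_struct *task,
				       struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	/* i must be 0 here; the frv implementation BUG()s otherwise. */
	syscall_get_arguments(task, regs, 0, 6, args);
	printk(KERN_DEBUG "syscall %ld(%lx, %lx, %lx, ...)\n",
	       nr, args[0], args[1], args[2]);
}
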
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index bb53ab753ffb..e8a5ed7be021 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -109,20 +109,20 @@ register struct thread_info *__current_thread_info asm("gr15");
  * - other flags in MSW
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
-#define TIF_SIGPENDING		1	/* signal pending */
-#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
-#define TIF_SINGLESTEP		3	/* restore singlestep on return to user mode */
-#define TIF_IRET		4	/* return with iret */
+#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* OOM killer killed process */
 #define TIF_FREEZE		18	/* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
-#define _TIF_IRET		(1 << TIF_IRET)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
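
This hunk drops the unused TIF_IRET flag and adds TIF_NOTIFY_RESUME, renumbering the low flags. A sketch of how the new flag is usually consumed on the return-to-user path, based on the common pattern used by other architectures in this era; the function name is illustrative and not from this patch.

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/tracehook.h>

/* Hypothetical helper called just before returning to user space. */
static void sketch_resume_notification(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);	/* run pending callbacks */
	}
}
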