about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-08-12 21:33:56 -0400
committerDavid S. Miller <davem@davemloft.net>2008-08-12 21:33:56 -0400
commit4f70f7a91bffdcc39f088748dc678953eb9a3fbd (patch)
tree934591a9518fbed87c14b758a1744cc30c9dfbb8
parente34456825de0d3ac4c4e8fe0bdc6b599404ea06f (diff)
sparc64: Implement IRQ stacks.
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/sparc/include/asm/irq_64.h4
-rw-r--r--arch/sparc64/kernel/irq.c52
-rw-r--r--arch/sparc64/kernel/kstack.h58
-rw-r--r--arch/sparc64/kernel/process.c27
-rw-r--r--arch/sparc64/kernel/stacktrace.c10
-rw-r--r--arch/sparc64/kernel/traps.c7
-rw-r--r--arch/sparc64/lib/mcount.S22
-rw-r--r--arch/sparc64/mm/init.c11
8 files changed, 161 insertions, 30 deletions
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3473e25231d9..e3dd9303643d 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -93,4 +93,8 @@ static inline unsigned long get_softint(void)
93void __trigger_all_cpu_backtrace(void); 93void __trigger_all_cpu_backtrace(void);
94#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() 94#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
95 95
96extern void *hardirq_stack[NR_CPUS];
97extern void *softirq_stack[NR_CPUS];
98#define __ARCH_HAS_DO_SOFTIRQ
99
96#endif 100#endif
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ba43d85e8dde..9b6689d9d570 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -682,10 +682,32 @@ void ack_bad_irq(unsigned int virt_irq)
682 ino, virt_irq); 682 ino, virt_irq);
683} 683}
684 684
685void *hardirq_stack[NR_CPUS];
686void *softirq_stack[NR_CPUS];
687
688static __attribute__((always_inline)) void *set_hardirq_stack(void)
689{
690 void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
691
692 __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
693 if (orig_sp < sp ||
694 orig_sp > (sp + THREAD_SIZE)) {
695 sp += THREAD_SIZE - 192 - STACK_BIAS;
696 __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
697 }
698
699 return orig_sp;
700}
701static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
702{
703 __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
704}
705
685void handler_irq(int irq, struct pt_regs *regs) 706void handler_irq(int irq, struct pt_regs *regs)
686{ 707{
687 unsigned long pstate, bucket_pa; 708 unsigned long pstate, bucket_pa;
688 struct pt_regs *old_regs; 709 struct pt_regs *old_regs;
710 void *orig_sp;
689 711
690 clear_softint(1 << irq); 712 clear_softint(1 << irq);
691 713
@@ -703,6 +725,8 @@ void handler_irq(int irq, struct pt_regs *regs)
703 "i" (PSTATE_IE) 725 "i" (PSTATE_IE)
704 : "memory"); 726 : "memory");
705 727
728 orig_sp = set_hardirq_stack();
729
706 while (bucket_pa) { 730 while (bucket_pa) {
707 struct irq_desc *desc; 731 struct irq_desc *desc;
708 unsigned long next_pa; 732 unsigned long next_pa;
@@ -719,10 +743,38 @@ void handler_irq(int irq, struct pt_regs *regs)
719 bucket_pa = next_pa; 743 bucket_pa = next_pa;
720 } 744 }
721 745
746 restore_hardirq_stack(orig_sp);
747
722 irq_exit(); 748 irq_exit();
723 set_irq_regs(old_regs); 749 set_irq_regs(old_regs);
724} 750}
725 751
752void do_softirq(void)
753{
754 unsigned long flags;
755
756 if (in_interrupt())
757 return;
758
759 local_irq_save(flags);
760
761 if (local_softirq_pending()) {
762 void *orig_sp, *sp = softirq_stack[smp_processor_id()];
763
764 sp += THREAD_SIZE - 192 - STACK_BIAS;
765
766 __asm__ __volatile__("mov %%sp, %0\n\t"
767 "mov %1, %%sp"
768 : "=&r" (orig_sp)
769 : "r" (sp));
770 __do_softirq();
771 __asm__ __volatile__("mov %0, %%sp"
772 : : "r" (orig_sp));
773 }
774
775 local_irq_restore(flags);
776}
777
726#ifdef CONFIG_HOTPLUG_CPU 778#ifdef CONFIG_HOTPLUG_CPU
727void fixup_irqs(void) 779void fixup_irqs(void)
728{ 780{
diff --git a/arch/sparc64/kernel/kstack.h b/arch/sparc64/kernel/kstack.h
new file mode 100644
index 000000000000..43909d5680ea
--- /dev/null
+++ b/arch/sparc64/kernel/kstack.h
@@ -0,0 +1,58 @@
1#ifndef _KSTACK_H
2#define _KSTACK_H
3
4#include <linux/thread_info.h>
5#include <linux/sched.h>
6#include <asm/ptrace.h>
7#include <asm/irq.h>
8
9/* SP must be STACK_BIAS adjusted already. */
10static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
11{
12 unsigned long base = (unsigned long) tp;
13
14 if (sp >= (base + sizeof(struct thread_info)) &&
15 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
16 return true;
17
18 base = (unsigned long) hardirq_stack[tp->cpu];
19 if (sp >= base &&
20 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
21 return true;
22 base = (unsigned long) softirq_stack[tp->cpu];
23 if (sp >= base &&
24 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
25 return true;
26
27 return false;
28}
29
30/* Does "regs" point to a valid pt_regs trap frame? */
31static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
32{
33 unsigned long base = (unsigned long) tp;
34 unsigned long addr = (unsigned long) regs;
35
36 if (addr >= base &&
37 addr <= (base + THREAD_SIZE - sizeof(*regs)))
38 goto check_magic;
39
40 base = (unsigned long) hardirq_stack[tp->cpu];
41 if (addr >= base &&
42 addr <= (base + THREAD_SIZE - sizeof(*regs)))
43 goto check_magic;
44 base = (unsigned long) softirq_stack[tp->cpu];
45 if (addr >= base &&
46 addr <= (base + THREAD_SIZE - sizeof(*regs)))
47 goto check_magic;
48
49 return false;
50
51check_magic:
52 if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
53 return true;
54 return false;
55
56}
57
58#endif /* _KSTACK_H */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 7f5debdc5fed..15f4178592e7 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -52,6 +52,8 @@
52#include <asm/irq_regs.h> 52#include <asm/irq_regs.h>
53#include <asm/smp.h> 53#include <asm/smp.h>
54 54
55#include "kstack.h"
56
55static void sparc64_yield(int cpu) 57static void sparc64_yield(int cpu)
56{ 58{
57 if (tlb_type != hypervisor) 59 if (tlb_type != hypervisor)
@@ -235,19 +237,6 @@ void show_regs(struct pt_regs *regs)
235struct global_reg_snapshot global_reg_snapshot[NR_CPUS]; 237struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
236static DEFINE_SPINLOCK(global_reg_snapshot_lock); 238static DEFINE_SPINLOCK(global_reg_snapshot_lock);
237 239
238static bool kstack_valid(struct thread_info *tp, struct reg_window *rw)
239{
240 unsigned long thread_base, fp;
241
242 thread_base = (unsigned long) tp;
243 fp = (unsigned long) rw;
244
245 if (fp < (thread_base + sizeof(struct thread_info)) ||
246 fp >= (thread_base + THREAD_SIZE))
247 return false;
248 return true;
249}
250
251static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, 240static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
252 int this_cpu) 241 int this_cpu)
253{ 242{
@@ -264,11 +253,11 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
264 253
265 rw = (struct reg_window *) 254 rw = (struct reg_window *)
266 (regs->u_regs[UREG_FP] + STACK_BIAS); 255 (regs->u_regs[UREG_FP] + STACK_BIAS);
267 if (kstack_valid(tp, rw)) { 256 if (kstack_valid(tp, (unsigned long) rw)) {
268 global_reg_snapshot[this_cpu].i7 = rw->ins[7]; 257 global_reg_snapshot[this_cpu].i7 = rw->ins[7];
269 rw = (struct reg_window *) 258 rw = (struct reg_window *)
270 (rw->ins[6] + STACK_BIAS); 259 (rw->ins[6] + STACK_BIAS);
271 if (kstack_valid(tp, rw)) 260 if (kstack_valid(tp, (unsigned long) rw))
272 global_reg_snapshot[this_cpu].rpc = rw->ins[7]; 261 global_reg_snapshot[this_cpu].rpc = rw->ins[7];
273 } 262 }
274 } else { 263 } else {
@@ -828,7 +817,7 @@ out:
828unsigned long get_wchan(struct task_struct *task) 817unsigned long get_wchan(struct task_struct *task)
829{ 818{
830 unsigned long pc, fp, bias = 0; 819 unsigned long pc, fp, bias = 0;
831 unsigned long thread_info_base; 820 struct thread_info *tp;
832 struct reg_window *rw; 821 struct reg_window *rw;
833 unsigned long ret = 0; 822 unsigned long ret = 0;
834 int count = 0; 823 int count = 0;
@@ -837,14 +826,12 @@ unsigned long get_wchan(struct task_struct *task)
837 task->state == TASK_RUNNING) 826 task->state == TASK_RUNNING)
838 goto out; 827 goto out;
839 828
840 thread_info_base = (unsigned long) task_stack_page(task); 829 tp = task_thread_info(task);
841 bias = STACK_BIAS; 830 bias = STACK_BIAS;
842 fp = task_thread_info(task)->ksp + bias; 831 fp = task_thread_info(task)->ksp + bias;
843 832
844 do { 833 do {
845 /* Bogus frame pointer? */ 834 if (!kstack_valid(tp, fp))
846 if (fp < (thread_info_base + sizeof(struct thread_info)) ||
847 fp >= (thread_info_base + THREAD_SIZE))
848 break; 835 break;
849 rw = (struct reg_window *) fp; 836 rw = (struct reg_window *) fp;
850 pc = rw->ins[7]; 837 pc = rw->ins[7];
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
index e9d7f0660f2e..237e7f8a40ac 100644
--- a/arch/sparc64/kernel/stacktrace.c
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -5,6 +5,8 @@
5#include <asm/ptrace.h> 5#include <asm/ptrace.h>
6#include <asm/stacktrace.h> 6#include <asm/stacktrace.h>
7 7
8#include "kstack.h"
9
8void save_stack_trace(struct stack_trace *trace) 10void save_stack_trace(struct stack_trace *trace)
9{ 11{
10 unsigned long ksp, fp, thread_base; 12 unsigned long ksp, fp, thread_base;
@@ -24,17 +26,13 @@ void save_stack_trace(struct stack_trace *trace)
24 struct pt_regs *regs; 26 struct pt_regs *regs;
25 unsigned long pc; 27 unsigned long pc;
26 28
27 /* Bogus frame pointer? */ 29 if (!kstack_valid(tp, fp))
28 if (fp < (thread_base + sizeof(struct thread_info)) ||
29 fp > (thread_base + THREAD_SIZE - sizeof(struct sparc_stackf)))
30 break; 30 break;
31 31
32 sf = (struct sparc_stackf *) fp; 32 sf = (struct sparc_stackf *) fp;
33 regs = (struct pt_regs *) (sf + 1); 33 regs = (struct pt_regs *) (sf + 1);
34 34
35 if (((unsigned long)regs <= 35 if (kstack_is_trap_frame(tp, regs)) {
36 (thread_base + THREAD_SIZE - sizeof(*regs))) &&
37 (regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
38 if (!(regs->tstate & TSTATE_PRIV)) 36 if (!(regs->tstate & TSTATE_PRIV))
39 break; 37 break;
40 pc = regs->tpc; 38 pc = regs->tpc;
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 404e8561e2d0..3d924121c796 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -39,6 +39,7 @@
39#include <asm/prom.h> 39#include <asm/prom.h>
40 40
41#include "entry.h" 41#include "entry.h"
42#include "kstack.h"
42 43
43/* When an irrecoverable trap occurs at tl > 0, the trap entry 44/* When an irrecoverable trap occurs at tl > 0, the trap entry
44 * code logs the trap state registers at every level in the trap 45 * code logs the trap state registers at every level in the trap
@@ -2115,14 +2116,12 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2115 struct pt_regs *regs; 2116 struct pt_regs *regs;
2116 unsigned long pc; 2117 unsigned long pc;
2117 2118
2118 /* Bogus frame pointer? */ 2119 if (!kstack_valid(tp, fp))
2119 if (fp < (thread_base + sizeof(struct thread_info)) ||
2120 fp >= (thread_base + THREAD_SIZE))
2121 break; 2120 break;
2122 sf = (struct sparc_stackf *) fp; 2121 sf = (struct sparc_stackf *) fp;
2123 regs = (struct pt_regs *) (sf + 1); 2122 regs = (struct pt_regs *) (sf + 1);
2124 2123
2125 if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) { 2124 if (kstack_is_trap_frame(tp, regs)) {
2126 if (!(regs->tstate & TSTATE_PRIV)) 2125 if (!(regs->tstate & TSTATE_PRIV))
2127 break; 2126 break;
2128 pc = regs->tpc; 2127 pc = regs->tpc;
diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
index 734caf0cec09..fad90ddb3a28 100644
--- a/arch/sparc64/lib/mcount.S
+++ b/arch/sparc64/lib/mcount.S
@@ -49,6 +49,28 @@ mcount:
49 cmp %sp, %g3 49 cmp %sp, %g3
50 bg,pt %xcc, 1f 50 bg,pt %xcc, 1f
51 nop 51 nop
52 lduh [%g6 + TI_CPU], %g1
53 sethi %hi(hardirq_stack), %g3
54 or %g3, %lo(hardirq_stack), %g3
55 sllx %g1, 3, %g1
56 ldx [%g3 + %g1], %g7
57 sub %g7, STACK_BIAS, %g7
58 cmp %sp, %g7
59 bleu,pt %xcc, 2f
60 sethi %hi(THREAD_SIZE), %g3
61 add %g7, %g3, %g7
62 cmp %sp, %g7
63 blu,pn %xcc, 1f
642: sethi %hi(softirq_stack), %g3
65 or %g3, %lo(softirq_stack), %g3
66 ldx [%g3 + %g1], %g7
67 cmp %sp, %g7
68 bleu,pt %xcc, 2f
69 sethi %hi(THREAD_SIZE), %g3
70 add %g7, %g3, %g7
71 cmp %sp, %g7
72 blu,pn %xcc, 1f
73 nop
52 /* If we are already on ovstack, don't hop onto it 74 /* If we are already on ovstack, don't hop onto it
53 * again, we are already trying to output the stack overflow 75 * again, we are already trying to output the stack overflow
54 * message. 76 * message.
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4e821b3ecb03..217de3ea29e8 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -49,6 +49,7 @@
49#include <asm/sstate.h> 49#include <asm/sstate.h>
50#include <asm/mdesc.h> 50#include <asm/mdesc.h>
51#include <asm/cpudata.h> 51#include <asm/cpudata.h>
52#include <asm/irq.h>
52 53
53#define MAX_PHYS_ADDRESS (1UL << 42UL) 54#define MAX_PHYS_ADDRESS (1UL << 42UL)
54#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) 55#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -1771,6 +1772,16 @@ void __init paging_init(void)
1771 if (tlb_type == hypervisor) 1772 if (tlb_type == hypervisor)
1772 sun4v_mdesc_init(); 1773 sun4v_mdesc_init();
1773 1774
1775 /* Once the OF device tree and MDESC have been setup, we know
1776 * the list of possible cpus. Therefore we can allocate the
1777 * IRQ stacks.
1778 */
1779 for_each_possible_cpu(i) {
1780 /* XXX Use node local allocations... XXX */
1781 softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
1782 hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
1783 }
1784
1774 /* Setup bootmem... */ 1785 /* Setup bootmem... */
1775 last_valid_pfn = end_pfn = bootmem_init(phys_base); 1786 last_valid_pfn = end_pfn = bootmem_init(phys_base);
1776 1787
n class="hl opt">, &tb->owners) { if (sk != sk2 && !tcp_v6_ipv6only(sk2) && (!sk->sk_bound_dev_if || !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { if (!reuse || !sk2->sk_reuse || sk2->sk_state == TCP_LISTEN) { const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2); if (!sk2_rcv_saddr || !sk_rcv_saddr || sk2_rcv_saddr == sk_rcv_saddr) break; } } } return node != NULL; } /* Obtain a reference to a local port for the given sock, * if snum is zero it means select any available local port. */ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) { struct tcp_bind_hashbucket *head; struct hlist_node *node; struct tcp_bind_bucket *tb; int ret; local_bh_disable(); if (!snum) { int low = sysctl_local_port_range[0]; int high = sysctl_local_port_range[1]; int remaining = (high - low) + 1; int rover; spin_lock(&tcp_portalloc_lock); rover = tcp_port_rover; do { rover++; if (rover < low || rover > high) rover = low; head = &tcp_bhash[tcp_bhashfn(rover)]; spin_lock(&head->lock); tb_for_each(tb, node, &head->chain) if (tb->port == rover) goto next; break; next: spin_unlock(&head->lock); } while (--remaining > 0); tcp_port_rover = rover; spin_unlock(&tcp_portalloc_lock); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) goto fail; /* OK, here is the one we will use. HEAD is * non-NULL and we hold it's mutex. 
*/ snum = rover; } else { head = &tcp_bhash[tcp_bhashfn(snum)]; spin_lock(&head->lock); tb_for_each(tb, node, &head->chain) if (tb->port == snum) goto tb_found; } tb = NULL; goto tb_not_found; tb_found: if (!hlist_empty(&tb->owners)) { if (sk->sk_reuse > 1) goto success; if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN) { goto success; } else { ret = 1; if (tcp_bind_conflict(sk, tb)) goto fail_unlock; } } tb_not_found: ret = 1; if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL) goto fail_unlock; if (hlist_empty(&tb->owners)) { if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) tb->fastreuse = 1; else tb->fastreuse = 0; } else if (tb->fastreuse && (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) tb->fastreuse = 0; success: if (!tcp_sk(sk)->bind_hash) tcp_bind_hash(sk, tb, snum); BUG_TRAP(tcp_sk(sk)->bind_hash == tb); ret = 0; fail_unlock: spin_unlock(&head->lock); fail: local_bh_enable(); return ret; } /* Get rid of any references to a local port held by the * given sock. */ static void __tcp_put_port(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)]; struct tcp_bind_bucket *tb; spin_lock(&head->lock); tb = tcp_sk(sk)->bind_hash; __sk_del_bind_node(sk); tcp_sk(sk)->bind_hash = NULL; inet->num = 0; tcp_bucket_destroy(tb); spin_unlock(&head->lock); } void tcp_put_port(struct sock *sk) { local_bh_disable(); __tcp_put_port(sk); local_bh_enable(); } /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP. * Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. Exclusive sleep solves * this, _but_ remember, it adds useless work on UP machines (wake up each * exclusive lock release). It should be ifdefed really. 
*/ void tcp_listen_wlock(void) { write_lock(&tcp_lhash_lock); if (atomic_read(&tcp_lhash_users)) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait_exclusive(&tcp_lhash_wait, &wait, TASK_UNINTERRUPTIBLE); if (!atomic_read(&tcp_lhash_users)) break; write_unlock_bh(&tcp_lhash_lock); schedule(); write_lock_bh(&tcp_lhash_lock); } finish_wait(&tcp_lhash_wait, &wait); } } static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible) { struct hlist_head *list; rwlock_t *lock; BUG_TRAP(sk_unhashed(sk)); if (listen_possible && sk->sk_state == TCP_LISTEN) { list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)]; lock = &tcp_lhash_lock; tcp_listen_wlock(); } else { list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain; lock = &tcp_ehash[sk->sk_hashent].lock; write_lock(lock); } __sk_add_node(sk, list); sock_prot_inc_use(sk->sk_prot); write_unlock(lock); if (listen_possible && sk->sk_state == TCP_LISTEN) wake_up(&tcp_lhash_wait); } static void tcp_v4_hash(struct sock *sk) { if (sk->sk_state != TCP_CLOSE) { local_bh_disable(); __tcp_v4_hash(sk, 1); local_bh_enable(); } } void tcp_unhash(struct sock *sk) { rwlock_t *lock; if (sk_unhashed(sk)) goto ende; if (sk->sk_state == TCP_LISTEN) { local_bh_disable(); tcp_listen_wlock(); lock = &tcp_lhash_lock; } else { struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent]; lock = &head->lock; write_lock_bh(&head->lock); } if (__sk_del_node_init(sk)) sock_prot_dec_use(sk->sk_prot); write_unlock_bh(lock); ende: if (sk->sk_state == TCP_LISTEN) wake_up(&tcp_lhash_wait); } /* Don't inline this cruft. Here are some nice properties to * exploit here. The BSD API does not allow a listening TCP * to specify the remote port nor the remote address for the * connection. So always assume those are both wildcarded * during the search since they can never be otherwise. 
*/ static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr, unsigned short hnum, int dif) { struct sock *result = NULL, *sk; struct hlist_node *node; int score, hiscore; hiscore=-1; sk_for_each(sk, node, head) { struct inet_sock *inet = inet_sk(sk); if (inet->num == hnum && !ipv6_only_sock(sk)) { __u32 rcv_saddr = inet->rcv_saddr; score = (sk->sk_family == PF_INET ? 1 : 0); if (rcv_saddr) { if (rcv_saddr != daddr) continue; score+=2; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) continue; score+=2; } if (score == 5) return sk; if (score > hiscore) { hiscore = score; result = sk; } } } return result; } /* Optimize the common listener case. */ static inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif) { struct sock *sk = NULL; struct hlist_head *head; read_lock(&tcp_lhash_lock); head = &tcp_listening_hash[tcp_lhashfn(hnum)]; if (!hlist_empty(head)) { struct inet_sock *inet = inet_sk((sk = __sk_head(head))); if (inet->num == hnum && !sk->sk_node.next && (!inet->rcv_saddr || inet->rcv_saddr == daddr) && (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) && !sk->sk_bound_dev_if) goto sherry_cache; sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif); } if (sk) { sherry_cache: sock_hold(sk); } read_unlock(&tcp_lhash_lock); return sk; } /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM * * Local BH must be disabled here. */ static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport, u32 daddr, u16 hnum, int dif) { struct tcp_ehash_bucket *head; TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) __u32 ports = TCP_COMBINED_PORTS(sport, hnum); struct sock *sk; struct hlist_node *node; /* Optimize here for direct hit, only listening connections can * have wildcards anyways. 
*/ int hash = tcp_hashfn(daddr, hnum, saddr, sport); head = &tcp_ehash[hash]; read_lock(&head->lock); sk_for_each(sk, node, &head->chain) { if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) goto hit; /* You sunk my battleship! */ } /* Must check for a TIME_WAIT'er before going to listener hash. */ sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) { if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif)) goto hit; } sk = NULL; out: read_unlock(&head->lock); return sk; hit: sock_hold(sk); goto out; } static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 hnum, int dif) { struct sock *sk = __tcp_v4_lookup_established(saddr, sport, daddr, hnum, dif); return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif); } inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif) { struct sock *sk; local_bh_disable(); sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif); local_bh_enable(); return sk; } EXPORT_SYMBOL_GPL(tcp_v4_lookup); static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb) { return secure_tcp_sequence_number(skb->nh.iph->daddr, skb->nh.iph->saddr, skb->h.th->dest, skb->h.th->source); } /* called with local bh disabled */ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, struct tcp_tw_bucket **twp) { struct inet_sock *inet = inet_sk(sk); u32 daddr = inet->rcv_saddr; u32 saddr = inet->daddr; int dif = sk->sk_bound_dev_if; TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); int hash = tcp_hashfn(daddr, lport, saddr, inet->dport); struct tcp_ehash_bucket *head = &tcp_ehash[hash]; struct sock *sk2; struct hlist_node *node; struct tcp_tw_bucket *tw; write_lock(&head->lock); /* Check TIME-WAIT sockets first. 
*/ sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) { tw = (struct tcp_tw_bucket *)sk2; if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { struct tcp_sock *tp = tcp_sk(sk); /* With PAWS, it is safe from the viewpoint of data integrity. Even without PAWS it is safe provided sequence spaces do not overlap i.e. at data rates <= 80Mbit/sec. Actually, the idea is close to VJ's one, only timestamp cache is held not per host, but per port pair and TW bucket is used as state holder. If TW bucket has been already destroyed we fall back to VJ's scheme and use initial timestamp retrieved from peer table. */ if (tw->tw_ts_recent_stamp && (!twp || (sysctl_tcp_tw_reuse && xtime.tv_sec - tw->tw_ts_recent_stamp > 1))) { if ((tp->write_seq = tw->tw_snd_nxt + 65535 + 2) == 0) tp->write_seq = 1; tp->rx_opt.ts_recent = tw->tw_ts_recent; tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; sock_hold(sk2); goto unique; } else goto not_unique; } } tw = NULL; /* And established part... */ sk_for_each(sk2, node, &head->chain) { if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) goto not_unique; } unique: /* Must record num and sport now. Otherwise we will see * in hash table socket with a funny identity. */ inet->num = lport; inet->sport = htons(lport); sk->sk_hashent = hash; BUG_TRAP(sk_unhashed(sk)); __sk_add_node(sk, &head->chain); sock_prot_inc_use(sk->sk_prot); write_unlock(&head->lock); if (twp) { *twp = tw; NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); } else if (tw) { /* Silly. Should hash-dance instead... */ tcp_tw_deschedule(tw); NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); tcp_tw_put(tw); } return 0; not_unique: write_unlock(&head->lock); return -EADDRNOTAVAIL; } static inline u32 connect_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); return secure_tcp_port_ephemeral(inet->rcv_saddr, inet->daddr, inet->dport); } /* * Bind a port for a connect operation and hash it. 
*/ static inline int tcp_v4_hash_connect(struct sock *sk) { unsigned short snum = inet_sk(sk)->num; struct tcp_bind_hashbucket *head; struct tcp_bind_bucket *tb; int ret; if (!snum) { int low = sysctl_local_port_range[0]; int high = sysctl_local_port_range[1]; int range = high - low; int i; int port; static u32 hint; u32 offset = hint + connect_port_offset(sk); struct hlist_node *node; struct tcp_tw_bucket *tw = NULL; local_bh_disable(); for (i = 1; i <= range; i++) { port = low + (i + offset) % range; head = &tcp_bhash[tcp_bhashfn(port)]; spin_lock(&head->lock); /* Does not bother with rcv_saddr checks, * because the established check is already * unique enough. */ tb_for_each(tb, node, &head->chain) { if (tb->port == port) { BUG_TRAP(!hlist_empty(&tb->owners)); if (tb->fastreuse >= 0) goto next_port; if (!__tcp_v4_check_established(sk, port, &tw)) goto ok; goto next_port; } } tb = tcp_bucket_create(head, port); if (!tb) { spin_unlock(&head->lock); break; } tb->fastreuse = -1; goto ok; next_port: spin_unlock(&head->lock); } local_bh_enable(); return -EADDRNOTAVAIL; ok: hint += i; /* Head lock still held and bh's disabled */ tcp_bind_hash(sk, tb, port); if (sk_unhashed(sk)) { inet_sk(sk)->sport = htons(port); __tcp_v4_hash(sk, 0); } spin_unlock(&head->lock); if (tw) { tcp_tw_deschedule(tw); tcp_tw_put(tw); } ret = 0; goto out; } head = &tcp_bhash[tcp_bhashfn(snum)]; tb = tcp_sk(sk)->bind_hash; spin_lock_bh(&head->lock); if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { __tcp_v4_hash(sk, 0); spin_unlock_bh(&head->lock); return 0; } else { spin_unlock(&head->lock); /* No definite answer... Walk to established hash table */ ret = __tcp_v4_check_established(sk, snum, NULL); out: local_bh_enable(); return ret; } } /* This will initiate an outgoing connection. 
*/ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct rtable *rt; u32 daddr, nexthop; int tmp; int err; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; if (inet->opt && inet->opt->srr) { if (!daddr) return -EINVAL; nexthop = inet->opt->faddr; } tmp = ip_route_connect(&rt, nexthop, inet->saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_TCP, inet->sport, usin->sin_port, sk); if (tmp < 0) return tmp; if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (!inet->opt || !inet->opt->srr) daddr = rt->rt_dst; if (!inet->saddr) inet->saddr = rt->rt_src; inet->rcv_saddr = inet->saddr; if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) { /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; tp->write_seq = 0; } if (sysctl_tcp_tw_recycle && !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { struct inet_peer *peer = rt_get_peer(rt); /* VJ's idea. We save last timestamp seen from * the destination in peer table, when entering state TIME-WAIT * and initialize rx_opt.ts_recent from it, when trying new connection. */ if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) { tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent = peer->tcp_ts; } } inet->dport = usin->sin_port; inet->daddr = daddr; tp->ext_header_len = 0; if (inet->opt) tp->ext_header_len = inet->opt->optlen; tp->rx_opt.mss_clamp = 536; /* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. 
*/ tcp_set_state(sk, TCP_SYN_SENT); err = tcp_v4_hash_connect(sk); if (err) goto failure; err = ip_route_newports(&rt, inet->sport, inet->dport, sk); if (err) goto failure; /* OK, now commit destination to socket. */ __sk_dst_set(sk, &rt->u.dst); tcp_v4_setup_caps(sk, &rt->u.dst); if (!tp->write_seq) tp->write_seq = secure_tcp_sequence_number(inet->saddr, inet->daddr, inet->sport, usin->sin_port); inet->id = tp->write_seq ^ jiffies; err = tcp_connect(sk); rt = NULL; if (err) goto failure; return 0; failure: /* This unhashes the socket and releases the local port, if necessary. */ tcp_set_state(sk, TCP_CLOSE); ip_rt_put(rt); sk->sk_route_caps = 0; inet->dport = 0; return err; } static __inline__ int tcp_v4_iif(struct sk_buff *skb) { return ((struct rtable *)skb->dst)->rt_iif; } static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd) { return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1)); } static struct open_request *tcp_v4_search_req(struct tcp_sock *tp, struct open_request ***prevp, __u16 rport, __u32 raddr, __u32 laddr) { struct tcp_listen_opt *lopt = tp->listen_opt; struct open_request *req, **prev; for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)]; (req = *prev) != NULL; prev = &req->dl_next) { if (req->rmt_port == rport && req->af.v4_req.rmt_addr == raddr && req->af.v4_req.loc_addr == laddr && TCP_INET_FAMILY(req->class->family)) { BUG_TRAP(!req->sk); *prevp = prev; break; } } return req; } static void tcp_v4_synq_add(struct sock *sk, struct open_request *req) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_listen_opt *lopt = tp->listen_opt; u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd); req->expires = jiffies + TCP_TIMEOUT_INIT; req->retrans = 0; req->sk = NULL; req->dl_next = lopt->syn_table[h]; write_lock(&tp->syn_wait_lock); lopt->syn_table[h] = req; write_unlock(&tp->syn_wait_lock); tcp_synq_added(sk); } /* * This routine does path mtu discovery as defined in 
RFC1191. */ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) { struct dst_entry *dst; struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs * send out by Linux are always <576bytes so they should go through * unfragmented). */ if (sk->sk_state == TCP_LISTEN) return; /* We don't check in the destentry if pmtu discovery is forbidden * on this route. We just assume that no packet_to_big packets * are send back when pmtu discovery is not active. * There is a small race when the user changes this flag in the * route, but I think that's acceptable. */ if ((dst = __sk_dst_check(sk, 0)) == NULL) return; dst->ops->update_pmtu(dst, mtu); /* Something is about to be wrong... Remember soft error * for the case, if this connection will not able to recover. */ if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) sk->sk_err_soft = EMSGSIZE; mtu = dst_mtu(dst); if (inet->pmtudisc != IP_PMTUDISC_DONT && tp->pmtu_cookie > mtu) { tcp_sync_mss(sk, mtu); /* Resend the TCP packet because it's * clear that the old packet has been * dropped. This is the new "fast" path mtu * discovery. */ tcp_simple_retransmit(sk); } /* else let the usual retransmit timer handle it */ } /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. After adjustment * header points to the first 8 bytes of the tcp header. We need * to find the appropriate port. * * The locking strategy used here is very "optimistic". When * someone else accesses the socket the ICMP is just dropped * and for some paths there is no check at all. * A more general error queue to queue errors for later handling * is probably better. 
* */ void tcp_v4_err(struct sk_buff *skb, u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); struct tcp_sock *tp; struct inet_sock *inet; int type = skb->h.icmph->type; int code = skb->h.icmph->code; struct sock *sk; __u32 seq; int err; if (skb->len < (iph->ihl << 2) + 8) { ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); return; } sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr, th->source, tcp_v4_iif(skb)); if (!sk) { ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); return; } if (sk->sk_state == TCP_TIME_WAIT) { tcp_tw_put((struct tcp_tw_bucket *)sk); return; } bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. */ if (sock_owned_by_user(sk)) NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == TCP_CLOSE) goto out; tp = tcp_sk(sk); seq = ntohl(th->seq); if (sk->sk_state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) { NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS); goto out; } switch (type) { case ICMP_SOURCE_QUENCH: /* Just silently ignore these. */ goto out; case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ if (!sock_owned_by_user(sk)) do_pmtu_discovery(sk, iph, info); goto out; } err = icmp_err_convert[code].errno; break; case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; default: goto out; } switch (sk->sk_state) { struct open_request *req, **prev; case TCP_LISTEN: if (sock_owned_by_user(sk)) goto out; req = tcp_v4_search_req(tp, &prev, th->dest, iph->daddr, iph->saddr); if (!req) goto out; /* ICMPs are not backlogged, hence we cannot get an established socket here. */ BUG_TRAP(!req->sk); if (seq != req->snt_isn) { NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); goto out; } /* * Still in SYN_RECV, just remove it silently. 
* There is no good way to pass the error to the newly * created socket, and POSIX does not want network * errors returned from accept(). */ tcp_synq_drop(sk, req, prev); goto out; case TCP_SYN_SENT: case TCP_SYN_RECV: /* Cannot happen. It can f.e. if SYNs crossed. */ if (!sock_owned_by_user(sk)) { TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); sk->sk_err = err; sk->sk_error_report(sk); tcp_done(sk); } else { sk->sk_err_soft = err; } goto out; } /* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905) */ inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else { /* Only an error on timeout */ sk->sk_err_soft = err; } out: bh_unlock_sock(sk); sock_put(sk); } /* This routine computes an IPv4 TCP checksum. */ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, struct sk_buff *skb) { struct inet_sock *inet = inet_sk(sk); if (skb->ip_summed == CHECKSUM_HW) { th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0); skb->csum = offsetof(struct tcphdr, check); } else { th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, csum_partial((char *)th, th->doff << 2, skb->csum)); } } /* * This routine will send an RST to the other tcp. * * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.) * for reset. * Answer: if a packet caused RST, it is not for a socket * existing in our system, if it is matched to a socket, * it is just duplicate segment or bug in other side's TCP. 
* So that we build reply only basing on parameters * arrived with segment. * Exception: precedence violation. We do not implement it in any case. */ static void tcp_v4_send_reset(struct sk_buff *skb) { struct tcphdr *th = skb->h.th; struct tcphdr rth; struct ip_reply_arg arg; /* Never send a reset in response to a reset. */ if (th->rst) return; if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL) return; /* Swap the send and the receive. */ memset(&rth, 0, sizeof(struct tcphdr)); rth.dest = th->source; rth.source = th->dest; rth.doff = sizeof(struct tcphdr) / 4; rth.rst = 1; if (th->ack) { rth.seq = th->ack_seq; } else { rth.ack = 1; rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2)); } memset(&arg, 0, sizeof arg); arg.iov[0].iov_base = (unsigned char *)&rth; arg.iov[0].iov_len = sizeof rth; arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, skb->nh.iph->saddr, /*XXX*/ sizeof(struct tcphdr), IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth); TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); } /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states outside socket context is ugly, certainly. What can I do? */ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) { struct tcphdr *th = skb->h.th; struct { struct tcphdr th; u32 tsopt[3]; } rep; struct ip_reply_arg arg; memset(&rep.th, 0, sizeof(struct tcphdr)); memset(&arg, 0, sizeof arg); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); if (ts) { rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); rep.tsopt[1] = htonl(tcp_time_stamp); rep.tsopt[2] = htonl(ts);