path: root/net/lapb/lapb_timer.c
blob: 54563ad8aeb1f02bbedd18959c8cf9cd77479942
/*
 *	LAPB release 002
 *
 *	This code REQUIRES 2.1.15 or higher/ NET3.038
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	LAPB 001	Jonathan Naylor	Started Coding
 *	LAPB 002	Jonathan Naylor	New timer architecture.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/lapb.h>
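
/*
 *	T1 is the retransmission timer: it guards every exchange that expects
 *	a reply (SABM/SABME, DISC, outstanding I frames, FRMR).  T2 is the
 *	acknowledgement delay timer: it bounds how long a pending
 *	acknowledgement may be deferred before one is forced out.  Both are
 *	armed with the jiffy intervals held in the control block (lapb->t1,
 *	lapb->t2).
 */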

static void lapb_t1timer_expiry(unsigned long);
static void lapb_t2timer_expiry(unsigned long);
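
/*
 *	(Re)start T1: any pending expiry is cancelled first, so the timer is
 *	always a full lapb->t1 jiffies away after this call.
 */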

void lapb_start_t1timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t1timer);

	lapb->t1timer.data     = (unsigned long)lapb;
	lapb->t1timer.function = &lapb_t1timer_expiry;
	lapb->t1timer.expires  = jiffies + lapb->t1;

	add_timer(&lapb->t1timer);
}

void lapb_start_t2timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t2timer);

	lapb->t2timer.data     = (unsigned long)lapb;
	lapb->t2timer.function = &lapb_t2timer_expiry;
	lapb->t2timer.expires  = jiffies + lapb->t2;

	add_timer(&lapb->t2timer);
}

void lapb_stop_t1timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t1timer);
}

void lapb_stop_t2timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t2timer);
}
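
/*
 *	Report whether T1 is currently pending, so callers can avoid rearming
 *	a timer that is already running.
 */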

int lapb_t1timer_running(struct lapb_cb *lapb)
{
	return timer_pending(&lapb->t1timer);
}
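
/*
 *	T2 has expired: if an acknowledgement is still owed to the peer, send
 *	the delayed response now.
 */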

static void lapb_t2timer_expiry(unsigned long param)
{
	struct lapb_cb *lapb = (struct lapb_cb *)param;

	if (lapb->condition & LAPB_ACK_PENDING_CONDITION) {
		lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
		lapb_timeout_response(lapb);
	}
}
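
/*
 *	T1 has expired: retransmit whatever the current state is waiting on
 *	and restart T1; after N2 unanswered attempts, give up and fall back
 *	to the disconnected state.
 */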

static void lapb_t1timer_expiry(unsigned long param)
{
	struct lapb_cb *lapb = (struct lapb_cb *)param;

	switch (lapb->state) {

		/*
		 *	If we are a DCE, keep going DM .. DM .. DM
		 */
		case LAPB_STATE_0:
			if (lapb->mode & LAPB_DCE)
				lapb_send_control(lapb, LAPB_DM, LAPB_POLLOFF, LAPB_RESPONSE);
			break;

		/*
		 *	Awaiting connection state, send SABM(E), up to N2 times.
		 */
		case LAPB_STATE_1:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
				lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
				return;
			} else {
				lapb->n2count++;
				if (lapb->mode & LAPB_EXTENDED) {
					lapb_dbg(1, "(%p) S1 TX SABME(1)\n",
						 lapb->dev);
					lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
				} else {
					lapb_dbg(1, "(%p) S1 TX SABM(1)\n",
						 lapb->dev);
					lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
				}
			}
			break;

		/*
		 *	Awaiting disconnection state, send DISC, up to N2 times.
		 */
		case LAPB_STATE_2:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
				lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
				return;
			} else {
				lapb->n2count++;
				lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
				lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
			}
			break;

		/*
		 *	Data transfer state, retransmit I frames, up to N2 times.
		 */
		case LAPB_STATE_3:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_stop_t2timer(lapb);
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
				lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
				return;
			} else {
				lapb->n2count++;
				lapb_requeue_frames(lapb);
			}
			break;

		/*
		 *	Frame reject state, retransmit FRMR frames, up to N2 times.
		 */
		case LAPB_STATE_4:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
				lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
				return;
			} else {
				lapb->n2count++;
				lapb_transmit_frmr(lapb);
			}
			break;
	}

	lapb_start_t1timer(lapb);
}