aboutsummaryrefslogtreecommitdiffstats
path: root/net/lapb/lapb_timer.c
blob: af6d14b44e2e66abe826896c8fb4c2672f661509 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
/*
 *	LAPB release 002
 *
 *	This code REQUIRES 2.1.15 or higher / NET3.038
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	LAPB 001	Jonathan Naylor	Started Coding
 *	LAPB 002	Jonathan Naylor	New timer architecture.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/lapb.h>

/* Timer callbacks, defined below; installed by lapb_start_t1timer() and
 * lapb_start_t2timer() via the timer_list .function field. */
static void lapb_t1timer_expiry(unsigned long);
static void lapb_t2timer_expiry(unsigned long);

void lapb_start_t1timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t1timer);

	lapb->t1timer.data     = (unsigned long)lapb;
	lapb->t1timer.function = &lapb_t1timer_expiry;
	lapb->t1timer.expires  = jiffies + lapb->t1;

	add_timer(&lapb->t1timer);
}

void lapb_start_t2timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t2timer);

	lapb->t2timer.data     = (unsigned long)lapb;
	lapb->t2timer.function = &lapb_t2timer_expiry;
	lapb->t2timer.expires  = jiffies + lapb->t2;

	add_timer(&lapb->t2timer);
}

/*
 *	Cancel the T1 (retransmission) timer if it is pending.
 */
void lapb_stop_t1timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t1timer);
}

/*
 *	Cancel the T2 (acknowledgement delay) timer if it is pending.
 */
void lapb_stop_t2timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t2timer);
}

/*
 *	Return non-zero if the T1 timer is currently armed (pending).
 */
int lapb_t1timer_running(struct lapb_cb *lapb)
{
	return timer_pending(&lapb->t1timer);
}

/*
 *	T2 timer expiry: if an acknowledgement is still owed to the peer,
 *	clear the pending flag and send the delayed response now.
 */
static void lapb_t2timer_expiry(unsigned long param)
{
	struct lapb_cb *lapb = (struct lapb_cb *)param;

	if (!(lapb->condition & LAPB_ACK_PENDING_CONDITION))
		return;

	lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
	lapb_timeout_response(lapb);
}

/*
 *	T1 (retransmission) timer expiry handler.
 *
 *	T1 guards every command that expects a reply from the peer.  The
 *	action on expiry depends on the link state: retransmit the
 *	outstanding frame (SABM/SABME in S1, DISC in S2, queued I frames
 *	in S3, FRMR in S4) and count the attempt in n2count.  Once n2
 *	attempts have been made, give up: flush the queues, fall back to
 *	the disconnected state (LAPB_STATE_0) and tell the upper layer
 *	the link timed out.  Unless the link was torn down (the early
 *	returns below), T1 is re-armed at the bottom of the function.
 */
static void lapb_t1timer_expiry(unsigned long param)
{
	struct lapb_cb *lapb = (struct lapb_cb *)param;

	switch (lapb->state) {

		/*
		 *	If we are a DCE, keep going DM .. DM .. DM
		 */
		case LAPB_STATE_0:
			if (lapb->mode & LAPB_DCE)
				lapb_send_control(lapb, LAPB_DM, LAPB_POLLOFF, LAPB_RESPONSE);
			break;

		/*
		 *	Awaiting connection state, send SABM(E), up to N2 times.
		 */
		case LAPB_STATE_1:
			if (lapb->n2count == lapb->n2) {
				/* retry budget exhausted: give up and go idle */
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
#if LAPB_DEBUG > 0
				printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
#endif
				/* link is down; do not re-arm T1 */
				return;
			} else {
				lapb->n2count++;
				/* extended mode uses SABME, basic mode SABM */
				if (lapb->mode & LAPB_EXTENDED) {
#if LAPB_DEBUG > 1
					printk(KERN_DEBUG "lapb: (%p) S1 TX SABME(1)\n", lapb->dev);
#endif
					lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
				} else {
#if LAPB_DEBUG > 1
					printk(KERN_DEBUG "lapb: (%p) S1 TX SABM(1)\n", lapb->dev);
#endif
					lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
				}
			}
			break;

		/*
		 *	Awaiting disconnection state, send DISC, up to N2 times.
		 */
		case LAPB_STATE_2:
			if (lapb->n2count == lapb->n2) {
				/* peer never acknowledged DISC; force the link down */
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
#if LAPB_DEBUG > 0
				printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
#endif
				return;
			} else {
				lapb->n2count++;
#if LAPB_DEBUG > 1
				printk(KERN_DEBUG "lapb: (%p) S2 TX DISC(1)\n", lapb->dev);
#endif
				lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
			}
			break;

		/*
		 *	Data transfer state, restransmit I frames, up to N2 times.
		 */
		case LAPB_STATE_3:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				/* only S3 runs T2; stop it before dropping the link */
				lapb_stop_t2timer(lapb);
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
#if LAPB_DEBUG > 0
				printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
#endif
				return;
			} else {
				lapb->n2count++;
				/* move unacked I frames back for retransmission */
				lapb_requeue_frames(lapb);
			}
			break;

		/*
		 *	Frame reject state, restransmit FRMR frames, up to N2 times.
		 */
		case LAPB_STATE_4:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
#if LAPB_DEBUG > 0
				printk(KERN_DEBUG "lapb: (%p) S4 -> S0\n", lapb->dev);
#endif
				return;
			} else {
				lapb->n2count++;
				lapb_transmit_frmr(lapb);
			}
			break;
	}

	/* still waiting for a reply: start another T1 period */
	lapb_start_t1timer(lapb);
}
sk = *it; foreach(tsk->get_requests(), jt) { const RequestBound& req = *jt; unsigned int res = req.get_resource_id(); while (res >= resources.size()) resources.push_back(ContentionSet()); resources[res].push_back(&req); } } } void split_by_resource(const Clusters& clusters, ClusterResources& resources) { foreach(clusters, it) { resources.push_back(Resources()); split_by_resource(*it, resources.back()); } } void split_by_type(const ContentionSet& requests, ContentionSet& reads, ContentionSet& writes) { foreach(requests, it) { const RequestBound *req = *it; if (req->get_request_type() == READ) reads.push_back(req); else writes.push_back(req); } } void split_by_type(const Resources& resources, Resources &reads, Resources &writes) { reads.reserve(resources.size()); writes.reserve(resources.size()); foreach(resources, it) { reads.push_back(ContentionSet()); writes.push_back(ContentionSet()); split_by_type(*it, reads.back(), writes.back()); } } void split_by_type(const ClusterResources& per_cluster, ClusterResources &reads, ClusterResources &writes) { reads.reserve(per_cluster.size()); writes.reserve(per_cluster.size()); foreach(per_cluster, it) { reads.push_back(Resources()); writes.push_back(Resources()); split_by_type(*it, reads.back(), writes.back()); } } static bool has_longer_request_length(const RequestBound* a, const RequestBound* b) { return a->get_request_length() > b->get_request_length(); } void sort_by_request_length(ContentionSet& cs) { std::sort(cs.begin(), cs.end(), has_longer_request_length); } static bool has_longer_request_length_lcs(const LimitedRequestBound &a, const LimitedRequestBound &b) { return has_longer_request_length(a.request_bound, b.request_bound); } void sort_by_request_length(LimitedContentionSet &lcs) { std::sort(lcs.begin(), lcs.end(), has_longer_request_length_lcs); } void sort_by_request_length(Resources& resources) { apply_foreach(resources, sort_by_request_length); } void sort_by_request_length(ClusterResources& resources) { 
apply_foreach(resources, sort_by_request_length); } void determine_priority_ceilings(const Resources& resources, PriorityCeilings& ceilings) { ceilings.reserve(resources.size()); foreach(resources, it) { unsigned int ceiling = UINT_MAX; const ContentionSet& cs = *it; foreach(cs, jt) { const RequestBound* req = *jt; ceiling = std::min(ceiling, req->get_task()->get_priority()); } ceilings.push_back(ceiling); } } PriorityCeilings get_priority_ceilings(const ResourceSharingInfo& info) { Resources resources; PriorityCeilings ceilings; split_by_resource(info, resources); determine_priority_ceilings(resources, ceilings); return ceilings; } typedef std::vector<TaskContention> ClusterContention; typedef std::vector<ContentionSet> TaskContention; Interference bound_blocking(const ContentionSet& cont, unsigned long interval, unsigned int max_total_requests, unsigned int max_requests_per_source, const TaskInfo* exclude_tsk, // Note: the following parameter excludes // *high-priority* tasks. Used to exclude local higher-priority tasks. // Default: all tasks can block (suitable for remote blocking). unsigned int min_priority /* default == 0 */) { Interference inter; unsigned int remaining; remaining = max_total_requests; foreach(cont, it) { const RequestBound* req = *it; if (!remaining) break; // only use this source if it is not excluded if (req->get_task() != exclude_tsk && req->get_task()->get_priority() >= min_priority) { unsigned int num; // This makes the assumption that there is only one // request object per task. This makes sense if the // contention set has been split by resource. This may // be pessimistic for contention sets that contain // request objects for multiple resources. The // assumption also works out if max_total_requests == // max_requests_per_source. 
num = std::min(req->get_max_num_requests(interval), max_requests_per_source); num = std::min(num, remaining); inter.total_length += num * req->get_request_length(); inter.count += num; remaining -= num; } } return inter; } Interference bound_blocking(const ContentionSet& cont, unsigned long interval, unsigned int max_total_requests, unsigned int max_requests_per_source, bool exclude_whole_cluster, const TaskInfo* exclude_tsk) { Interference inter; unsigned int remaining; remaining = max_total_requests; foreach(cont, it) { const RequestBound* req = *it; if (!remaining) break; // only use this source if it is not excluded if (req->get_task() != exclude_tsk && (!exclude_whole_cluster || req->get_task()->get_cluster() != exclude_tsk->get_cluster())) { unsigned int num; num = std::min(req->get_max_num_requests(interval), max_requests_per_source); num = std::min(num, remaining); inter.total_length += num * req->get_request_length(); inter.count += num; remaining -= num; } } return inter; } Interference bound_blocking_all_clusters( const ClusterResources& clusters, const ClusterLimits& limits, unsigned int res_id, unsigned long interval, const TaskInfo* exclude_tsk) { Interference inter; unsigned int i; // add interference from each non-excluded cluster enumerate(clusters, it, i) { const Resources& resources = *it; const ClusterLimit& limit = limits[i]; if (resources.size() > res_id) inter += bound_blocking(resources[res_id], interval, limit.max_total_requests, limit.max_requests_per_source, exclude_tsk); } return inter; } static Interference max_local_request_span(const TaskInfo &tsk, const TaskInfos &tasks, const BlockingBounds& bounds) { Interference span; unsigned int i = 0; enumerate(tasks, it, i) { const TaskInfo& t = *it; if (&t != &tsk) { // only consider local, lower-priority tasks if (t.get_cluster() == tsk.get_cluster() && t.get_priority() >= tsk.get_priority()) { Interference b = bounds.get_max_request_span(i); span = std::max(span, b); } } } return span; } 
void charge_arrival_blocking(const ResourceSharingInfo& info, BlockingBounds& bounds) { unsigned int i = 0; const TaskInfos& tasks = info.get_tasks(); enumerate(tasks, it, i) { Interference inf = max_local_request_span(*it, tasks, bounds); bounds[i] += inf; // charge to total bounds.set_arrival_blocking(i, inf); } } // **** blocking term analysis **** ClusterLimits np_fifo_limits( const TaskInfo& tsk, const ClusterResources& clusters, unsigned int procs_per_cluster, const unsigned int issued, int dedicated_irq) { ClusterLimits limits; int idx; limits.reserve(clusters.size()); enumerate(clusters, ct, idx) { unsigned int total, parallelism = procs_per_cluster; if (idx == dedicated_irq) parallelism--; if (parallelism && (int) tsk.get_cluster() == idx) parallelism--; // At most one blocking request per remote CPU in // cluster per request. total = issued * parallelism; limits.push_back(ClusterLimit(total, issued)); } return limits; } Interference np_fifo_per_resource( const TaskInfo& tsk, const ClusterResources& clusters, unsigned int procs_per_cluster, unsigned int res_id, unsigned int issued, int dedicated_irq) { const unsigned long interval = tsk.get_response(); ClusterLimits limits = np_fifo_limits(tsk, clusters, procs_per_cluster, issued, dedicated_irq); return bound_blocking_all_clusters(clusters, limits, res_id, interval, &tsk); } #include "rw-blocking.h" void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts) { foreach(tsk.get_requests(), req) { unsigned int res_id = req->get_resource_id(); while (counts.size() <= res_id) counts.push_back(RWCount(counts.size())); if (req->is_read()) { counts[res_id].num_reads += req->get_num_requests(); counts[res_id].rlength = req->get_request_length(); } else { counts[res_id].num_writes += req->get_num_requests(); counts[res_id].wlength = req->get_request_length(); } } }