author    Jeff Garzik <jeff@garzik.org>  2006-12-03 22:22:41 -0500
committer Jeff Garzik <jeff@garzik.org>  2006-12-03 22:22:41 -0500
commit    d916faace3efc0bf19fe9a615a1ab8fa1a24cd93 (patch)
tree      e6adbc42541498306728490a4978afe116131299 /drivers/char/ftape/lowlevel/ftape-calibr.c
parent    2b5f6dcce5bf94b9b119e9ed8d537098ec61c3d2 (diff)
Remove long-unmaintained ftape driver subsystem.
It's bitrotten, long unmaintained, long hidden under BROKEN_ON_SMP, etc. As scheduled in feature-removal-schedule.txt, and ack'd several times on lkml.

Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/char/ftape/lowlevel/ftape-calibr.c')
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-calibr.c  275
1 file changed, 0 insertions(+), 275 deletions(-)
diff --git a/drivers/char/ftape/lowlevel/ftape-calibr.c b/drivers/char/ftape/lowlevel/ftape-calibr.c
deleted file mode 100644
index 8e50bfd35a52..000000000000
--- a/drivers/char/ftape/lowlevel/ftape-calibr.c
+++ /dev/null
@@ -1,275 +0,0 @@
/*
 * Copyright (C) 1993-1996 Bas Laarhoven.

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2, or (at your option)
 any later version.

 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with this program; see the file COPYING. If not, write to
 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 *
 * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-calibr.c,v $
 * $Revision: 1.2 $
 * $Date: 1997/10/05 19:18:08 $
 *
 * GP calibration routine for processor speed dependent
 * functions.
 */

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <asm/system.h>
#include <asm/io.h>
#if defined(__alpha__)
# include <asm/hwrpb.h>
#elif defined(__x86_64__)
# include <asm/msr.h>
# include <asm/timex.h>
#elif defined(__i386__)
# include <linux/timex.h>
#endif
#include <linux/ftape.h>
#include "../lowlevel/ftape-tracing.h"
#include "../lowlevel/ftape-calibr.h"
#include "../lowlevel/fdc-io.h"

#undef DEBUG

#if !defined(__alpha__) && !defined(__i386__) && !defined(__x86_64__)
# error Ftape is not implemented for this architecture!
#endif

#if defined(__alpha__) || defined(__x86_64__)
static unsigned long ps_per_cycle = 0;
#endif

static spinlock_t calibr_lock;

/*
 * Note: On Intel PCs, the clock ticks at 100 Hz (HZ==100) which is
 * too slow for certain timeouts (and that clock doesn't even tick
 * when interrupts are disabled). For that reason, the 8254 timer is
 * used directly to implement fine-grained timeouts. However, on
 * Alpha PCs, the 8254 is *not* used to implement the clock tick
 * (which is 1024 Hz, normally) and the 8254 timer runs at some
 * "random" frequency (it seems to run at 18Hz, but it's not safe to
 * rely on this value). Instead, we use the Alpha's "rpcc"
 * instruction to read cycle counts. As this is a 32 bit counter,
 * it will overflow only once per 30 seconds (on a 200MHz machine),
 * which is plenty.
 */

unsigned int ftape_timestamp(void)
{
#if defined(__alpha__)
	unsigned long r;

	asm volatile ("rpcc %0" : "=r" (r));
	return r;
#elif defined(__x86_64__)
	unsigned long r;
	rdtscl(r);
	return r;
#elif defined(__i386__)

/*
 * Note that there is some time between counter underflowing and jiffies
 * increasing, so the code below won't always give correct output.
 * -Vojtech
 */

	unsigned long flags;
	__u16 lo;
	__u16 hi;

	spin_lock_irqsave(&calibr_lock, flags);
	outb_p(0x00, 0x43);	/* latch the count ASAP */
	lo = inb_p(0x40);	/* read the latched count */
	lo |= inb(0x40) << 8;
	hi = jiffies;
	spin_unlock_irqrestore(&calibr_lock, flags);
	return ((hi + 1) * (unsigned int) LATCH) - lo;	/* downcounter ! */
#endif
}

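/*
 * Editor's note (not part of the original source): a worked example of
 * the i386 arithmetic above, assuming HZ == 100 and the usual i386 value
 * LATCH == (CLOCK_TICK_RATE + HZ/2) / HZ == 11932.  PIT channel 0 counts
 * *down* from LATCH to 0 once per jiffy, so the timestamp advances by
 * LATCH per jiffy plus the part of the current jiffy already elapsed,
 * i.e. hi * LATCH + (LATCH - lo), which is exactly ((hi + 1) * LATCH) - lo.
 * For instance, with hi == 5 and a latched value lo == 2000 the result is
 * 6 * 11932 - 2000 = 69592 ticks of the ~1.193 MHz PIT clock.
 */
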
static unsigned int short_ftape_timestamp(void)
{
#if defined(__alpha__) || defined(__x86_64__)
	return ftape_timestamp();
#elif defined(__i386__)
	unsigned int count;
	unsigned long flags;

	spin_lock_irqsave(&calibr_lock, flags);
	outb_p(0x00, 0x43);	/* latch the count ASAP */
	count = inb_p(0x40);	/* read the latched count */
	count |= inb(0x40) << 8;
	spin_unlock_irqrestore(&calibr_lock, flags);
	return (LATCH - count);	/* normal: downcounter */
#endif
}

static unsigned int diff(unsigned int t0, unsigned int t1)
{
#if defined(__alpha__) || defined(__x86_64__)
	return (t1 - t0);
#elif defined(__i386__)
	/*
	 * This is tricky: to work for both short and full ftape_timestamps
	 * we'll have to discriminate between these.
	 * If it _looks_ like short stamps with wrapping around, we'll
	 * assume they are.  This will generate a small error if it really
	 * was a (very large) delta from full ftape_timestamps.
	 */
	return (t1 <= t0 && t0 <= LATCH) ? t1 + LATCH - t0 : t1 - t0;
#endif
}

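/*
 * Editor's note (not part of the original source): a numeric sketch of
 * the wrap handling above, assuming LATCH == 11932.  Short timestamps
 * lie in 0..LATCH, so a later reading can be numerically smaller than an
 * earlier one when the down-counter reloads.  With t0 == 11000 and
 * t1 == 500, the condition (t1 <= t0 && t0 <= LATCH) holds and the
 * result is 500 + 11932 - 11000 = 1432 ticks, i.e. the counter wrapped
 * once between the two readings.
 */
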
static unsigned int usecs(unsigned int count)
{
#if defined(__alpha__) || defined(__x86_64__)
	return (ps_per_cycle * count) / 1000000UL;
#elif defined(__i386__)
	return (10000 * count) / ((CLOCK_TICK_RATE + 50) / 100);
#endif
}

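/*
 * Editor's note (not part of the original source): the i386 branch is
 * usec = count * 1000000 / CLOCK_TICK_RATE rearranged to keep the
 * intermediate product inside 32 bits: numerator and denominator are
 * both scaled down by 100, with +50 rounding the scaled tick rate.
 * Assuming the i386 value CLOCK_TICK_RATE == 1193182, this becomes
 * (10000 * count) / 11932, so count == 11932 yields 10000 usec, i.e.
 * one jiffy at HZ == 100.  The alpha/x86_64 branch instead converts a
 * cycle count via the precomputed picoseconds-per-cycle value.
 */
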
unsigned int ftape_timediff(unsigned int t0, unsigned int t1)
{
	/*
	 * Calculate difference in usec for ftape_timestamp results t0 & t1.
	 * Note that on the i386 platform with short time-stamps, the
	 * maximum allowed timespan is 1/HZ or we'll lose ticks!
	 */
	return usecs(diff(t0, t1));
}

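/*
 * Editor's note (not part of the original source): the 1/HZ limit follows
 * from short_ftape_timestamp() only reporting the position within the
 * current jiffy (0..LATCH).  Assuming HZ == 100, that is a 10 ms window;
 * two short timestamps taken more than one jiffy apart alias, because any
 * whole reloads of the down-counter in between are invisible to the caller.
 */
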
/* To get an indication of the I/O performance,
 * measure the duration of the inb() function.
 */
static void time_inb(void)
{
	int i;
	int t0, t1;
	unsigned long flags;
	int status;
	TRACE_FUN(ft_t_any);

	spin_lock_irqsave(&calibr_lock, flags);
	t0 = short_ftape_timestamp();
	for (i = 0; i < 1000; ++i) {
		status = inb(fdc.msr);
	}
	t1 = short_ftape_timestamp();
	spin_unlock_irqrestore(&calibr_lock, flags);
	TRACE(ft_t_info, "inb() duration: %d nsec", ftape_timediff(t0, t1));
	TRACE_EXIT;
}

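/*
 * Editor's note (not part of the original source): the "nsec" in the
 * trace message above is not a typo.  ftape_timediff() returns the total
 * time in usec for 1000 inb() calls, which is numerically equal to the
 * average duration of a single inb() in nsec.
 */
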
static void init_clock(void)
{
	TRACE_FUN(ft_t_any);

#if defined(__x86_64__)
	ps_per_cycle = 1000000000UL / cpu_khz;
#elif defined(__alpha__)
	extern struct hwrpb_struct *hwrpb;
	ps_per_cycle = (1000*1000*1000*1000UL) / hwrpb->cycle_freq;
#endif
	TRACE_EXIT;
}

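/*
 * Editor's note (not part of the original source): both branches compute
 * picoseconds per CPU cycle.  On x86_64, cpu_khz is the clock in kHz, so
 * 10^9 / cpu_khz gives ps/cycle (e.g. cpu_khz == 2000000, i.e. 2 GHz,
 * yields 500 ps).  On Alpha, hwrpb->cycle_freq is in Hz, hence the full
 * 10^12 numerator (e.g. a 200 MHz machine yields 5000 ps per cycle).
 */
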
/*
 * Input:	function taking int count as parameter.
 *		pointers to calculated calibration variables.
 */
void ftape_calibrate(char *name,
		     void (*fun) (unsigned int),
		     unsigned int *calibr_count,
		     unsigned int *calibr_time)
{
	static int first_time = 1;
	int i;
	unsigned int tc = 0;
	unsigned int count;
	unsigned int time;
#if defined(__i386__)
	unsigned int old_tc = 0;
	unsigned int old_count = 1;
	unsigned int old_time = 1;
#endif
	TRACE_FUN(ft_t_flow);

	if (first_time) {		/* get idea of I/O performance */
		init_clock();
		time_inb();
		first_time = 0;
	}
	/* value of timeout must be set so that on very slow systems
	 * it will give a time less than one jiffy, and on
	 * very fast systems it'll give reasonable precision.
	 */

	count = 40;
	for (i = 0; i < 15; ++i) {
		unsigned int t0;
		unsigned int t1;
		unsigned int once;
		unsigned int multiple;
		unsigned long flags;

		*calibr_count =
		*calibr_time = count;	/* set TC to 1 */
		spin_lock_irqsave(&calibr_lock, flags);
		fun(0);			/* dummy, get code into cache */
		t0 = short_ftape_timestamp();
		fun(0);			/* overhead + one test */
		t1 = short_ftape_timestamp();
		once = diff(t0, t1);
		t0 = short_ftape_timestamp();
		fun(count);		/* overhead + count tests */
		t1 = short_ftape_timestamp();
		multiple = diff(t0, t1);
		spin_unlock_irqrestore(&calibr_lock, flags);
		time = ftape_timediff(0, multiple - once);
		tc = (1000 * time) / (count - 1);
		TRACE(ft_t_any, "once:%3d us,%6d times:%6d us, TC:%5d ns",
		      usecs(once), count - 1, usecs(multiple), tc);
#if defined(__alpha__) || defined(__x86_64__)
		/*
		 * Increase the calibration count exponentially until the
		 * calibration time exceeds 100 ms.
		 */
		if (time >= 100*1000) {
			break;
		}
#elif defined(__i386__)
		/*
		 * increase the count until the resulting time nears 2/HZ,
		 * then the tc will drop sharply because we lose LATCH counts.
		 */
		if (tc <= old_tc / 2) {
			time = old_time;
			count = old_count;
			break;
		}
		old_tc = tc;
		old_count = count;
		old_time = time;
#endif
		count *= 2;
	}
	*calibr_count = count - 1;
	*calibr_time = time;
	TRACE(ft_t_info, "TC for `%s()' = %d nsec (at %d counts)",
	      name, (1000 * *calibr_time) / *calibr_count, *calibr_count);
	TRACE_EXIT;
}
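
/*
 * Editor's note (not part of the original source): a worked pass of the
 * calibration arithmetic above.  Timing fun(0) measures call overhead
 * plus one iteration ("once"); timing fun(count) measures the same
 * overhead plus count iterations ("multiple"), so multiple - once is the
 * cost of count - 1 iterations with the overhead cancelled out.  If, say,
 * count == 40 and ftape_timediff(0, multiple - once) == 78 usec, then
 * tc = (1000 * 78) / 39 = 2000 ns per iteration.  The count is doubled
 * each pass until the measurement window is long enough (100 ms on
 * alpha/x86_64) or, on i386, until tc collapses because the one-jiffy
 * short-timestamp window has been exceeded.
 */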