OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/linux/linux-2.4/drivers/char/ftape/lowlevel/ftape-calibr.c (rev 1765)

/*
 *      Copyright (C) 1993-1996 Bas Laarhoven.

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2, or (at your option)
 any later version.

 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with this program; see the file COPYING.  If not, write to
 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 *
 * $Source: /home/marcus/revision_ctrl_test/oc_cvs/cvs/or1k/linux/linux-2.4/drivers/char/ftape/lowlevel/ftape-calibr.c,v $
 * $Revision: 1.1.1.1 $
 * $Date: 2004-04-15 02:02:39 $
 *
 *      GP calibration routine for processor speed dependent
 *      functions.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <asm/system.h>
#include <asm/io.h>
#if defined(__alpha__)
# include <asm/hwrpb.h>
#elif defined(__x86_64__)
# include <asm/msr.h>
# include <asm/timex.h>
#elif defined(__i386__)
# include <linux/timex.h>
#endif
#include <linux/ftape.h>
#include "../lowlevel/ftape-tracing.h"
#include "../lowlevel/ftape-calibr.h"
#include "../lowlevel/fdc-io.h"

#undef DEBUG

#if !defined(__alpha__) && !defined(__i386__) && !defined(__x86_64__)
# error Ftape is not implemented for this architecture!
#endif

#if defined(__alpha__) || defined(__x86_64__)
static unsigned long ps_per_cycle = 0;
#endif

#if defined(__i386__)
extern spinlock_t i8253_lock;
#endif

/*
 * Note: On Intel PCs, the clock ticks at 100 Hz (HZ==100) which is
 * too slow for certain timeouts (and that clock doesn't even tick
 * when interrupts are disabled).  For that reason, the 8254 timer is
 * used directly to implement fine-grained timeouts.  However, on
 * Alpha PCs, the 8254 is *not* used to implement the clock tick
 * (which is 1024 Hz, normally) and the 8254 timer runs at some
 * "random" frequency (it seems to run at 18Hz, but it's not safe to
 * rely on this value).  Instead, we use the Alpha's "rpcc"
 * instruction to read cycle counts.  As this is a 32 bit counter,
 * it will overflow only once per 30 seconds (on a 200MHz machine),
 * which is plenty.
 */

unsigned int ftape_timestamp(void)
{
#if defined(__alpha__)
        unsigned long r;
        asm volatile ("rpcc %0" : "=r" (r));
        return r;
#elif defined(__x86_64__)
        unsigned long r;
        rdtscl(r);
        return r;
#elif defined(__i386__)

/*
 * Note that there is some time between counter underflowing and jiffies
 * increasing, so the code below won't always give correct output.
 * -Vojtech
 */

        unsigned long flags;
        __u16 lo;
        __u16 hi;

        spin_lock_irqsave(&i8253_lock, flags);
        outb_p(0x00, 0x43);     /* latch the count ASAP */
        lo = inb_p(0x40);       /* read the latched count */
        lo |= inb(0x40) << 8;
        hi = jiffies;
        spin_unlock_irqrestore(&i8253_lock, flags);

        return ((hi + 1) * (unsigned int) LATCH) - lo;  /* downcounter ! */
#endif
}
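
/*
 * A worked example of the i386 branch above, assuming the usual PIT
 * values (CLOCK_TICK_RATE 1193180 Hz, HZ 100, so LATCH is 11932): with
 * jiffies == 1000 and a latched count of 2000 the result is
 * (1000 + 1) * 11932 - 2000 == 11941932, i.e. the 8254 downcounter is
 * folded into a monotonically increasing count of ~1.19 MHz PIT ticks.
 */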

static unsigned int short_ftape_timestamp(void)
{
#if defined(__alpha__) || defined(__x86_64__)
        return ftape_timestamp();
#elif defined(__i386__)
        unsigned int count;
        unsigned long flags;

        spin_lock_irqsave(&i8253_lock, flags);
        outb_p(0x00, 0x43);     /* latch the count ASAP */
        count = inb_p(0x40);    /* read the latched count */
        count |= inb(0x40) << 8;
        spin_unlock_irqrestore(&i8253_lock, flags);

        return (LATCH - count); /* normal: downcounter */
#endif
}

static unsigned int diff(unsigned int t0, unsigned int t1)
{
#if defined(__alpha__) || defined(__x86_64__)
        return (t1 - t0);
#elif defined(__i386__)
        /*
         * This is tricky: to work for both short and full ftape_timestamps
         * we'll have to discriminate between these.
         * If it _looks_ like short stamps with wrapping around, we'll
         * assume they are.  This will generate a small error if it really
         * was a (very large) delta from full ftape_timestamps.
         */
        return (t1 <= t0 && t0 <= LATCH) ? t1 + LATCH - t0 : t1 - t0;
#endif
}
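
/*
 * A small worked example of the wrap handling above (again assuming
 * LATCH == 11932): for short stamps t0 == 11000 and t1 == 500 the
 * downcounter has wrapped, so diff() returns 500 + 11932 - 11000 == 1432
 * ticks instead of a huge unsigned difference.
 */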

static unsigned int usecs(unsigned int count)
{
#if defined(__alpha__) || defined(__x86_64__)
        return (ps_per_cycle * count) / 1000000UL;
#elif defined(__i386__)
        return (10000 * count) / ((CLOCK_TICK_RATE + 50) / 100);
#endif
}
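
/*
 * A worked conversion for the i386 branch above, assuming the usual
 * CLOCK_TICK_RATE of 1193180 Hz: (CLOCK_TICK_RATE + 50) / 100 is 11932,
 * so usecs(11932) == (10000 * 11932) / 11932 == 10000, i.e. one full
 * LATCH worth of PIT ticks corresponds to 10 ms, one jiffy at HZ == 100.
 */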

unsigned int ftape_timediff(unsigned int t0, unsigned int t1)
{
        /*
         *  Calculate difference in usec for ftape_timestamp results t0 & t1.
         *  Note that on the i386 platform with short time-stamps, the
         *  maximum allowed timespan is 1/HZ or we'll lose ticks!
         */
        return usecs(diff(t0, t1));
}

/*      To get an indication of the I/O performance,
 *      measure the duration of the inb() function.
 */
static void time_inb(void)
{
        int i;
        int t0, t1;
        unsigned long flags;
        int status;
        TRACE_FUN(ft_t_any);

        save_flags(flags);
        cli();
        t0 = short_ftape_timestamp();
        for (i = 0; i < 1000; ++i)
                status = inb(fdc.msr);
        t1 = short_ftape_timestamp();
        restore_flags(flags);

        TRACE(ft_t_info, "inb() duration: %d nsec", ftape_timediff(t0, t1));
        TRACE_EXIT;
}

static void init_clock(void)
{
        TRACE_FUN(ft_t_any);

#if defined(__x86_64__)
        ps_per_cycle = 1000000000UL / cpu_khz;
#elif defined(__alpha__)
        extern struct hwrpb_struct *hwrpb;
        ps_per_cycle = (1000*1000*1000*1000UL) / hwrpb->cycle_freq;
#endif
        TRACE_EXIT;
}
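
/*
 * A worked example of the scaling in init_clock() (the frequency here is
 * only illustrative): with cpu_khz == 500000 (a 500 MHz CPU),
 * ps_per_cycle becomes 1000000000UL / 500000 == 2000 picoseconds (2 ns)
 * per cycle, so usecs(count) above evaluates to (2000 * count) / 1000000.
 */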

/*
 *      Input:  function taking int count as parameter.
 *              pointers to calculated calibration variables.
 */
void ftape_calibrate(char *name,
                     void (*fun) (unsigned int),
                     unsigned int *calibr_count,
                     unsigned int *calibr_time)
{
        static int first_time = 1;
        int i;
        unsigned int tc = 0;
        unsigned int count;
        unsigned int time;
#if defined(__i386__)
        unsigned int old_tc = 0;
        unsigned int old_count = 1;
        unsigned int old_time = 1;
#endif
        TRACE_FUN(ft_t_flow);

        if (first_time) {             /* get idea of I/O performance */
                init_clock();
                time_inb();
                first_time = 0;
        }
        /*    value of timeout must be set so that on very slow systems
         *    it will give a time less than one jiffy, and on
         *    very fast systems it'll give reasonable precision.
         */

        count = 40;
        for (i = 0; i < 15; ++i) {
                unsigned int t0;
                unsigned int t1;
                unsigned int once;
                unsigned int multiple;
                unsigned long flags;

                *calibr_count =
                *calibr_time = count;   /* set TC to 1 */
                save_flags(flags);
                cli();
                fun(0);                 /* dummy, get code into cache */
                t0 = short_ftape_timestamp();
                fun(0);                 /* overhead + one test */
                t1 = short_ftape_timestamp();
                once = diff(t0, t1);
                t0 = short_ftape_timestamp();
                fun(count);             /* overhead + count tests */
                t1 = short_ftape_timestamp();
                multiple = diff(t0, t1);
                restore_flags(flags);
                time = ftape_timediff(0, multiple - once);
                tc = (1000 * time) / (count - 1);
                TRACE(ft_t_any, "once:%3d us,%6d times:%6d us, TC:%5d ns",
                      usecs(once), count - 1, usecs(multiple), tc);
#if defined(__alpha__) || defined(__x86_64__)
                /*
                 * Increase the calibration count exponentially until the
                 * calibration time exceeds 100 ms.
                 */
                if (time >= 100*1000)
                        break;
#elif defined(__i386__)
                /*
                 * increase the count until the resulting time nears 2/HZ,
                 * then the tc will drop sharply because we lose LATCH counts.
                 */
                if (tc <= old_tc / 2) {
                        time = old_time;
                        count = old_count;
                        break;
                }
                old_tc = tc;
                old_count = count;
                old_time = time;
#endif
                count *= 2;
        }
        *calibr_count = count - 1;
        *calibr_time  = time;
        TRACE(ft_t_info, "TC for `%s()' = %d nsec (at %d counts)",
              name, (1000 * *calibr_time) / *calibr_count, *calibr_count);
        TRACE_EXIT;
}
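
/*
 * A minimal sketch of how a caller might use ftape_calibrate().  The
 * names below (FTAPE_CALIBR_EXAMPLE, time_dummy_loop, dummy_count,
 * dummy_time) are hypothetical and only illustrate the calling
 * convention: the callback takes a repetition count, and the calibrated
 * count/time pair is returned through the two pointers.
 */
#ifdef FTAPE_CALIBR_EXAMPLE     /* illustrative only, never defined here */
static void time_dummy_loop(unsigned int count)
{
        static volatile int sink;
        unsigned int i;

        for (i = 0; i < count; ++i)
                sink = i;       /* the work whose per-iteration cost is timed */
}

static unsigned int dummy_count;
static unsigned int dummy_time;

static void example_calibration(void)
{
        ftape_calibrate("time_dummy_loop", time_dummy_loop,
                        &dummy_count, &dummy_time);
}
#endif /* FTAPE_CALIBR_EXAMPLE */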
