openrisc/trunk/gnu-dev/or1k-gcc/libgo/go/testing/benchmark.go (rev 747)

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package testing

import (
        "flag"
        "fmt"
        "os"
        "runtime"
        "time"
)

var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
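// These flags are normally passed on the test driver's command line; for
// example (the invocation shown is illustrative, not taken from this file):
//
//      gotest -test.bench=".*" -test.benchtime=2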
 
// An internal type but exported because it is cross-package; part of the implementation
// of gotest.
type InternalBenchmark struct {
        Name string
        F    func(b *B)
}

// B is a type passed to Benchmark functions to manage benchmark
// timing and to specify the number of iterations to run.
type B struct {
        common
        N         int
        benchmark InternalBenchmark
        bytes     int64
        timerOn   bool
        result    BenchmarkResult
}
 
// StartTimer starts timing a test.  This function is called automatically
// before a benchmark starts, but it can also be used to resume timing after
// a call to StopTimer.
func (b *B) StartTimer() {
        if !b.timerOn {
                b.start = time.Now()
                b.timerOn = true
        }
}

// StopTimer stops timing a test.  This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
func (b *B) StopTimer() {
        if b.timerOn {
                b.duration += time.Now().Sub(b.start)
                b.timerOn = false
        }
}

// ResetTimer sets the elapsed benchmark time to zero.
// It does not affect whether the timer is running.
func (b *B) ResetTimer() {
        if b.timerOn {
                b.start = time.Now()
        }
        b.duration = 0
}
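// For illustration, a benchmark that needs expensive per-iteration setup can
// exclude it from the measurement like this (makeInput and process are
// hypothetical helpers, not part of this package):
//
//      func BenchmarkProcess(b *testing.B) {
//              for i := 0; i < b.N; i++ {
//                      b.StopTimer()
//                      in := makeInput()
//                      b.StartTimer()
//                      process(in)
//              }
//      }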
 
// SetBytes records the number of bytes processed in a single operation.
// If this is called, the benchmark will report ns/op and MB/s.
func (b *B) SetBytes(n int64) { b.bytes = n }
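// For example, a benchmark that hashes a 1 KB buffer on every iteration can
// report throughput by recording the buffer size up front (hash is a
// hypothetical helper, used only for the sketch):
//
//      func BenchmarkHash1K(b *testing.B) {
//              buf := make([]byte, 1024)
//              b.SetBytes(int64(len(buf)))
//              for i := 0; i < b.N; i++ {
//                      hash(buf)
//              }
//      }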
 
// nsPerOp returns the average elapsed time per iteration so far, in nanoseconds.
func (b *B) nsPerOp() int64 {
        if b.N <= 0 {
                return 0
        }
        return b.duration.Nanoseconds() / int64(b.N)
}

// runN runs a single benchmark for the specified number of iterations.
func (b *B) runN(n int) {
        // Try to get a comparable environment for each run
        // by clearing garbage from previous runs.
        runtime.GC()
        b.N = n
        b.ResetTimer()
        b.StartTimer()
        b.benchmark.F(b)
        b.StopTimer()
}
 
// min returns the smaller of x and y.
func min(x, y int) int {
        if x > y {
                return y
        }
        return x
}

// max returns the larger of x and y.
func max(x, y int) int {
        if x < y {
                return y
        }
        return x
}

// roundDown10 rounds a number down to the nearest power of 10.
func roundDown10(n int) int {
        var tens = 0
        // tens = floor(log_10(n))
        for n > 10 {
                n = n / 10
                tens++
        }
        // result = 10^tens
        result := 1
        for i := 0; i < tens; i++ {
                result *= 10
        }
        return result
}

// roundUp rounds n up to a number of the form [1eX, 2eX, 5eX].
func roundUp(n int) int {
        base := roundDown10(n)
        if n < (2 * base) {
                return 2 * base
        }
        if n < (5 * base) {
                return 5 * base
        }
        return 10 * base
}
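// For example, roundDown10(137) == 100, so roundUp(137) == 200, while
// roundUp(4268) == 5000 and roundUp(6713) == 10000.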
 
// run times the benchmark function in a separate goroutine.
func (b *B) run() BenchmarkResult {
        go b.launch()
        <-b.signal
        return b.result
}

// launch launches the benchmark function.  It gradually increases the number
// of benchmark iterations until the benchmark runs for a second in order
// to get a reasonable measurement.  It prints timing information in this form
//              testing.BenchmarkHello  100000          19 ns/op
// launch is run by the run function as a separate goroutine.
func (b *B) launch() {
        // Run the benchmark for a single iteration in case it's expensive.
        n := 1

        // Signal that we're done whether we return normally
        // or by FailNow's runtime.Goexit.
        defer func() {
                b.signal <- b
        }()

        b.runN(n)
        // Run the benchmark for at least the specified amount of time.
        d := time.Duration(*benchTime * float64(time.Second))
        for !b.failed && b.duration < d && n < 1e9 {
                last := n
                // Predict iterations/sec.
                if b.nsPerOp() == 0 {
                        n = 1e9
                } else {
                        n = int(d.Nanoseconds() / b.nsPerOp())
                }
                // Run more iterations than we think we'll need for a second (1.5x).
                // Don't grow too fast in case we had timing errors previously.
                // Be sure to run at least one more than last time.
                n = max(min(n+n/2, 100*last), last+1)
                // Round up to something easy to read.
                n = roundUp(n)
                b.runN(n)
        }
        b.result = BenchmarkResult{b.N, b.duration, b.bytes}
}
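// As an illustration of the growth strategy: if one iteration takes about
// 1000ns and benchTime is the default 1 second, n starts at 1 and the
// predicted count of 1e6 iterations is capped at 100*last and rounded, so n
// becomes 100, then 10000, then 1000000, at which point the accumulated
// duration reaches the target and the loop exits.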
 
// The results of a benchmark run.
type BenchmarkResult struct {
        N     int           // The number of iterations.
        T     time.Duration // The total time taken.
        Bytes int64         // Bytes processed in one iteration.
}

// NsPerOp returns the average time per iteration, in nanoseconds.
func (r BenchmarkResult) NsPerOp() int64 {
        if r.N <= 0 {
                return 0
        }
        return r.T.Nanoseconds() / int64(r.N)
}
 
// mbPerSec returns the throughput in MB/s, or 0 when it cannot be computed.
func (r BenchmarkResult) mbPerSec() float64 {
        if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
                return 0
        }
        return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
}

// String formats the result in the standard benchmark output form:
// iteration count, ns/op and, when SetBytes was used, MB/s.
func (r BenchmarkResult) String() string {
        mbs := r.mbPerSec()
        mb := ""
        if mbs != 0 {
                mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
        }
        nsop := r.NsPerOp()
        ns := fmt.Sprintf("%10d ns/op", nsop)
        if r.N > 0 && nsop < 100 {
                // The format specifiers here make sure that
                // the ones digits line up for all three possible formats.
                if nsop < 10 {
                        ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
                } else {
                        ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
                }
        }
        return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
}
 
// An internal function but exported because it is cross-package; part of the implementation
// of gotest.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
        // If no flag was specified, don't run benchmarks.
        if len(*matchBenchmarks) == 0 {
                return
        }
        for _, Benchmark := range benchmarks {
                matched, err := matchString(*matchBenchmarks, Benchmark.Name)
                if err != nil {
                        fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
                        os.Exit(1)
                }
                if !matched {
                        continue
                }
                for _, procs := range cpuList {
                        runtime.GOMAXPROCS(procs)
                        b := &B{
                                common: common{
                                        signal: make(chan interface{}),
                                },
                                benchmark: Benchmark,
                        }
                        benchName := Benchmark.Name
                        if procs != 1 {
                                benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
                        }
                        fmt.Printf("%s\t", benchName)
                        r := b.run()
                        if b.failed {
                                // The output could be very long here, but probably isn't.
                                // We print it all, regardless, because we don't want to trim the reason
                                // the benchmark failed.
                                fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
                                continue
                        }
                        fmt.Printf("%v\n", r)
                        // Unlike with tests, we ignore the -chatty flag and always print output for
                        // benchmarks since the output generation time will skew the results.
                        if len(b.output) > 0 {
                                b.trimOutput()
                                fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
                        }
                        if p := runtime.GOMAXPROCS(-1); p != procs {
                                fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
                        }
                }
        }
}
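// For illustration, a generated test driver could call this roughly as
// follows (BenchmarkHello and the use of regexp.MatchString are assumptions
// for the sketch, not taken from this file):
//
//      benchmarks := []testing.InternalBenchmark{
//              {"BenchmarkHello", BenchmarkHello},
//      }
//      testing.RunBenchmarks(regexp.MatchString, benchmarks)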
 
// trimOutput shortens the output from a benchmark, which can be very long.
func (b *B) trimOutput() {
        // The output is likely to appear multiple times because the benchmark
        // is run multiple times, but at least it will be seen. This is not a big deal
        // because benchmarks rarely print, but just in case, we trim it if it's too long.
        const maxNewlines = 10
        for nlCount, j := 0, 0; j < len(b.output); j++ {
                if b.output[j] == '\n' {
                        nlCount++
                        if nlCount >= maxNewlines {
                                b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
                                break
                        }
                }
        }
}
 
// Benchmark benchmarks a single function. Useful for creating
// custom benchmarks that do not use gotest.
func Benchmark(f func(b *B)) BenchmarkResult {
        b := &B{
                common: common{
                        signal: make(chan interface{}),
                },
                benchmark: InternalBenchmark{"", f},
        }
        return b.run()
}
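// For illustration, a program could use Benchmark directly, outside gotest,
// roughly like this (the buffer size and loop body are assumptions for the
// sketch):
//
//      r := testing.Benchmark(func(b *testing.B) {
//              buf := make([]byte, 1024)
//              b.SetBytes(int64(len(buf)))
//              for i := 0; i < b.N; i++ {
//                      for j := range buf {
//                              buf[j] = byte(j)
//                      }
//              }
//      })
//      fmt.Println(r) // prints N, ns/op and, because SetBytes was called, MB/s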
