openrisc/trunk/gnu-dev/or1k-gcc/libgo/go/go/scanner/scanner_test.go (rev 774)

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scanner

import (
	"go/token"
	"os"
	"path/filepath"
	"runtime"
	"testing"
)

var fset = token.NewFileSet()

const /* class */ (
	special = iota
	literal
	operator
	keyword
)

func tokenclass(tok token.Token) int {
	switch {
	case tok.IsLiteral():
		return literal
	case tok.IsOperator():
		return operator
	case tok.IsKeyword():
		return keyword
	}
	return special
}
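
// Illustration (editor's addition, not part of the upstream test): with the
// classes above,
//
//	tokenclass(token.INT)     // literal
//	tokenclass(token.ADD)     // operator
//	tokenclass(token.FUNC)    // keyword
//	tokenclass(token.COMMENT) // special
//
// COMMENT falls through to special because go/token classifies it as neither
// a literal, an operator, nor a keyword.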

type elt struct {
	tok   token.Token
	lit   string
	class int
}

var tokens = [...]elt{
	// Special tokens
	{token.COMMENT, "/* a comment */", special},
	{token.COMMENT, "// a comment \n", special},

	// Identifiers and basic type literals
	{token.IDENT, "foobar", literal},
	{token.IDENT, "a۰۱۸", literal},
	{token.IDENT, "foo६४", literal},
	{token.IDENT, "bar9876", literal},
	{token.INT, "0", literal},
	{token.INT, "1", literal},
	{token.INT, "123456789012345678890", literal},
	{token.INT, "01234567", literal},
	{token.INT, "0xcafebabe", literal},
	{token.FLOAT, "0.", literal},
	{token.FLOAT, ".0", literal},
	{token.FLOAT, "3.14159265", literal},
	{token.FLOAT, "1e0", literal},
	{token.FLOAT, "1e+100", literal},
	{token.FLOAT, "1e-100", literal},
	{token.FLOAT, "2.71828e-1000", literal},
	{token.IMAG, "0i", literal},
	{token.IMAG, "1i", literal},
	{token.IMAG, "012345678901234567889i", literal},
	{token.IMAG, "123456789012345678890i", literal},
	{token.IMAG, "0.i", literal},
	{token.IMAG, ".0i", literal},
	{token.IMAG, "3.14159265i", literal},
	{token.IMAG, "1e0i", literal},
	{token.IMAG, "1e+100i", literal},
	{token.IMAG, "1e-100i", literal},
	{token.IMAG, "2.71828e-1000i", literal},
	{token.CHAR, "'a'", literal},
	{token.CHAR, "'\\000'", literal},
	{token.CHAR, "'\\xFF'", literal},
	{token.CHAR, "'\\uff16'", literal},
	{token.CHAR, "'\\U0000ff16'", literal},
	{token.STRING, "`foobar`", literal},
	{token.STRING, "`" + `foo
				bar` +
		"`",
		literal,
	},
	{token.STRING, "`\r`", literal},
	{token.STRING, "`foo\r\nbar`", literal},

	// Operators and delimiters
	{token.ADD, "+", operator},
	{token.SUB, "-", operator},
	{token.MUL, "*", operator},
	{token.QUO, "/", operator},
	{token.REM, "%", operator},

	{token.AND, "&", operator},
	{token.OR, "|", operator},
	{token.XOR, "^", operator},
	{token.SHL, "<<", operator},
	{token.SHR, ">>", operator},
	{token.AND_NOT, "&^", operator},

	{token.ADD_ASSIGN, "+=", operator},
	{token.SUB_ASSIGN, "-=", operator},
	{token.MUL_ASSIGN, "*=", operator},
	{token.QUO_ASSIGN, "/=", operator},
	{token.REM_ASSIGN, "%=", operator},

	{token.AND_ASSIGN, "&=", operator},
	{token.OR_ASSIGN, "|=", operator},
	{token.XOR_ASSIGN, "^=", operator},
	{token.SHL_ASSIGN, "<<=", operator},
	{token.SHR_ASSIGN, ">>=", operator},
	{token.AND_NOT_ASSIGN, "&^=", operator},

	{token.LAND, "&&", operator},
	{token.LOR, "||", operator},
	{token.ARROW, "<-", operator},
	{token.INC, "++", operator},
	{token.DEC, "--", operator},

	{token.EQL, "==", operator},
	{token.LSS, "<", operator},
	{token.GTR, ">", operator},
	{token.ASSIGN, "=", operator},
	{token.NOT, "!", operator},

	{token.NEQ, "!=", operator},
	{token.LEQ, "<=", operator},
	{token.GEQ, ">=", operator},
	{token.DEFINE, ":=", operator},
	{token.ELLIPSIS, "...", operator},

	{token.LPAREN, "(", operator},
	{token.LBRACK, "[", operator},
	{token.LBRACE, "{", operator},
	{token.COMMA, ",", operator},
	{token.PERIOD, ".", operator},

	{token.RPAREN, ")", operator},
	{token.RBRACK, "]", operator},
	{token.RBRACE, "}", operator},
	{token.SEMICOLON, ";", operator},
	{token.COLON, ":", operator},

	// Keywords
	{token.BREAK, "break", keyword},
	{token.CASE, "case", keyword},
	{token.CHAN, "chan", keyword},
	{token.CONST, "const", keyword},
	{token.CONTINUE, "continue", keyword},

	{token.DEFAULT, "default", keyword},
	{token.DEFER, "defer", keyword},
	{token.ELSE, "else", keyword},
	{token.FALLTHROUGH, "fallthrough", keyword},
	{token.FOR, "for", keyword},

	{token.FUNC, "func", keyword},
	{token.GO, "go", keyword},
	{token.GOTO, "goto", keyword},
	{token.IF, "if", keyword},
	{token.IMPORT, "import", keyword},

	{token.INTERFACE, "interface", keyword},
	{token.MAP, "map", keyword},
	{token.PACKAGE, "package", keyword},
	{token.RANGE, "range", keyword},
	{token.RETURN, "return", keyword},

	{token.SELECT, "select", keyword},
	{token.STRUCT, "struct", keyword},
	{token.SWITCH, "switch", keyword},
	{token.TYPE, "type", keyword},
	{token.VAR, "var", keyword},
}

const whitespace = "  \t  \n\n\n" // to separate tokens

var source = func() []byte {
	var src []byte
	for _, t := range tokens {
		src = append(src, t.lit...)
		src = append(src, whitespace...)
	}
	return src
}()

type testErrorHandler struct {
	t *testing.T
}

func (h *testErrorHandler) Error(pos token.Position, msg string) {
	h.t.Errorf("Error() called (msg = %s)", msg)
}

func newlineCount(s string) int {
	n := 0
	for i := 0; i < len(s); i++ {
		if s[i] == '\n' {
			n++
		}
	}
	return n
}

func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
	pos := fset.Position(p)
	if pos.Filename != expected.Filename {
		t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
	}
	if pos.Offset != expected.Offset {
		t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
	}
	if pos.Line != expected.Line {
		t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
	}
	if pos.Column != expected.Column {
		t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
	}
}

// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	// make source
	src_linecount := newlineCount(string(source))
	whitespace_linecount := newlineCount(whitespace)

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, &testErrorHandler{t}, ScanComments|dontInsertSemis)
	index := 0
	// epos is the expected position
	epos := token.Position{
		Filename: "",
		Offset:   0,
		Line:     1,
		Column:   1,
	}
	for {
		pos, tok, lit := s.Scan()
		if lit == "" {
			// no literal value for non-literal tokens
			lit = tok.String()
		}
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
		}
		if tok == token.EOF {
			lit = ""
			epos.Line = src_linecount
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}
		if e.tok.IsLiteral() {
			// no CRs in raw string literals
			elit := e.lit
			if elit[0] == '`' {
				elit = string(stripCR([]byte(elit)))
				epos.Offset += len(e.lit) - len(lit) // correct position
			}
			if lit != elit {
				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
			}
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		epos.Offset += len(lit) + len(whitespace)
		epos.Line += newlineCount(lit) + whitespace_linecount
		if tok == token.COMMENT && lit[1] == '/' {
			// correct for the unaccounted '\n' in a //-style comment
			epos.Offset++
			epos.Line++
		}
		index++
		if tok == token.EOF {
			break
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
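
// Note on the raw-string cases (editor's addition): the Go spec discards
// carriage returns ('\r') from the value of a raw string literal, so the
// literal returned by Scan may be shorter than its source text. TestScan
// compensates by stripping CRs from the expected literal (stripCR) and
// adjusting epos.Offset by len(e.lit) - len(lit).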

func checkSemi(t *testing.T, line string, mode Mode) {
	var S Scanner
	file := fset.AddFile("TestSemis", fset.Base(), len(line))
	S.Init(file, []byte(line), nil, mode)
	pos, tok, lit := S.Scan()
	for tok != token.EOF {
		if tok == token.ILLEGAL {
			// the illegal token literal indicates what
			// kind of semicolon literal to expect
			semiLit := "\n"
			if lit[0] == '#' {
				semiLit = ";"
			}
			// next token must be a semicolon
			semiPos := file.Position(pos)
			semiPos.Offset++
			semiPos.Column++
			pos, tok, lit = S.Scan()
			if tok == token.SEMICOLON {
				if lit != semiLit {
					t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit)
				}
				checkPos(t, line, pos, semiPos)
			} else {
				t.Errorf("bad token for %q: got %s, expected ;", line, tok)
			}
		} else if tok == token.SEMICOLON {
			t.Errorf("bad token for %q: got ;, expected no ;", line)
		}
		pos, tok, lit = S.Scan()
	}
}
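
// Worked example of the marker convention (editor's addition): for the input
// "foo$\n", checkSemi sees IDENT "foo", then ILLEGAL with literal "$"; since
// that literal does not start with '#', the very next token must be an
// automatically inserted SEMICOLON with literal "\n". For "#;", the ILLEGAL
// "#" announces an explicitly written semicolon, whose literal must be ";".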

var lines = []string{
	// # indicates a semicolon present in the source
	// $ indicates an automatically inserted semicolon
	"",
	"#;",
	"foo$\n",
	"123$\n",
	"1.2$\n",
	"'x'$\n",
	`"x"` + "$\n",
	"`x`$\n",

	"+\n",
	"-\n",
	"*\n",
	"/\n",
	"%\n",

	"&\n",
	"|\n",
	"^\n",
	"<<\n",
	">>\n",
	"&^\n",

	"+=\n",
	"-=\n",
	"*=\n",
	"/=\n",
	"%=\n",

	"&=\n",
	"|=\n",
	"^=\n",
	"<<=\n",
	">>=\n",
	"&^=\n",

	"&&\n",
	"||\n",
	"<-\n",
	"++$\n",
	"--$\n",

	"==\n",
	"<\n",
	">\n",
	"=\n",
	"!\n",

	"!=\n",
	"<=\n",
	">=\n",
	":=\n",
	"...\n",

	"(\n",
	"[\n",
	"{\n",
	",\n",
	".\n",

	")$\n",
	"]$\n",
	"}$\n",
	"#;\n",
	":\n",

	"break$\n",
	"case\n",
	"chan\n",
	"const\n",
	"continue$\n",

	"default\n",
	"defer\n",
	"else\n",
	"fallthrough$\n",
	"for\n",

	"func\n",
	"go\n",
	"goto\n",
	"if\n",
	"import\n",

	"interface\n",
	"map\n",
	"package\n",
	"range\n",
	"return$\n",

	"select\n",
	"struct\n",
	"switch\n",
	"type\n",
	"var\n",

	"foo$//comment\n",
	"foo$//comment",
	"foo$/*comment*/\n",
	"foo$/*\n*/",
	"foo$/*comment*/    \n",
	"foo$/*\n*/    ",

	"foo    $// comment\n",
	"foo    $// comment",
	"foo    $/*comment*/\n",
	"foo    $/*\n*/",
	"foo    $/*  */ /* \n */ bar$/**/\n",
	"foo    $/*0*/ /*1*/ /*2*/\n",

	"foo    $/*comment*/    \n",
	"foo    $/*0*/ /*1*/ /*2*/    \n",
	"foo    $/**/ /*-------------*/       /*----\n*/bar       $/*  \n*/baa$\n",
	"foo    $/* an EOF terminates a line */",
	"foo    $/* an EOF terminates a line */ /*",
	"foo    $/* an EOF terminates a line */ //",

	"package main$\n\nfunc main() {\n\tif {\n\t\treturn /* */ }$\n}$\n",
	"package main$",
}

func TestSemis(t *testing.T) {
	for _, line := range lines {
		checkSemi(t, line, 0)
		checkSemi(t, line, ScanComments)

		// if the input ended in newlines, the input must tokenize the
		// same with or without those newlines
		for i := len(line) - 1; i >= 0 && line[i] == '\n'; i-- {
			checkSemi(t, line[0:i], 0)
			checkSemi(t, line[0:i], ScanComments)
		}
	}
}

type segment struct {
	srcline  string // a line of source text
	filename string // filename for current token
	line     int    // line number for current token
}

var segments = []segment{
	// exactly one token per line since the test consumes one token per segment
	{"  line1", filepath.Join("dir", "TestLineComments"), 1},
	{"\nline2", filepath.Join("dir", "TestLineComments"), 2},
	{"\nline3  //line File1.go:100", filepath.Join("dir", "TestLineComments"), 3}, // bad line comment, ignored
	{"\nline4", filepath.Join("dir", "TestLineComments"), 4},
	{"\n//line File1.go:100\n  line100", filepath.Join("dir", "File1.go"), 100},
	{"\n//line File2.go:200\n  line200", filepath.Join("dir", "File2.go"), 200},
	{"\n//line :1\n  line1", "dir", 1},
	{"\n//line foo:42\n  line42", filepath.Join("dir", "foo"), 42},
	{"\n //line foo:42\n  line44", filepath.Join("dir", "foo"), 44},           // bad line comment, ignored
	{"\n//line foo 42\n  line46", filepath.Join("dir", "foo"), 46},            // bad line comment, ignored
	{"\n//line foo:42 extra text\n  line48", filepath.Join("dir", "foo"), 48}, // bad line comment, ignored
	{"\n//line ./foo:42\n  line42", filepath.Join("dir", "foo"), 42},
	{"\n//line a/b/c/File1.go:100\n  line100", filepath.Join("dir", "a", "b", "c", "File1.go"), 100},
}
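
// Editor's note on the cases above: a "//line filename:line" comment is
// honored only when it begins in column 1 (hence the indented variant is
// ignored); it overrides the filename and line number reported for the
// following lines. A relative filename is interpreted relative to the
// directory of the file being scanned, which is why "File1.go" is reported
// as filepath.Join("dir", "File1.go").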

var unixsegments = []segment{
	{"\n//line /bar:42\n  line42", "/bar", 42},
}

var winsegments = []segment{
	{"\n//line c:\\bar:42\n  line42", "c:\\bar", 42},
	{"\n//line c:\\dir\\File1.go:100\n  line100", "c:\\dir\\File1.go", 100},
}

// Verify that comments of the form "//line filename:line" are interpreted correctly.
func TestLineComments(t *testing.T) {
	segs := segments
	if runtime.GOOS == "windows" {
		segs = append(segs, winsegments...)
	} else {
		segs = append(segs, unixsegments...)
	}

	// make source
	var src string
	for _, e := range segs {
		src += e.srcline
	}

	// verify scan
	var S Scanner
	file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src))
	S.Init(file, []byte(src), nil, dontInsertSemis)
	for _, s := range segs {
		p, _, lit := S.Scan()
		pos := file.Position(p)
		checkPos(t, lit, p, token.Position{
			Filename: s.filename,
			Offset:   pos.Offset,
			Line:     s.line,
			Column:   pos.Column,
		})
	}

	if S.ErrorCount != 0 {
		t.Errorf("found %d errors", S.ErrorCount)
	}
}

// Verify that initializing the same scanner more than once works correctly.
func TestInit(t *testing.T) {
	var s Scanner

	// 1st init
	src1 := "if true { }"
	f1 := fset.AddFile("src1", fset.Base(), len(src1))
	s.Init(f1, []byte(src1), nil, dontInsertSemis)
	if f1.Size() != len(src1) {
		t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
	}
	s.Scan()              // if
	s.Scan()              // true
	_, tok, _ := s.Scan() // {
	if tok != token.LBRACE {
		t.Errorf("bad token: got %s, expected %s", tok, token.LBRACE)
	}

	// 2nd init
	src2 := "go true { ]"
	f2 := fset.AddFile("src2", fset.Base(), len(src2))
	s.Init(f2, []byte(src2), nil, dontInsertSemis)
	if f2.Size() != len(src2) {
		t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
	}
	_, tok, _ = s.Scan() // go
	if tok != token.GO {
		t.Errorf("bad token: got %s, expected %s", tok, token.GO)
	}

	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}

func TestStdErrorHander(t *testing.T) {
	const src = "@\n" + // illegal character, causes an error
		"@ @\n" + // two errors on the same line
		"//line File2:20\n" +
		"@\n" + // different file, but same line
		"//line File2:1\n" +
		"@ @\n" + // same file, decreasing line number
		"//line File1:1\n" +
		"@ @ @" // original file, line 1 again

	v := new(ErrorVector)
	var s Scanner
	s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), v, dontInsertSemis)
	for {
		if _, tok, _ := s.Scan(); tok == token.EOF {
			break
		}
	}

	list := v.GetErrorList(Raw)
	if len(list) != 9 {
		t.Errorf("found %d raw errors, expected 9", len(list))
		PrintError(os.Stderr, list)
	}

	list = v.GetErrorList(Sorted)
	if len(list) != 9 {
		t.Errorf("found %d sorted errors, expected 9", len(list))
		PrintError(os.Stderr, list)
	}

	list = v.GetErrorList(NoMultiples)
	if len(list) != 4 {
		t.Errorf("found %d one-per-line errors, expected 4", len(list))
		PrintError(os.Stderr, list)
	}

	if v.ErrorCount() != s.ErrorCount {
		t.Errorf("found %d errors, expected %d", v.ErrorCount(), s.ErrorCount)
	}
}
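
// The expected counts follow from src (editor's addition): it contains nine
// '@' characters, giving 9 raw and 9 sorted errors, but only four distinct
// file:line positions once the //line comments are applied (File1:1,
// File1:2, File2:20, File2:1), giving 4 one-per-line errors.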

type errorCollector struct {
	cnt int            // number of errors encountered
	msg string         // last error message encountered
	pos token.Position // last error position encountered
}

func (h *errorCollector) Error(pos token.Position, msg string) {
	h.cnt++
	h.msg = msg
	h.pos = pos
}

func checkError(t *testing.T, src string, tok token.Token, pos int, err string) {
	var s Scanner
	var h errorCollector
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &h, ScanComments|dontInsertSemis)
	_, tok0, _ := s.Scan()
	_, tok1, _ := s.Scan()
	if tok0 != tok {
		t.Errorf("%q: got %s, expected %s", src, tok0, tok)
	}
	if tok1 != token.EOF {
		t.Errorf("%q: got %s, expected EOF", src, tok1)
	}
	cnt := 0
	if err != "" {
		cnt = 1
	}
	if h.cnt != cnt {
		t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
	}
	if h.msg != err {
		t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
	}
	if h.pos.Offset != pos {
		t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
	}
}

var errors = []struct {
	src string
	tok token.Token
	pos int
	err string
}{
	{"\a", token.ILLEGAL, 0, "illegal character U+0007"},
	{`#`, token.ILLEGAL, 0, "illegal character U+0023 '#'"},
	{`…`, token.ILLEGAL, 0, "illegal character U+2026 '…'"},
	{`' '`, token.CHAR, 0, ""},
	{`''`, token.CHAR, 0, "illegal character literal"},
	{`'\8'`, token.CHAR, 2, "unknown escape sequence"},
	{`'\08'`, token.CHAR, 3, "illegal character in escape sequence"},
	{`'\x0g'`, token.CHAR, 4, "illegal character in escape sequence"},
	{`'\Uffffffff'`, token.CHAR, 2, "escape sequence is invalid Unicode code point"},
	{`'`, token.CHAR, 0, "character literal not terminated"},
	{`""`, token.STRING, 0, ""},
	{`"`, token.STRING, 0, "string not terminated"},
	{"``", token.STRING, 0, ""},
	{"`", token.STRING, 0, "string not terminated"},
	{"/**/", token.COMMENT, 0, ""},
	{"/*", token.COMMENT, 0, "comment not terminated"},
	{"077", token.INT, 0, ""},
	{"078.", token.FLOAT, 0, ""},
	{"07801234567.", token.FLOAT, 0, ""},
	{"078e0", token.FLOAT, 0, ""},
	{"078", token.INT, 0, "illegal octal number"},
	{"07800000009", token.INT, 0, "illegal octal number"},
	{"0x", token.INT, 0, "illegal hexadecimal number"},
	{"0X", token.INT, 0, "illegal hexadecimal number"},
	{"\"abc\x00def\"", token.STRING, 4, "illegal character NUL"},
	{"\"abc\x80def\"", token.STRING, 4, "illegal UTF-8 encoding"},
}
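
// A subtlety in the octal cases (editor's addition): "078" is an illegal
// octal INT because a leading 0 starts an octal literal, yet "078." and
// "078e0" are valid FLOATs; once a '.' or an exponent appears, the digits
// are reinterpreted as decimal.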

func TestScanErrors(t *testing.T) {
	for _, e := range errors {
		checkError(t, e.src, e.tok, e.pos, e.err)
	}
}

func BenchmarkScan(b *testing.B) {
	b.StopTimer()
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(source))
	var s Scanner
	b.StartTimer()
	for i := b.N - 1; i >= 0; i-- {
		s.Init(file, source, nil, ScanComments)
		for {
			_, tok, _ := s.Scan()
			if tok == token.EOF {
				break
			}
		}
	}
}
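
// Hypothetical usage (editor's addition; not part of this libgo tree's own
// build): with a standard Go toolchain the benchmark above would be invoked
// roughly as
//
//	go test -run=NONE -bench=Scan go/scanner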
