Subversion repository "openrisc": https://opencores.org/ocsvn/openrisc/openrisc/trunk
File: openrisc/trunk/gnu-dev/or1k-gcc/libgo/go/go/printer/printer.go (Rev 747)
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package printer implements printing of AST nodes.
package printer

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/token"
	"io"
	"os"
	"strconv"
	"strings"
	"text/tabwriter"
)

const (
	maxNewlines = 2     // max. number of newlines between source text
	debug       = false // enable for debugging
	infinity    = 1 << 30
)

type whiteSpace byte

const (
	ignore   = whiteSpace(0)
	blank    = whiteSpace(' ')
	vtab     = whiteSpace('\v')
	newline  = whiteSpace('\n')
	formfeed = whiteSpace('\f')
	indent   = whiteSpace('>')
	unindent = whiteSpace('<')
)

// Use ignoreMultiLine if the multiLine information is not important.
var ignoreMultiLine = new(bool)

// A pmode value represents the current printer mode.
type pmode int

const (
	noExtraLinebreak pmode = 1 << iota
)

type printer struct {
	// Configuration (does not change after initialization)
	Config
	fset *token.FileSet

	// Current state
	output      bytes.Buffer // raw printer result
	indent      int          // current indentation
	mode        pmode        // current printer mode
	impliedSemi bool         // if set, a linebreak implies a semicolon
	lastTok     token.Token  // the last token printed (token.ILLEGAL if it's whitespace)
	wsbuf       []whiteSpace // delayed white space

	// The (possibly estimated) position in the generated output;
	// in AST space (i.e., pos is set whenever a token position is
	// known accurately, and updated depending on what has been
	// written).
	pos token.Position

	// The value of pos immediately after the last item has been
	// written using writeItem.
	last token.Position

	// The list of all source comments, in order of appearance.
	comments        []*ast.CommentGroup // may be nil
	cindex          int                 // current comment index
	useNodeComments bool                // if not set, ignore lead and line comments of nodes

	// Information about p.comments[p.cindex]; set up by nextComment.
	comment        *ast.CommentGroup // = p.comments[p.cindex]; or nil
	commentOffset  int               // = p.posFor(p.comments[p.cindex].List[0].Pos()).Offset; or infinity
	commentNewline bool              // true if the comment group contains newlines

	// Cache of already computed node sizes.
	nodeSizes map[ast.Node]int

	// Cache of most recently computed line position.
	cachedPos  token.Pos
	cachedLine int // line corresponding to cachedPos
}

func (p *printer) init(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
	p.Config = *cfg
	p.fset = fset
	p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
	p.nodeSizes = nodeSizes
	p.cachedPos = -1
}

// commentsHaveNewline reports whether a list of comments belonging to
// an *ast.CommentGroup contains newlines. Because the position information
// may only be partially correct, we also have to read the comment text.
func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
	// len(list) > 0
	line := p.lineFor(list[0].Pos())
	for i, c := range list {
		if i > 0 && p.lineFor(list[i].Pos()) != line {
			// not all comments on the same line
			return true
		}
		if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
			return true
		}
	}
	_ = line
	return false
}

func (p *printer) nextComment() {
	for p.cindex < len(p.comments) {
		c := p.comments[p.cindex]
		p.cindex++
		if list := c.List; len(list) > 0 {
			p.comment = c
			p.commentOffset = p.posFor(list[0].Pos()).Offset
			p.commentNewline = p.commentsHaveNewline(list)
			return
		}
		// we should not reach here (correct ASTs don't have empty
		// ast.CommentGroup nodes), but be conservative and try again
	}
	// no more comments
	p.commentOffset = infinity
}

func (p *printer) internalError(msg ...interface{}) {
	if debug {
		fmt.Print(p.pos.String() + ": ")
		fmt.Println(msg...)
		panic("go/printer")
	}
}

func (p *printer) posFor(pos token.Pos) token.Position {
	// not used frequently enough to cache entire token.Position
	return p.fset.Position(pos)
}

func (p *printer) lineFor(pos token.Pos) int {
	if pos != p.cachedPos {
		p.cachedPos = pos
		p.cachedLine = p.fset.Position(pos).Line
	}
	return p.cachedLine
}

// writeByte writes ch to p.output and updates p.pos.
func (p *printer) writeByte(ch byte) {
	p.output.WriteByte(ch)
	p.pos.Offset++
	p.pos.Column++

	if ch == '\n' || ch == '\f' {
		// write indentation
		// use "hard" htabs - indentation columns
		// must not be discarded by the tabwriter
		const htabs = "\t\t\t\t\t\t\t\t"
		j := p.indent
		for j > len(htabs) {
			p.output.WriteString(htabs)
			j -= len(htabs)
		}
		p.output.WriteString(htabs[0:j])

		// update p.pos
		p.pos.Line++
		p.pos.Offset += p.indent
		p.pos.Column = 1 + p.indent
	}
}

// writeByteN writes ch n times to p.output and updates p.pos.
func (p *printer) writeByteN(ch byte, n int) {
	for n > 0 {
		p.writeByte(ch)
		n--
	}
}

// writeString writes the string s to p.output and updates p.pos.
// If isLit is set, s is escaped w/ tabwriter.Escape characters
// to protect s from being interpreted by the tabwriter.
//
// Note: writeString is only used to write Go tokens, literals, and
// comments, all of which must be written literally. Thus, it is correct
// to always set isLit = true. However, setting it explicitly only when
// needed (i.e., when we don't know that s contains no tabs or line breaks)
// avoids processing extra escape characters and reduces run time of the
// printer benchmark by up to 10%.
//
func (p *printer) writeString(s string, isLit bool) {
	if isLit {
		// Protect s such that it passes through the tabwriter
		// unchanged. Note that valid Go programs cannot contain
		// tabwriter.Escape bytes since they do not appear in legal
		// UTF-8 sequences.
		p.output.WriteByte(tabwriter.Escape)
	}

	p.output.WriteString(s)

	// update p.pos
	nlines := 0
	column := p.pos.Column + len(s)
	for i := 0; i < len(s); i++ {
		if s[i] == '\n' {
			nlines++
			column = len(s) - i
		}
	}
	p.pos.Offset += len(s)
	p.pos.Line += nlines
	p.pos.Column = column

	if isLit {
		p.output.WriteByte(tabwriter.Escape)
	}
}

// writeItem writes data at position pos. data is the text corresponding to
// a single lexical token, but may also be comment text. pos is the actual
// (or at least very accurately estimated) position of the data in the original
// source text. writeItem updates p.last to the position immediately following
// the data.
//
func (p *printer) writeItem(pos token.Position, data string, isLit bool) {
	if pos.IsValid() {
		// continue with previous position if we don't have a valid pos
		if p.last.IsValid() && p.last.Filename != pos.Filename {
			// the file has changed - reset state
			// (used when printing merged ASTs of different files
			// e.g., the result of ast.MergePackageFiles)
			p.indent = 0
			p.mode = 0
			p.wsbuf = p.wsbuf[0:0]
		}
		p.pos = pos
	}
	if debug {
		// do not update p.pos - use write0
		fmt.Fprintf(&p.output, "/*%s*/", pos)
	}
	p.writeString(data, isLit)
	p.last = p.pos
}

const linePrefix = "//line "

// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), and isKeyword indicates if the
// next item is a keyword.
//
func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *ast.Comment, isKeyword bool) {
	if p.output.Len() == 0 {
		// the comment is the first item to be printed - don't write any whitespace
		return
	}

	if pos.IsValid() && pos.Filename != p.last.Filename {
		// comment in a different file - separate with newlines
		p.writeByteN('\f', maxNewlines)
		return
	}

	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
		// comment on the same line as last item:
		// separate with at least one separator
		hasSep := false
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank:
					// ignore any blanks before a comment
					p.wsbuf[i] = ignore
					continue
				case vtab:
					// respect existing tabs - important
					// for proper formatting of commented structs
					hasSep = true
					continue
				case indent:
					// apply pending indentation
					continue
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// make sure there is at least one separator
		if !hasSep {
			sep := byte('\t')
			if pos.Line == next.Line {
				// next item is on the same line as the comment
				// (which must be a /*-style comment): separate
				// with a blank instead of a tab
				sep = ' '
			}
			p.writeByte(sep)
		}

	} else {
		// comment on a different line:
		// separate with at least one line break
		droppedLinebreak := false
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank, vtab:
					// ignore any horizontal whitespace before line breaks
					p.wsbuf[i] = ignore
					continue
				case indent:
					// apply pending indentation
					continue
				case unindent:
					// if the next token is a keyword, apply the outdent
					// if it appears that the comment is aligned with the
					// keyword; otherwise assume the outdent is part of a
					// closing block and stop (this scenario appears with
					// comments before a case label where the comments
					// apply to the next case instead of the current one)
					if isKeyword && pos.Column == next.Column {
						continue
					}
				case newline, formfeed:
					// TODO(gri): may want to keep formfeed info in some cases
					p.wsbuf[i] = ignore
					droppedLinebreak = true
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}

		// determine number of linebreaks before the comment
		n := 0
		if pos.IsValid() && p.last.IsValid() {
			n = pos.Line - p.last.Line
			if n < 0 { // should never happen
				n = 0
			}
		}

		// at the package scope level only (p.indent == 0),
		// add an extra newline if we dropped one before:
		// this preserves a blank line before documentation
		// comments at the package scope level (issue 2570)
		if p.indent == 0 && droppedLinebreak {
			n++
		}

		// make sure there is at least one line break
		// if the previous comment was a line comment
		if n == 0 && prev != nil && prev.Text[1] == '/' {
			n = 1
		}

		if n > 0 {
			// turn off indent if we're about to print a line directive
			indent := p.indent
			if strings.HasPrefix(comment.Text, linePrefix) {
				p.indent = 0
			}
			// use formfeeds to break columns before a comment;
			// this is analogous to using formfeeds to separate
			// individual lines of /*-style comments
			p.writeByteN('\f', nlimit(n))
			p.indent = indent // restore indent
		}
	}
}

// Split comment text into lines
// (using strings.Split(text, "\n") is significantly slower for
// this specific purpose, as measured with: gotest -bench=Print)
func split(text string) []string {
	// count lines (comment text never ends in a newline)
	n := 1
	for i := 0; i < len(text); i++ {
		if text[i] == '\n' {
			n++
		}
	}

	// split
	lines := make([]string, n)
	n = 0
	i := 0
	for j := 0; j < len(text); j++ {
		if text[j] == '\n' {
			lines[n] = text[i:j] // exclude newline
			i = j + 1            // discard newline
			n++
		}
	}
	lines[n] = text[i:]

	return lines
}

// Returns true if s contains only white space
// (only tabs and blanks can appear in the printer's context).
func isBlank(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] > ' ' {
			return false
		}
	}
	return true
}

func commonPrefix(a, b string) string {
	i := 0
	for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
		i++
	}
	return a[0:i]
}

func stripCommonPrefix(lines []string) {
	if len(lines) < 2 {
		return // at most one line - nothing to do
	}
	// len(lines) >= 2

	// The heuristic in this function tries to handle a few
	// common patterns of /*-style comments: Comments where
	// the opening /* and closing */ are aligned and the
	// rest of the comment text is aligned and indented with
	// blanks or tabs, cases with a vertical "line of stars"
	// on the left, and cases where the closing */ is on the
	// same line as the last comment text.

	// Compute maximum common white prefix of all but the first,
	// last, and blank lines, and replace blank lines with empty
	// lines (the first line starts with /* and has no prefix).
	// In case of two-line comments, consider the last line for
	// the prefix computation since otherwise the prefix would
	// be empty.
	//
	// Note that the first and last line are never empty (they
	// contain the opening /* and closing */ respectively) and
	// thus they can be ignored by the blank line check.
	var prefix string
	if len(lines) > 2 {
		first := true
		for i, line := range lines[1 : len(lines)-1] {
			switch {
			case isBlank(line):
				lines[1+i] = "" // range starts at line 1
			case first:
				prefix = commonPrefix(line, line)
				first = false
			default:
				prefix = commonPrefix(prefix, line)
			}
		}
	} else { // len(lines) == 2, lines cannot be blank (contain /* and */)
		line := lines[1]
		prefix = commonPrefix(line, line)
	}

	/*
	 * Check for vertical "line of stars" and correct prefix accordingly.
	 */
	lineOfStars := false
	if i := strings.Index(prefix, "*"); i >= 0 {
		// Line of stars present.
		if i > 0 && prefix[i-1] == ' ' {
			i-- // remove trailing blank from prefix so stars remain aligned
		}
		prefix = prefix[0:i]
		lineOfStars = true
	} else {
		// No line of stars present.
		// Determine the white space on the first line after the /*
		// and before the beginning of the comment text, assume two
		// blanks instead of the /* unless the first character after
		// the /* is a tab. If the first comment line is empty but
		// for the opening /*, assume up to 3 blanks or a tab. This
		// whitespace may be found as suffix in the common prefix.
		first := lines[0]
		if isBlank(first[2:]) {
			// no comment text on the first line:
			// reduce prefix by up to 3 blanks or a tab
			// if present - this keeps comment text indented
			// relative to the /* and */'s if it was indented
			// in the first place
			i := len(prefix)
			for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ {
				i--
			}
			if i == len(prefix) && i > 0 && prefix[i-1] == '\t' {
				i--
			}
			prefix = prefix[0:i]
		} else {
			// comment text on the first line
			suffix := make([]byte, len(first))
			n := 2 // start after opening /*
			for n < len(first) && first[n] <= ' ' {
				suffix[n] = first[n]
				n++
			}
			if n > 2 && suffix[2] == '\t' {
				// assume the '\t' compensates for the /*
				suffix = suffix[2:n]
			} else {
				// otherwise assume two blanks
				suffix[0], suffix[1] = ' ', ' '
				suffix = suffix[0:n]
			}
			// Shorten the computed common prefix by the length of
			// suffix, if it is found as suffix of the prefix.
			if strings.HasSuffix(prefix, string(suffix)) {
				prefix = prefix[0 : len(prefix)-len(suffix)]
			}
		}
	}

	// Handle last line: If it only contains a closing */, align it
	// with the opening /*, otherwise align the text with the other
	// lines.
	last := lines[len(lines)-1]
	closing := "*/"
	i := strings.Index(last, closing) // i >= 0 (closing is always present)
	if isBlank(last[0:i]) {
		// last line only contains closing */
		if lineOfStars {
			closing = " */" // add blank to align final star
		}
		lines[len(lines)-1] = prefix + closing
	} else {
		// last line contains more comment text - assume
		// it is aligned like the other lines and include
		// in prefix computation
		prefix = commonPrefix(prefix, last)
	}

	// Remove the common prefix from all but the first and empty lines.
	for i, line := range lines[1:] {
		if len(line) != 0 {
			lines[1+i] = line[len(prefix):] // range starts at line 1
		}
	}
}

func (p *printer) writeComment(comment *ast.Comment) {
	text := comment.Text

	if strings.HasPrefix(text, linePrefix) {
		pos := strings.TrimSpace(text[len(linePrefix):])
		i := strings.LastIndex(pos, ":")
		if i >= 0 {
			// The line directive we are about to print changed
			// the Filename and Line number used by go/token
			// as it was reading the input originally.
			// In order to match the original input, we have to
			// update our own idea of the file and line number
			// accordingly, after printing the directive.
			file := pos[:i]
			line, _ := strconv.Atoi(pos[i+1:])
			defer func() {
				p.pos.Filename = file
				p.pos.Line = line
				p.pos.Column = 1
			}()
		}
	}

	// shortcut common case of //-style comments
	if text[1] == '/' {
		p.writeItem(p.posFor(comment.Pos()), text, true)
		return
	}

	// for /*-style comments, print line by line and let the
	// write function take care of the proper indentation
	lines := split(text)
	stripCommonPrefix(lines)

	// write comment lines, separated by formfeed,
	// without a line break after the last line
	pos := p.posFor(comment.Pos())
	for i, line := range lines {
		if i > 0 {
			p.writeByte('\f')
			pos = p.pos
		}
		if len(line) > 0 {
			p.writeItem(pos, line, true)
		}
	}
}

// writeCommentSuffix writes a line break after a comment if indicated
// and processes any leftover indentation information. If a line break
// is needed, the kind of break (newline vs formfeed) depends on the
// pending whitespace. The writeCommentSuffix result indicates if a
// newline was written or if a formfeed was dropped from the whitespace
// buffer.
//
func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) {
	for i, ch := range p.wsbuf {
		switch ch {
		case blank, vtab:
			// ignore trailing whitespace
			p.wsbuf[i] = ignore
		case indent, unindent:
			// don't lose indentation information
		case newline, formfeed:
			// if we need a line break, keep exactly one
			// but remember if we dropped any formfeeds
			if needsLinebreak {
				needsLinebreak = false
				wroteNewline = true
			} else {
				if ch == formfeed {
					droppedFF = true
				}
				p.wsbuf[i] = ignore
			}
		}
	}
	p.writeWhitespace(len(p.wsbuf))

	// make sure we have a line break
	if needsLinebreak {
		p.writeByte('\n')
		wroteNewline = true
	}

	return
}

// intersperseComments consumes all comments that appear before the next token
// tok and prints them together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
// the comments and whitespace. The intersperseComments result indicates if a
// newline was written or if a formfeed was dropped from the whitespace buffer.
//
func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	var last *ast.Comment
	for p.commentBefore(next) {
		for _, c := range p.comment.List {
			p.writeCommentPrefix(p.posFor(c.Pos()), next, last, c, tok.IsKeyword())
			p.writeComment(c)
			last = c
		}
		p.nextComment()
	}

	if last != nil {
		if last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line {
			// the last comment is a /*-style comment and the next item
			// follows on the same line: separate with an extra blank
			p.writeByte(' ')
		}
		// ensure that there is a line break after a //-style comment,
		// before a closing '}' unless explicitly disabled, or at eof
		needsLinebreak :=
			last.Text[1] == '/' ||
				tok == token.RBRACE && p.mode&noExtraLinebreak == 0 ||
				tok == token.EOF
		return p.writeCommentSuffix(needsLinebreak)
	}

	// no comment was written - we should never reach here since
	// intersperseComments should not be called in that case
	p.internalError("intersperseComments called without pending comments")
	return
}

// writeWhitespace writes the first n whitespace entries.
func (p *printer) writeWhitespace(n int) {
	// write entries
	for i := 0; i < n; i++ {
		switch ch := p.wsbuf[i]; ch {
		case ignore:
			// ignore!
		case indent:
			p.indent++
		case unindent:
			p.indent--
			if p.indent < 0 {
				p.internalError("negative indentation:", p.indent)
				p.indent = 0
			}
		case newline, formfeed:
			// A line break immediately followed by a "correcting"
			// unindent is swapped with the unindent - this permits
			// proper label positioning. If a comment is between
			// the line break and the label, the unindent is not
			// part of the comment whitespace prefix and the comment
			// will be positioned correctly indented.
			if i+1 < n && p.wsbuf[i+1] == unindent {
				// Use a formfeed to terminate the current section.
				// Otherwise, a long label name on the next line leading
				// to a wide column may increase the indentation column
				// of lines before the label; effectively leading to wrong
				// indentation.
				p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
				i-- // do it again
				continue
			}
			fallthrough
		default:
			p.writeByte(byte(ch))
		}
	}

	// shift remaining entries down
	i := 0
	for ; n < len(p.wsbuf); n++ {
		p.wsbuf[i] = p.wsbuf[n]
		i++
	}
	p.wsbuf = p.wsbuf[0:i]
}

// ----------------------------------------------------------------------------
// Printing interface

// nlimit limits n to maxNewlines.
func nlimit(n int) int {
	if n > maxNewlines {
		n = maxNewlines
	}
	return n
}

func mayCombine(prev token.Token, next byte) (b bool) {
	switch prev {
	case token.INT:
		b = next == '.' // 1.
	case token.ADD:
		b = next == '+' // ++
	case token.SUB:
		b = next == '-' // --
	case token.QUO:
		b = next == '*' // /*
	case token.LSS:
		b = next == '-' || next == '<' // <- or <<
	case token.AND:
		b = next == '&' || next == '^' // && or &^
	}
	return
}

// print prints a list of "items" (roughly corresponding to syntactic
// tokens, but also including whitespace and formatting information).
// It is the only print function that should be called directly from
// any of the AST printing functions in nodes.go.
//
// Whitespace is accumulated until a non-whitespace token appears. Any
// comments that need to appear before that token are printed first,
// taking into account the amount and structure of any pending white-
// space for best comment placement. Then, any leftover whitespace is
// printed, followed by the actual token.
//
func (p *printer) print(args ...interface{}) {
	for _, arg := range args {
		// information about the current arg
		var data string
		var isLit bool
		var impliedSemi bool // value for p.impliedSemi after this arg

		switch x := arg.(type) {
		case pmode:
			// toggle printer mode
			p.mode ^= x
			continue

		case whiteSpace:
			if x == ignore {
				// don't add ignore's to the buffer; they
				// may screw up "correcting" unindents (see
				// LabeledStmt)
				continue
			}
			i := len(p.wsbuf)
			if i == cap(p.wsbuf) {
				// Whitespace sequences are very short so this should
				// never happen. Handle gracefully (but possibly with
				// bad comment placement) if it does happen.
				p.writeWhitespace(i)
				i = 0
			}
			p.wsbuf = p.wsbuf[0 : i+1]
			p.wsbuf[i] = x
			if x == newline || x == formfeed {
				// newlines affect the current state (p.impliedSemi)
				// and not the state after printing arg (impliedSemi)
				// because comments can be interspersed before the arg
				// in this case
				p.impliedSemi = false
			}
			p.lastTok = token.ILLEGAL
			continue

		case *ast.Ident:
			data = x.Name
			impliedSemi = true
			p.lastTok = token.IDENT

		case *ast.BasicLit:
			data = x.Value
			isLit = true
			impliedSemi = true
			p.lastTok = x.Kind

		case token.Token:
			s := x.String()
			if mayCombine(p.lastTok, s[0]) {
				// the previous and the current token must be
				// separated by a blank otherwise they combine
				// into a different incorrect token sequence
				// (except for token.INT followed by a '.' this
				// should never happen because it is taken care
				// of via binary expression formatting)
				if len(p.wsbuf) != 0 {
					p.internalError("whitespace buffer not empty")
				}
				p.wsbuf = p.wsbuf[0:1]
				p.wsbuf[0] = ' '
			}
			data = s
			// some keywords followed by a newline imply a semicolon
			switch x {
			case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN,
				token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
				impliedSemi = true
			}
			p.lastTok = x

		case token.Pos:
			if x.IsValid() {
				p.pos = p.posFor(x) // accurate position of next item
			}
			continue

		case string:
			// incorrect AST - print error message
			data = x
			isLit = true
			impliedSemi = true
			p.lastTok = token.STRING

		default:
			fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
			panic("go/printer type")
		}
		// data != ""

		next := p.pos // estimated/accurate position of next item
		wroteNewline, droppedFF := p.flush(next, p.lastTok)

		// intersperse extra newlines if present in the source and
		// if they don't cause extra semicolons (don't do this in
		// flush as it will cause extra newlines at the end of a file)
		if !p.impliedSemi {
			n := nlimit(next.Line - p.pos.Line)
			// don't exceed maxNewlines if we already wrote one
			if wroteNewline && n == maxNewlines {
				n = maxNewlines - 1
			}
			if n > 0 {
				ch := byte('\n')
				if droppedFF {
					ch = '\f' // use formfeed since we dropped one before
				}
				p.writeByteN(ch, n)
				impliedSemi = false
			}
		}

		p.writeItem(next, data, isLit)
		p.impliedSemi = impliedSemi
	}
}

// commentBefore returns true iff the current comment group occurs
// before the next position in the source code and printing it does
// not introduce implicit semicolons.
//
func (p *printer) commentBefore(next token.Position) (result bool) {
	return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)
}

// flush prints any pending comments and whitespace occurring textually
// before the position of the next token tok. The flush result indicates
// if a newline was written or if a formfeed was dropped from the whitespace
// buffer.
//
func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	if p.commentBefore(next) {
		// if there are comments before the next item, intersperse them
		wroteNewline, droppedFF = p.intersperseComments(next, tok)
	} else {
		// otherwise, write any leftover whitespace
		p.writeWhitespace(len(p.wsbuf))
	}
	return
}

// getDoc returns the ast.CommentGroup associated with n, if any.
func getDoc(n ast.Node) *ast.CommentGroup {
	switch n := n.(type) {
	case *ast.Field:
		return n.Doc
	case *ast.ImportSpec:
		return n.Doc
	case *ast.ValueSpec:
		return n.Doc
	case *ast.TypeSpec:
		return n.Doc
	case *ast.GenDecl:
		return n.Doc
	case *ast.FuncDecl:
		return n.Doc
	case *ast.File:
		return n.Doc
	}
	return nil
}

func (p *printer) printNode(node interface{}) error {
	// unpack *CommentedNode, if any
	var comments []*ast.CommentGroup
	if cnode, ok := node.(*CommentedNode); ok {
		node = cnode.Node
		comments = cnode.Comments
	}

	if comments != nil {
		// commented node - restrict comment list to relevant range
		n, ok := node.(ast.Node)
		if !ok {
			goto unsupported
		}
		beg := n.Pos()
		end := n.End()
		// if the node has associated documentation,
		// include that commentgroup in the range
		// (the comment list is sorted in the order
		// of the comment appearance in the source code)
		if doc := getDoc(n); doc != nil {
			beg = doc.Pos()
		}
		// token.Pos values are global offsets, we can
		// compare them directly
		i := 0
		for i < len(comments) && comments[i].End() < beg {
			i++
		}
		j := i
		for j < len(comments) && comments[j].Pos() < end {
			j++
		}
		if i < j {
			p.comments = comments[i:j]
		}
	} else if n, ok := node.(*ast.File); ok {
		// use ast.File comments, if any
		p.comments = n.Comments
	}

	// if there are no comments, use node comments
	p.useNodeComments = p.comments == nil

	// get comments ready for use
	p.nextComment()

	// format node
	switch n := node.(type) {
	case ast.Expr:
		p.expr(n, ignoreMultiLine)
	case ast.Stmt:
		// A labeled statement will un-indent to position the
		// label. Set indent to 1 so we don't get indent "underflow".
		if _, labeledStmt := n.(*ast.LabeledStmt); labeledStmt {
			p.indent = 1
		}
		p.stmt(n, false, ignoreMultiLine)
	case ast.Decl:
		p.decl(n, ignoreMultiLine)
	case ast.Spec:
		p.spec(n, 1, false, ignoreMultiLine)
	case *ast.File:
		p.file(n)
	default:
		goto unsupported
	}

	return nil

unsupported:
	return fmt.Errorf("go/printer: unsupported node type %T", node)
}

// ----------------------------------------------------------------------------
// Trimmer

// A trimmer is an io.Writer filter for stripping tabwriter.Escape
// characters, trailing blanks and tabs, and for converting formfeed
// and vtab characters into newlines and htabs (in case no tabwriter
// is used). Text bracketed by tabwriter.Escape characters is passed
// through unchanged.
//
type trimmer struct {
	output io.Writer
	state  int
	space  bytes.Buffer
}

// trimmer is implemented as a state machine.
// It can be in one of the following states:
const (
	inSpace  = iota // inside space
	inEscape        // inside text bracketed by tabwriter.Escapes
	inText          // inside text
)

// Design note: It is tempting to eliminate extra blanks occurring in
//              whitespace in this function as it could simplify some
//              of the blanks logic in the node printing functions.
//              However, this would mess up any formatting done by
//              the tabwriter.

var aNewline = []byte("\n")

func (p *trimmer) Write(data []byte) (n int, err error) {
	// invariants:
	// p.state == inSpace:
	//	p.space is unwritten
	// p.state == inEscape, inText:
	//	data[m:n] is unwritten
	m := 0
	var b byte
	for n, b = range data {
		if b == '\v' {
			b = '\t' // convert to htab
		}
		switch p.state {
		case inSpace:
			switch b {
			case '\t', ' ':
				p.space.WriteByte(b) // WriteByte returns no errors
			case '\n', '\f':
				p.space.Reset() // discard trailing space
				_, err = p.output.Write(aNewline)
			case tabwriter.Escape:
				_, err = p.output.Write(p.space.Bytes())
				p.state = inEscape
				m = n + 1 // +1: skip tabwriter.Escape
			default:
				_, err = p.output.Write(p.space.Bytes())
				p.state = inText
				m = n
			}
		case inEscape:
			if b == tabwriter.Escape {
				_, err = p.output.Write(data[m:n])
				p.state = inSpace
				p.space.Reset()
			}
		case inText:
			switch b {
			case '\t', ' ':
				_, err = p.output.Write(data[m:n])
				p.state = inSpace
				p.space.Reset()
				p.space.WriteByte(b) // WriteByte returns no errors
			case '\n', '\f':
				_, err = p.output.Write(data[m:n])
				p.state = inSpace
				p.space.Reset()
				_, err = p.output.Write(aNewline)
			case tabwriter.Escape:
				_, err = p.output.Write(data[m:n])
				p.state = inEscape
				m = n + 1 // +1: skip tabwriter.Escape
			}
		default:
			panic("unreachable")
		}
		if err != nil {
			return
		}
	}
	n = len(data)

	switch p.state {
	case inEscape, inText:
		_, err = p.output.Write(data[m:n])
		p.state = inSpace
		p.space.Reset()
	}

	return
}

// ----------------------------------------------------------------------------
// Public interface

// General printing is controlled with these Config.Mode flags.
const (
	RawFormat uint = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
	TabIndent                  // use tabs for indentation independent of UseSpaces
	UseSpaces                  // use spaces instead of tabs for alignment
)

// A Config node controls the output of Fprint.
type Config struct {
	Mode     uint // default: 0
	Tabwidth int  // default: 8
}

// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (err error) {
	// print node
	var p printer
	p.init(cfg, fset, nodeSizes)
	if err = p.printNode(node); err != nil {
		return
	}
	// print outstanding comments
	p.impliedSemi = false // EOF acts like a newline
	p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)

	// redirect output through a trimmer to eliminate trailing whitespace
	// (Input to a tabwriter must be untrimmed since trailing tabs provide
	// formatting information. The tabwriter could provide trimming
	// functionality but no tabwriter is used when RawFormat is set.)
	output = &trimmer{output: output}

	// redirect output through a tabwriter if necessary
	if cfg.Mode&RawFormat == 0 {
		minwidth := cfg.Tabwidth

		padchar := byte('\t')
		if cfg.Mode&UseSpaces != 0 {
			padchar = ' '
		}

		twmode := tabwriter.DiscardEmptyColumns
		if cfg.Mode&TabIndent != 0 {
			minwidth = 0
			twmode |= tabwriter.TabIndent
		}

		output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode)
	}

	// write printer result via tabwriter/trimmer to output
	if _, err = output.Write(p.output.Bytes()); err != nil {
		return
	}

	// flush tabwriter, if any
	if tw, _ := (output).(*tabwriter.Writer); tw != nil {
		err = tw.Flush()
	}

	return
}

// A CommentedNode bundles an AST node and corresponding comments.
// It may be provided as argument to any of the Fprint functions.
//
type CommentedNode struct {
	Node     interface{} // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
	Comments []*ast.CommentGroup
}

// Fprint "pretty-prints" an AST node to output for a given configuration cfg.
// Position information is interpreted relative to the file set fset.
// The node type must be *ast.File, *CommentedNode, or assignment-compatible
// to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
//
func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
	return cfg.fprint(output, fset, node, make(map[ast.Node]int))
}

// Fprint "pretty-prints" an AST node to output.
// It calls Config.Fprint with default settings.
//
func Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
	return (&Config{Tabwidth: 8}).Fprint(output, fset, node)
}
