OpenCores
URL https://opencores.org/ocsvn/s80186/s80186/trunk

Subversion Repositories s80186

[/] [s80186/] [trunk/] [scripts/] [gcovr] - Blame information for rev 2

Details | Compare with Previous | View Log

''')
Line No. Rev Author Line
1 2 jamieiles
#!/usr/bin/env python
2
 
3
# Copyright Jamie Iles, 2017
4
#
5
# This file is part of s80x86.
6
#
7
# s80x86 is free software: you can redistribute it and/or modify
8
# it under the terms of the GNU General Public License as published by
9
# the Free Software Foundation, either version 3 of the License, or
10
# (at your option) any later version.
11
#
12
# s80x86 is distributed in the hope that it will be useful,
13
# but WITHOUT ANY WARRANTY; without even the implied warranty of
14
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
# GNU General Public License for more details.
16
#
17
# You should have received a copy of the GNU General Public License
18
# along with s80x86.  If not, see .
19
 
20
# -*- coding:utf-8 -*-
21
#
22
# A report generator for gcov 3.4
23
#
24
# This routine generates a format that is similar to the format generated
25
# by the Python coverage.py module.  This code is similar to the
26
# data processing performed by lcov's geninfo command.  However, we
27
# don't worry about parsing the *.gcna files, and backwards compatibility for
28
# older versions of gcov is not supported.
29
#
30
# Outstanding issues
31
#   - verify that gcov 3.4 or newer is being used
32
#   - verify support for symbolic links
33
#
34
# gcovr is a FAST project.  For documentation, bug reporting, and
35
# updates, see https://software.sandia.gov/trac/fast/wiki/gcovr
36
#
37
#  _________________________________________________________________________
38
#
39
#  Gcovr: A parsing and reporting tool for gcov
40
#  Copyright (c) 2013 Sandia Corporation.
41
#  This software is distributed under the BSD License.
42
#  Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
43
#  the U.S. Government retains certain rights in this software.
44
#  For more information, see the README.md file.
45
# _________________________________________________________________________
46
#
47
# $Revision$
48
# $Date$
49
#
50
 
51
try:
52
    import html
53
except:
54
    import cgi as html
55
import copy
56
import os
57
import re
58
import subprocess
59
import sys
60
import time
61
import xml.dom.minidom
62
import datetime
63
import posixpath
64
 
65
from optparse import OptionParser
66
from string import Template
67
from os.path import normpath
68
 
69
medium_coverage = 75.0
70
high_coverage = 90.0
71
low_color = "LightPink"
72
medium_color = "#FFFF55"
73
high_color = "LightGreen"
74
covered_color = "LightGreen"
75
uncovered_color = "LightPink"
76
 
77
__version__ = "3.3-prerelease"
78
src_revision = "$Revision$"
79
 
80
output_re = re.compile("[Cc]reating [`'](.*)'$")
81
source_re = re.compile("[Cc]annot open (source|graph) file")
82
 
83
starting_dir = os.getcwd()
84
 
85
exclude_line_flag = "_EXCL_"
86
exclude_line_pattern = re.compile('([GL]COVR?)_EXCL_(LINE|START|STOP)')
87
 
88
c_style_comment_pattern = re.compile('/\*.*?\*/')
89
cpp_style_comment_pattern = re.compile('//.*?$')
90
 
91
 
92
def version_str():
93
    ans = __version__
94
    m = re.match('\$Revision:\s*(\S+)\s*\$', src_revision)
95
    if m:
96
        ans = ans + " (r%s)" % (m.group(1))
97
    return ans
98
 
99
 
100
#
101
# Container object for coverage statistics
102
#
103
class CoverageData(object):
104
 
105
    def __init__(
106
            self, fname, uncovered, uncovered_exceptional, covered, branches,
107
            noncode):
108
        self.fname = fname
109
        # Shallow copies are cheap & "safe" because the caller will
110
        # throw away their copies of covered & uncovered after calling
111
        # us exactly *once*
112
        self.uncovered = copy.copy(uncovered)
113
        self.uncovered_exceptional = copy.copy(uncovered_exceptional)
114
        self.covered = copy.copy(covered)
115
        self.noncode = copy.copy(noncode)
116
        # But, a deep copy is required here
117
        self.all_lines = copy.deepcopy(uncovered)
118
        self.all_lines.update(uncovered_exceptional)
119
        self.all_lines.update(covered.keys())
120
        self.branches = copy.deepcopy(branches)
121
 
122
    def update(
123
            self, uncovered, uncovered_exceptional, covered, branches,
124
            noncode):
125
        self.all_lines.update(uncovered)
126
        self.all_lines.update(uncovered_exceptional)
127
        self.all_lines.update(covered.keys())
128
        self.uncovered.update(uncovered)
129
        self.uncovered_exceptional.update(uncovered_exceptional)
130
        self.noncode.intersection_update(noncode)
131
        for k in covered.keys():
132
            self.covered[k] = self.covered.get(k, 0) + covered[k]
133
        for k in branches.keys():
134
            for b in branches[k]:
135
                d = self.branches.setdefault(k, {})
136
                d[b] = d.get(b, 0) + branches[k][b]
137
        self.uncovered.difference_update(self.covered.keys())
138
        self.uncovered_exceptional.difference_update(self.covered.keys())
139
 
140
    def uncovered_str(self, exceptional):
141
        if options.show_branch:
142
            #
143
            # Don't do any aggregation on branch results
144
            #
145
            tmp = []
146
            for line in self.branches.keys():
147
                for branch in self.branches[line]:
148
                    if self.branches[line][branch] == 0:
149
                        tmp.append(line)
150
                        break
151
            tmp.sort()
152
            return ",".join([str(x) for x in tmp]) or ""
153
 
154
        if exceptional:
155
            tmp = list(self.uncovered_exceptional)
156
        else:
157
            tmp = list(self.uncovered)
158
        if len(tmp) == 0:
159
            return ""
160
 
161
        #
162
        # Walk through the uncovered lines in sorted order.
163
        # Find blocks of consecutive uncovered lines, and return
164
        # a string with that information.
165
        #
166
        tmp.sort()
167
        first = None
168
        last = None
169
        ranges = []
170
        for item in tmp:
171
            if last is None:
172
                first = item
173
                last = item
174
            elif item == (last + 1):
175
                last = item
176
            else:
177
                #
178
                # Should we include noncode lines in the range of lines
179
                # to be covered???  This simplifies the ranges summary, but it
180
                # provides a counterintuitive listing.
181
                #
182
                #if len(self.noncode.intersection(range(last+1,item))) \
183
                #       == item - last - 1:
184
                #    last = item
185
                #    continue
186
                #
187
                if first == last:
188
                    ranges.append(str(first))
189
                else:
190
                    ranges.append(str(first) + "-" + str(last))
191
                first = item
192
                last = item
193
        if first == last:
194
            ranges.append(str(first))
195
        else:
196
            ranges.append(str(first) + "-" + str(last))
197
        return ",".join(ranges)
198
 
199
    def coverage(self):
200
        if options.show_branch:
201
            total = 0
202
            cover = 0
203
            for line in self.branches.keys():
204
                for branch in self.branches[line].keys():
205
                    total += 1
206
                    cover += self.branches[line][branch] > 0 and 1 or 0
207
        else:
208
            total = len(self.all_lines)
209
            cover = len(self.covered)
210
 
211
        percent = total and str(int(100.0 * cover / total)) or "--"
212
        return (total, cover, percent)
213
 
214
    def summary(self):
215
        tmp = options.root_filter.sub('', self.fname)
216
        if not self.fname.endswith(tmp):
217
            # Do no truncation if the filter does not start matching at
218
            # the beginning of the string
219
            tmp = self.fname
220
        tmp = tmp.ljust(40)
221
        if len(tmp) > 40:
222
            tmp = tmp + "\n" + " " * 40
223
 
224
        (total, cover, percent) = self.coverage()
225
        uncovered_lines = self.uncovered_str(False)
226
        if not options.show_branch:
227
            t = self.uncovered_str(True)
228
            if len(t):
229
                uncovered_lines += " [* " + t + "]"
230
        return (total, cover,
231
                tmp + str(total).rjust(8) + str(cover).rjust(8) +
232
                percent.rjust(6) + "%   " + uncovered_lines)
233
 
234
 
235
def resolve_symlinks(orig_path):
236
    """
237
    Return the normalized absolute path name with all symbolic links resolved
238
    """
239
    return os.path.realpath(orig_path)
240
    # WEH - why doesn't os.path.realpath() suffice here?
241
    #
242
    drive, tmp = os.path.splitdrive(os.path.abspath(orig_path))
243
    if not drive:
244
        drive = os.path.sep
245
    parts = tmp.split(os.path.sep)
246
    actual_path = [drive]
247
    while parts:
248
        actual_path.append(parts.pop(0))
249
        if not os.path.islink(os.path.join(*actual_path)):
250
            continue
251
        actual_path[-1] = os.readlink(os.path.join(*actual_path))
252
        tmp_drive, tmp_path = os.path.splitdrive(
253
            resolve_symlinks(os.path.join(*actual_path)))
254
        if tmp_drive:
255
            drive = tmp_drive
256
        actual_path = [drive] + tmp_path.split(os.path.sep)
257
    return os.path.join(*actual_path)
258
 
259
 
260
#
261
# Class that creates path aliases
262
#
263
class PathAliaser(object):
264
 
265
    def __init__(self):
266
        self.aliases = {}
267
        self.master_targets = set()
268
        self.preferred_name = {}
269
 
270
    def path_startswith(self, path, base):
271
        return path.startswith(base) and (
272
            len(base) == len(path) or path[len(base)] == os.path.sep)
273
 
274
    def master_path(self, path):
275
        match_found = False
276
        while True:
277
            for base, alias in self.aliases.items():
278
                if self.path_startswith(path, base):
279
                    path = alias + path[len(base):]
280
                    match_found = True
281
                    break
282
            for master_base in self.master_targets:
283
                if self.path_startswith(path, master_base):
284
                    return path, master_base, True
285
            if match_found:
286
                sys.stderr.write(
287
                    "(ERROR) violating fundamental assumption while walking "
288
                    "directory tree.\n\tPlease report this to the gcovr "
289
                    "developers.\n")
290
            return path, None, match_found
291
 
292
    def unalias_path(self, path):
293
        path = resolve_symlinks(path)
294
        path, master_base, known_path = self.master_path(path)
295
        if not known_path:
296
            return path
297
        # Try and resolve the preferred name for this location
298
        if master_base in self.preferred_name:
299
            return self.preferred_name[master_base] + path[len(master_base):]
300
        return path
301
 
302
    def add_master_target(self, master):
303
        self.master_targets.add(master)
304
 
305
    def add_alias(self, target, master):
306
        self.aliases[target] = master
307
 
308
    def set_preferred(self, master, preferred):
309
        self.preferred_name[master] = preferred
310
 
311
aliases = PathAliaser()
312
 
313
 
314
# This is UGLY.  Here's why: UNIX resolves symbolic links by walking the
315
# entire directory structure.  What that means is that relative links
316
# are always relative to the actual directory inode, and not the
317
# "virtual" path that the user might have traversed (over symlinks) on
318
# the way to that directory.  Here's the canonical example:
319
#
320
#   a / b / c / testfile
321
#   a / d / e --> ../../a/b
322
#   m / n --> /a
323
#   x / y / z --> /m/n/d
324
#
325
# If we start in "y", we will see the following directory structure:
326
#   y
327
#   |-- z
328
#       |-- e
329
#           |-- c
330
#               |-- testfile
331
#
332
# The problem is that using a simple traversal based on the Python
333
# documentation:
334
#
335
#    (os.path.join(os.path.dirname(path), os.readlink(result)))
336
#
337
# will not work: we will see a link to /m/n/d from /x/y, but completely
338
# miss the fact that n is itself a link.  If we then naively attempt to
339
# apply the "c" relative link, we get an intermediate path that looks
340
# like "/m/n/d/e/../../a/b", which would get normalized to "/m/n/a/b"; a
341
# nonexistant path.  The solution is that we need to walk the original
342
# path, along with the full path of all links 1 directory at a time and
343
# check for embedded symlinks.
344
#
345
#
346
# NB:  Users have complained that this code causes a performance issue.
347
# I have replaced this logic with os.walk(), which works for Python >= 2.6
348
#
349
def link_walker(path):
350
    if sys.version_info >= (2, 6):
351
        for root, dirs, files in os.walk(
352
            os.path.abspath(path), followlinks=True
353
        ):
354
            yield (os.path.abspath(os.path.realpath(root)), dirs, files)
355
    else:
356
        targets = [os.path.abspath(path)]
357
        while targets:
358
            target_dir = targets.pop(0)
359
            actual_dir = resolve_symlinks(target_dir)
360
            #print "target dir: %s  (%s)" % (target_dir, actual_dir)
361
            master_name, master_base, visited = aliases.master_path(actual_dir)
362
            if visited:
363
                #print "  ...root already visited as %s" % master_name
364
                aliases.add_alias(target_dir, master_name)
365
                continue
366
            if master_name != target_dir:
367
                aliases.set_preferred(master_name, target_dir)
368
                aliases.add_alias(target_dir, master_name)
369
            aliases.add_master_target(master_name)
370
            #print "  ...master name = %s" % master_name
371
            #print "  ...walking %s" % target_dir
372
            for root, dirs, files in os.walk(target_dir, topdown=True):
373
                #print "    ...reading %s" % root
374
                for d in dirs:
375
                    tmp = os.path.abspath(os.path.join(root, d))
376
                    #print "    ...checking %s" % tmp
377
                    if os.path.islink(tmp):
378
                        #print "      ...buffering link %s" % tmp
379
                        targets.append(tmp)
380
                yield (root, dirs, files)
381
 
382
 
383
def search_file(expr, path):
384
    """
385
    Given a search path, recursively descend to find files that match a
386
    regular expression.
387
    """
388
    ans = []
389
    pattern = re.compile(expr)
390
    if path is None or path == ".":
391
        path = os.getcwd()
392
    elif not os.path.exists(path):
393
        raise IOError("Unknown directory '" + path + "'")
394
    for root, dirs, files in link_walker(path):
395
        for name in files:
396
            if pattern.match(name):
397
                name = os.path.join(root, name)
398
                if os.path.islink(name):
399
                    ans.append(os.path.abspath(os.readlink(name)))
400
                else:
401
                    ans.append(os.path.abspath(name))
402
    return ans
403
 
404
 
405
#
406
# Get the list of datafiles in the directories specified by the user
407
#
408
def get_datafiles(flist, options):
409
    allfiles = set()
410
    for dir_ in flist:
411
        if options.gcov_files:
412
            if options.verbose:
413
                sys.stdout.write(
414
                    "Scanning directory %s for gcov files...\n" % (dir_, )
415
                )
416
            files = search_file(".*\.gcov$", dir_)
417
            gcov_files = [file for file in files if file.endswith('gcov')]
418
            if options.verbose:
419
                sys.stdout.write(
420
                    "Found %d files (and will process %d)\n" %
421
                    (len(files), len(gcov_files))
422
                )
423
            allfiles.update(gcov_files)
424
        else:
425
            if options.verbose:
426
                sys.stdout.write(
427
                    "Scanning directory %s for gcda/gcno files...\n" % (dir_, )
428
                )
429
            files = search_file(".*\.gc(da|no)$", dir_)
430
            # gcno files will *only* produce uncovered results; however,
431
            # that is useful information for the case where a compilation
432
            # unit is never actually exercised by the test code.  So, we
433
            # will process gcno files, but ONLY if there is no corresponding
434
            # gcda file.
435
            gcda_files = [
436
                filenm for filenm in files if filenm.endswith('gcda')
437
            ]
438
            tmp = set(gcda_files)
439
            gcno_files = [
440
                filenm for filenm in files if
441
                filenm.endswith('gcno') and filenm[:-2] + 'da' not in tmp
442
            ]
443
            if options.verbose:
444
                sys.stdout.write(
445
                    "Found %d files (and will process %d)\n" %
446
                    (len(files), len(gcda_files) + len(gcno_files)))
447
            allfiles.update(gcda_files)
448
            allfiles.update(gcno_files)
449
    return allfiles
450
 
451
 
452
#
453
# Process a single gcov datafile
454
#
455
def process_gcov_data(data_fname, covdata, options):
456
    INPUT = open(data_fname, "r")
457
    #
458
    # Get the filename
459
    #
460
    line = INPUT.readline()
461
    segments = line.split(':', 3)
462
    if len(segments) != 4 or not \
463
            segments[2].lower().strip().endswith('source'):
464
        raise RuntimeError(
465
            'Fatal error parsing gcov file, line 1: \n\t"%s"' % line.rstrip()
466
        )
467
    currdir = os.getcwd()
468
    os.chdir(starting_dir)
469
    if sys.version_info >= (2, 6):
470
        fname = os.path.abspath((segments[-1]).strip())
471
    else:
472
        fname = aliases.unalias_path(os.path.abspath((segments[-1]).strip()))
473
    os.chdir(currdir)
474
    if options.verbose:
475
        sys.stdout.write("Parsing coverage data for file %s\n" % fname)
476
    #
477
    # Return if the filename does not match the filter
478
    #
479
    filtered_fname = None
480
    for i in range(0, len(options.filter)):
481
        if options.filter[i].match(fname):
482
            filtered_fname = options.root_filter.sub('', fname)
483
            break
484
    if filtered_fname is None:
485
        if options.verbose:
486
            sys.stdout.write("  Filtering coverage data for file %s\n" % fname)
487
        return
488
    #
489
    # Return if the filename matches the exclude pattern
490
    #
491
    for exc in options.exclude:
492
        if (filtered_fname is not None and exc.match(filtered_fname)) or \
493
                exc.match(fname) or \
494
                exc.match(os.path.abspath(fname)):
495
            if options.verbose:
496
                sys.stdout.write(
497
                    "  Excluding coverage data for file %s\n" % fname
498
                )
499
            return
500
    #
501
    # Parse each line, and record the lines
502
    # that are uncovered
503
    #
504
    excluding = []
505
    noncode = set()
506
    uncovered = set()
507
    uncovered_exceptional = set()
508
    covered = {}
509
    branches = {}
510
    #first_record=True
511
    lineno = 0
512
    last_code_line = ""
513
    last_code_lineno = 0
514
    last_code_line_excluded = False
515
    for line in INPUT:
516
        segments = line.split(":", 2)
517
        #print "\t","Y", segments
518
        tmp = segments[0].strip()
519
        if len(segments) > 1:
520
            try:
521
                lineno = int(segments[1].strip())
522
            except:
523
                pass  # keep previous line number!
524
 
525
        if exclude_line_flag in line:
526
            excl_line = False
527
            for header, flag in exclude_line_pattern.findall(line):
528
                if flag == 'START':
529
                    excluding.append((header, lineno))
530
                elif flag == 'STOP':
531
                    if excluding:
532
                        _header, _line = excluding.pop()
533
                        if _header != header:
534
                            sys.stderr.write(
535
                                "(WARNING) %s_EXCL_START found on line %s "
536
                                "was terminated by %s_EXCL_STOP on line %s, "
537
                                "when processing %s\n"
538
                                % (_header, _line, header, lineno, fname)
539
                            )
540
                    else:
541
                        sys.stderr.write(
542
                            "(WARNING) mismatched coverage exclusion flags.\n"
543
                            "\t%s_EXCL_STOP found on line %s without "
544
                            "corresponding %s_EXCL_START, when processing %s\n"
545
                            % (header, lineno, header, fname)
546
                        )
547
                elif flag == 'LINE':
548
                    # We buffer the line exclusion so that it is always
549
                    # the last thing added to the exclusion list (and so
550
                    # only ONE is ever added to the list).  This guards
551
                    # against cases where puts a _LINE and _START (or
552
                    # _STOP) on the same line... it also guards against
553
                    # duplicate _LINE flags.
554
                    excl_line = True
555
            if excl_line:
556
                excluding.append(False)
557
 
558
        is_code_statement = False
559
        if tmp[0] == '-' or (excluding and tmp[0] in "#=0123456789"):
560
            is_code_statement = True
561
            code = segments[2].strip()
562
            # remember certain non-executed lines
563
            if excluding or len(code) == 0 or code == "{" or code == "}" or \
564
                    code.startswith("//") or code == 'else':
565
                noncode.add(lineno)
566
        elif tmp[0] == '#':
567
            is_code_statement = True
568
            uncovered.add(lineno)
569
        elif tmp[0] == '=':
570
            is_code_statement = True
571
            uncovered_exceptional.add(lineno)
572
        elif tmp[0] in "0123456789":
573
            is_code_statement = True
574
            covered[lineno] = int(segments[0].strip())
575
        elif tmp.startswith('branch'):
576
            exclude_branch = False
577
            if options.exclude_unreachable_branches and \
578
                    lineno == last_code_lineno:
579
                if last_code_line_excluded:
580
                    exclude_branch = True
581
                    exclude_reason = "marked with exclude pattern"
582
                else:
583
                    code = last_code_line
584
                    code = re.sub(cpp_style_comment_pattern, '', code)
585
                    code = re.sub(c_style_comment_pattern, '', code)
586
                    code = code.strip()
587
                    code_nospace = code.replace(' ', '')
588
                    exclude_branch = \
589
                        code in ['', '{', '}'] or code_nospace == '{}'
590
                    exclude_reason = "detected as compiler-generated code"
591
 
592
            if exclude_branch:
593
                if options.verbose:
594
                    sys.stdout.write(
595
                        "Excluding unreachable branch on line %d "
596
                        "in file %s (%s).\n"
597
                        % (lineno, fname, exclude_reason)
598
                    )
599
            else:
600
                fields = line.split()
601
                try:
602
                    count = int(fields[3])
603
                except:
604
                    count = 0
605
                branches.setdefault(lineno, {})[int(fields[1])] = count
606
        elif tmp.startswith('call'):
607
            pass
608
        elif tmp.startswith('function'):
609
            pass
610
        elif tmp[0] == 'f':
611
            pass
612
            #if first_record:
613
                #first_record=False
614
                #uncovered.add(prev)
615
            #if prev in uncovered:
616
                #tokens=re.split('[ \t]+',tmp)
617
                #if tokens[3] != "0":
618
                    #uncovered.remove(prev)
619
            #prev = int(segments[1].strip())
620
            #first_record=True
621
        else:
622
            sys.stderr.write(
623
                "(WARNING) Unrecognized GCOV output: '%s'\n"
624
                "\tThis is indicitive of a gcov output parse error.\n"
625
                "\tPlease report this to the gcovr developers." % tmp
626
            )
627
 
628
        # save the code line to use it later with branches
629
        if is_code_statement:
630
            last_code_line = "".join(segments[2:])
631
            last_code_lineno = lineno
632
            last_code_line_excluded = False
633
            if excluding:
634
                last_code_line_excluded = True
635
 
636
        # clear the excluding flag for single-line excludes
637
        if excluding and not excluding[-1]:
638
            excluding.pop()
639
 
640
    ##print 'uncovered',uncovered
641
    ##print 'covered',covered
642
    ##print 'branches',branches
643
    ##print 'noncode',noncode
644
    #
645
    # If the file is already in covdata, then we
646
    # remove lines that are covered here.  Otherwise,
647
    # initialize covdata
648
    #
649
    if not fname in covdata:
650
        covdata[fname] = CoverageData(
651
            fname, uncovered, uncovered_exceptional, covered, branches, noncode
652
        )
653
    else:
654
        covdata[fname].update(
655
            uncovered, uncovered_exceptional, covered, branches, noncode
656
        )
657
    INPUT.close()
658
 
659
    for header, line in excluding:
660
        sys.stderr.write("(WARNING) The coverage exclusion region start flag "
661
                         "%s_EXCL_START\n\ton line %d did not have "
662
                         "corresponding %s_EXCL_STOP flag\n\t in file %s.\n"
663
                         % (header, line, header, fname))
664
 
665
 
666
#
667
# Process a datafile (generated by running the instrumented application)
668
# and run gcov with the corresponding arguments
669
#
670
# This is trickier than it sounds: The gcda/gcno files are stored in the
671
# same directory as the object files; however, gcov must be run from the
672
# same directory where gcc/g++ was run.  Normally, the user would know
673
# where gcc/g++ was invoked from and could tell gcov the path to the
674
# object (and gcda) files with the --object-directory command.
675
# Unfortunately, we do everything backwards: gcovr looks for the gcda
676
# files and then has to infer the original gcc working directory.
677
#
678
# In general, (but not always) we can assume that the gcda file is in a
679
# subdirectory of the original gcc working directory, so we will first
680
# try ".", and on error, move up the directory tree looking for the
681
# correct working directory (letting gcov's own error codes dictate when
682
# we hit the right directory).  This covers 90+% of the "normal" cases.
683
# The exception to this is if gcc was invoked with "-o ../[...]" (i.e.,
684
# the object directory was a peer (not a parent/child) of the cwd.  In
685
# this case, things are really tough.  We accept an argument
686
# (--object-directory) that SHOULD BE THE SAME as the one povided to
687
# gcc.  We will then walk that path (backwards) in the hopes of
688
# identifying the original gcc working directory (there is a bit of
689
# trial-and-error here)
690
#
691
def process_datafile(filename, covdata, options):
692
    #print ""
693
    #print "PROCESS",filename
694
    #
695
    # Launch gcov
696
    #
697
    abs_filename = os.path.abspath(filename)
698
    dirname, fname = os.path.split(abs_filename)
699
    #(name,ext) = os.path.splitext(base)
700
 
701
    potential_wd = []
702
    errors = []
703
    Done = False
704
 
705
    if options.objdir:
706
        #print "X - objdir"
707
        src_components = abs_filename.split(os.sep)
708
        components = normpath(options.objdir).split(os.sep)
709
        idx = 1
710
        while idx <= len(components):
711
            if idx > len(src_components):
712
                break
713
            if components[-1 * idx] != src_components[-1 * idx]:
714
                break
715
            idx += 1
716
        if idx > len(components):
717
            pass  # a parent dir; the normal process will find it
718
        elif components[-1 * idx] == '..':
719
            # NB: os.path.join does not re-add leading '/' characters!?!
720
            dirs = [
721
                os.path.sep.join(src_components[:len(src_components) - idx])
722
            ]
723
            while idx <= len(components) and components[-1 * idx] == '..':
724
                tmp = []
725
                for d in dirs:
726
                    for f in os.listdir(d):
727
                        x = os.path.join(d, f)
728
                        if os.path.isdir(x):
729
                            tmp.append(x)
730
                dirs = tmp
731
                idx += 1
732
            potential_wd = dirs
733
        else:
734
            if components[0] == '':
735
                # absolute path
736
                tmp = [options.objdir]
737
            else:
738
                # relative path: check relative to both the cwd and the
739
                # gcda file
740
                tmp = [
741
                    os.path.join(x, options.objdir) for x in
742
                    [os.path.dirname(abs_filename), os.getcwd()]
743
                ]
744
            potential_wd = [
745
                testdir for testdir in tmp if os.path.isdir(testdir)
746
            ]
747
            if len(potential_wd) == 0:
748
                errors.append("ERROR: cannot identify the location where GCC "
749
                              "was run using --object-directory=%s\n" %
750
                              options.objdir)
751
            # Revert to the normal
752
            #sys.exit(1)
753
 
754
    # no objdir was specified (or it was a parent dir); walk up the dir tree
755
    if len(potential_wd) == 0:
756
        potential_wd.append(root_dir)
757
        #print "X - potential_wd", root_dir
758
        wd = os.path.split(abs_filename)[0]
759
        while True:
760
            potential_wd.append(wd)
761
            wd = os.path.split(wd)[0]
762
            if wd == potential_wd[-1]:
763
                #
764
                # Stop at the root of the file system
765
                #
766
                break
767
 
768
    cmd = [
769
        options.gcov_cmd, abs_filename,
770
        "--branch-counts", "--branch-probabilities", "--preserve-paths",
771
        '--object-directory', dirname
772
    ]
773
 
774
    # NB: Currently, we will only parse English output
775
    env = dict(os.environ)
776
    env['LC_ALL'] = 'en_US'
777
 
778
    #print "HERE", potential_wd
779
    while len(potential_wd) > 0 and not Done:
780
        # NB: either len(potential_wd) == 1, or all entires are absolute
781
        # paths, so we don't have to chdir(starting_dir) at every
782
        # iteration.
783
 
784
        #
785
        # Iterate from the end of the potential_wd list, which is the root
786
        # directory
787
        #
788
        dir_ = potential_wd.pop(0)
789
        #print "X DIR:", dir_
790
        os.chdir(dir_)
791
 
792
        if options.verbose:
793
            sys.stdout.write(
794
                "Running gcov: '%s' in '%s'\n" % (' '.join(cmd), os.getcwd())
795
            )
796
        out, err = subprocess.Popen(
797
            cmd, env=env,
798
            stdout=subprocess.PIPE,
799
            stderr=subprocess.PIPE).communicate()
800
        out = out.decode('utf-8')
801
        err = err.decode('utf-8')
802
 
803
        # find the files that gcov created
804
        gcov_files = {'active' : [], 'filter' : [], 'exclude' : []}
805
        for line in out.splitlines():
806
            found = output_re.search(line.strip())
807
            if found is not None:
808
                fname = found.group(1)
809
                if not options.gcov_filter.match(fname):
810
                    if options.verbose:
811
                        sys.stdout.write("Filtering gcov file %s\n" % fname)
812
                    gcov_files['filter'].append(fname)
813
                    continue
814
                exclude = False
815
                for exc in options.gcov_exclude:
816
                    if exc.match(options.gcov_filter.sub('', fname)) or \
817
                            exc.match(fname) or \
818
                            exc.match(os.path.abspath(fname)):
819
                        exclude = True
820
                        break
821
                if not exclude:
822
                    gcov_files['active'].append(fname)
823
                elif options.verbose:
824
                    sys.stdout.write("Excluding gcov file %s\n" % fname)
825
                    gcov_files['exclude'].append(fname)
826
 
827
        #print "HERE", err, "XXX", source_re.search(err)
828
        if source_re.search(err):
829
            #
830
            # gcov tossed errors: try the next potential_wd
831
            #
832
            errors.append(err)
833
        else:
834
            #
835
            # Process *.gcov files
836
            #
837
            for fname in gcov_files['active']:
838
                process_gcov_data(fname, covdata, options)
839
            Done = True
840
 
841
        if not options.keep:
842
            for group in gcov_files.values():
843
                for fname in group:
844
                    if os.path.exists(fname):
845
                        # Only remove files that actually exist.
846
                        os.remove(fname)
847
 
848
    os.chdir(starting_dir)
849
    if options.delete:
850
        if not abs_filename.endswith('gcno'):
851
            os.remove(abs_filename)
852
 
853
    if not Done:
854
        sys.stderr.write(
855
            "(WARNING) GCOV produced the following errors processing %s:\n"
856
            "\t   %s"
857
            "\t(gcovr could not infer a working directory that resolved it.)\n"
858
            % (filename, "\t   ".join(errors))
859
        )
860
 
861
 
862
#
863
#  Process Already existing gcov files
864
#
865
def process_existing_gcov_file(filename, covdata, options):
866
    if not options.gcov_filter.match(filename):
867
        if options.verbose:
868
            sys.stdout.write("Filtering gcov file %s\n" % filename)
869
        return
870
 
871
    for exc in options.gcov_exclude:
872
        if exc.match(options.gcov_filter.sub('', filename)) or \
873
                exc.match(filename) or \
874
                exc.match(os.path.abspath(filename)):
875
            if options.verbose:
876
                sys.stdout.write("Excluding gcov file %s\n" % filename)
877
            return
878
 
879
    process_gcov_data(filename, covdata, options)
880
 
881
    if not options.keep:
882
        if os.path.exists(filename):
883
            # Only remove files that actually exist.
884
            os.remove(filename)
885
 
886
 
887
#
888
# Produce the classic gcovr text report
889
#
890
def print_text_report(covdata):
891
    def _num_uncovered(key):
892
        (total, covered, percent) = covdata[key].coverage()
893
        return total - covered
894
 
895
    def _percent_uncovered(key):
896
        (total, covered, percent) = covdata[key].coverage()
897
        if covered:
898
            return -1.0 * covered / total
899
        else:
900
            return total or 1e6
901
 
902
    def _alpha(key):
903
        return key
904
 
905
    if options.output:
906
        OUTPUT = open(options.output, 'w')
907
    else:
908
        OUTPUT = sys.stdout
909
    total_lines = 0
910
    total_covered = 0
911
 
912
    # Header
913
    OUTPUT.write("-" * 78 + '\n')
914
    OUTPUT.write(" " * 27 + "GCC Code Coverage Report\n")
915
    OUTPUT.write("Directory: " + options.root + "\n")
916
    OUTPUT.write("-" * 78 + '\n')
917
    a = options.show_branch and "Branches" or "Lines"
918
    b = options.show_branch and "Taken" or "Exec"
919
    c = "Missing"
920
    OUTPUT.write(
921
        "File".ljust(40) + a.rjust(8) + b.rjust(8) + "  Cover   " + c + "\n"
922
    )
923
    OUTPUT.write("-" * 78 + '\n')
924
 
925
    # Data
926
    keys = list(covdata.keys())
927
    keys.sort(
928
        key=options.sort_uncovered and _num_uncovered or
929
        options.sort_percent and _percent_uncovered or _alpha
930
    )
931
    for key in keys:
932
        (t, n, txt) = covdata[key].summary()
933
        total_lines += t
934
        total_covered += n
935
        OUTPUT.write(txt + '\n')
936
 
937
    # Footer & summary
938
    OUTPUT.write("-" * 78 + '\n')
939
    percent = total_lines and str(int(100.0 * total_covered / total_lines)) \
940
        or "--"
941
    OUTPUT.write(
942
        "TOTAL".ljust(40) + str(total_lines).rjust(8) +
943
        str(total_covered).rjust(8) + str(percent).rjust(6) + "%" + '\n'
944
    )
945
    OUTPUT.write("-" * 78 + '\n')
946
 
947
    # Close logfile
948
    if options.output:
949
        OUTPUT.close()
950
 
951
 
952
#
953
# Prints a small report to the standard output
954
#
955
def print_summary(covdata):
956
    lines_total = 0
957
    lines_covered = 0
958
    branches_total = 0
959
    branches_covered = 0
960
 
961
    keys = list(covdata.keys())
962
 
963
    for key in keys:
964
        options.show_branch = False
965
        (t, n, txt) = covdata[key].coverage()
966
        lines_total += t
967
        lines_covered += n
968
 
969
        options.show_branch = True
970
        (t, n, txt) = covdata[key].coverage()
971
        branches_total += t
972
        branches_covered += n
973
 
974
    percent = lines_total and (100.0 * lines_covered / lines_total)
975
    percent_branches = branches_total and \
976
        (100.0 * branches_covered / branches_total)
977
 
978
    lines_out = "lines: %0.1f%% (%s out of %s)\n" % (
979
        percent, lines_covered, lines_total
980
    )
981
    branches_out = "branches: %0.1f%% (%s out of %s)\n" % (
982
        percent_branches, branches_covered, branches_total
983
    )
984
 
985
    sys.stdout.write(lines_out)
986
    sys.stdout.write(branches_out)
987
 
988
#
989
# CSS declarations for the HTML output
990
#
991
css = Template('''
992
    body
993
    {
994
      color: #000000;
995
      background-color: #FFFFFF;
996
    }
997
 
998
    /* Link formats: use maroon w/underlines */
999
    a:link
1000
    {
1001
      color: navy;
1002
      text-decoration: underline;
1003
    }
1004
    a:visited
1005
    {
1006
      color: maroon;
1007
      text-decoration: underline;
1008
    }
1009
    a:active
1010
    {
1011
      color: navy;
1012
      text-decoration: underline;
1013
    }
1014
 
1015
    /*** TD formats ***/
1016
    td
1017
    {
1018
      font-family: sans-serif;
1019
    }
1020
    td.title
1021
    {
1022
      text-align: center;
1023
      padding-bottom: 10px;
1024
      font-size: 20pt;
1025
      font-weight: bold;
1026
    }
1027
 
1028
    /* TD Header Information */
1029
    td.headerName
1030
    {
1031
      text-align: right;
1032
      color: black;
1033
      padding-right: 6px;
1034
      font-weight: bold;
1035
      vertical-align: top;
1036
      white-space: nowrap;
1037
    }
1038
    td.headerValue
1039
    {
1040
      text-align: left;
1041
      color: blue;
1042
      font-weight: bold;
1043
      white-space: nowrap;
1044
    }
1045
    td.headerTableEntry
1046
    {
1047
      text-align: right;
1048
      color: black;
1049
      font-weight: bold;
1050
      white-space: nowrap;
1051
      padding-left: 12px;
1052
      padding-right: 4px;
1053
      background-color: LightBlue;
1054
    }
1055
    td.headerValueLeg
1056
    {
1057
      text-align: left;
1058
      color: black;
1059
      font-size: 80%;
1060
      white-space: nowrap;
1061
      padding-left: 10px;
1062
      padding-right: 10px;
1063
      padding-top: 2px;
1064
    }
1065
 
1066
    /* Color of horizontal ruler */
1067
    td.hr
1068
    {
1069
      background-color: navy;
1070
      height:3px;
1071
    }
1072
    /* Footer format */
1073
    td.footer
1074
    {
1075
      text-align: center;
1076
      padding-top: 3px;
1077
      font-family: sans-serif;
1078
    }
1079
 
1080
    /* Coverage Table */
1081
 
1082
    td.coverTableHead
1083
    {
1084
      text-align: center;
1085
      color: white;
1086
      background-color: SteelBlue;
1087
      font-family: sans-serif;
1088
      font-size: 120%;
1089
      white-space: nowrap;
1090
      padding-left: 4px;
1091
      padding-right: 4px;
1092
    }
1093
    td.coverFile
1094
    {
1095
      text-align: left;
1096
      padding-left: 10px;
1097
      padding-right: 20px;
1098
      color: black;
1099
      background-color: LightBlue;
1100
      font-family: monospace;
1101
      font-weight: bold;
1102
      font-size: 110%;
1103
    }
1104
    td.coverBar
1105
    {
1106
      padding-left: 10px;
1107
      padding-right: 10px;
1108
      background-color: LightBlue;
1109
    }
1110
    td.coverBarOutline
1111
    {
1112
      background-color: white;
1113
    }
1114
    td.coverValue
1115
    {
1116
      padding-top: 2px;
1117
      text-align: right;
1118
      padding-left: 10px;
1119
      padding-right: 10px;
1120
      font-family: sans-serif;
1121
      white-space: nowrap;
1122
      font-weight: bold;
1123
    }
1124
 
1125
    /* Link Details */
1126
    a.detail:link
1127
    {
1128
      color: #B8D0FF;
1129
      font-size:80%;
1130
    }
1131
    a.detail:visited
1132
    {
1133
      color: #B8D0FF;
1134
      font-size:80%;
1135
    }
1136
    a.detail:active
1137
    {
1138
      color: #FFFFFF;
1139
      font-size:80%;
1140
    }
1141
 
1142
    .graphcont{
1143
        color:#000;
1144
        font-weight:700;
1145
        float:left
1146
    }
1147
 
1148
    .graph{
1149
        float:left;
1150
        background-color: white;
1151
        position:relative;
1152
        width:280px;
1153
        padding:0
1154
    }
1155
 
1156
    .graph .bar{
1157
        display:block;
1158
        position:relative;
1159
        border:black 1px solid;
1160
        text-align:center;
1161
        color:#fff;
1162
        height:10px;
1163
        font-family:Arial,Helvetica,sans-serif;
1164
        font-size:12px;
1165
        line-height:1.9em
1166
    }
1167
 
1168
    .graph .bar span{
1169
        position:absolute;
1170
        left:1em
1171
    }
1172
 
1173
    td.coveredLine,
1174
    span.coveredLine
1175
    {
1176
        background-color: ${covered_color}!important;
1177
    }
1178
 
1179
    td.uncoveredLine,
1180
    span.uncoveredLine
1181
    {
1182
        background-color: ${uncovered_color}!important;
1183
    }
1184
 
1185
    .linecount
1186
    {
1187
        border-right: 1px gray solid;
1188
        background-color: lightgray;
1189
    }
1190
 
1191
    .src
1192
    {
1193
        padding-left: 12px;
1194
    }
1195
 
1196
    .srcHeader
1197
    {
1198
        font-family: monospace;
1199
        font-weight: bold;
1200
    }
1201
 
1202
    pre
1203
    {
1204
        height : 15px;
1205
        margin-top: 0;
1206
        margin-bottom: 0;
1207
    }
1208
 
1209
    .lineno
1210
    {
1211
        background-color: #EFE383;
1212
        border-right: 1px solid #BBB15F;
1213
    }
1214
''')
1215
 
1216
#
1217
# A string template for the root HTML output
1218
#
1219
root_page = Template('''
1220
1221
 
1222
1223
  
1224
  ${HEAD}
1225
  
1228
1229
 
1230
1231
 
1232
  
1233
    
GCC Code Coverage Report
1234
    
1235
 
1236
    
1237
      
1238
        
1239
          
1240
            
Directory:
1241
            
${DIRECTORY}
1242
            
1243
            
1244
            
Exec
1245
            
Total
1246
            
Coverage
1247
          
1248
          
1249
            
Date:
1250
            
${DATE}
1251
            
1252
            
Lines:
1253
            
${LINES_EXEC}
1254
            
${LINES_TOTAL}
1255
            
${LINES_COVERAGE} %
1256
          
1257
          
1258
            
Legend:
1259
            
1260
              low: < ${COVERAGE_MED} %
1261
              medium: >= ${COVERAGE_MED} %
1262
              high: >= ${COVERAGE_HIGH} %
1263
            
1264
            
1265
            
Branches:
1266
            
${BRANCHES_EXEC}
1267
            
${BRANCHES_TOTAL}
1268
            
${BRANCHES_COVERAGE} %
1269
          
1270
        
1271
      
1272
    
1273
 
1274
    
1275
  
1276
 
1277
  
1278
  
1279
    
1280
      

1281
      
1282
      
1283
      
1284
      
1285
      
1286
    
1287
    
1288
      
File
1289
      
Lines
1290
      
Branches
1291
    
1292
 
1293
    ${ROWS}
1294
 
1295
    
1296
      

1297
      
1298
      
1299
      
1300
      
1301
      
1302
    
1303
  
1304
  
1305
 
1306
  
1307
    
1308
    
1309
  
1310
  
1311
 
1312
1313
 
1314
1315
''')
1316
 
1317
#
1318
# A string template for the source file HTML output
1319
#
1320
source_page = Template('''
1321
1322
 
1323
1324
  
1325
  ${HEAD}
1326
  
1329
1330
 
1331
1332
 
1333
  
1334
    
GCC Code Coverage Report
1335
    
1336
 
1337
    
1338
      
1339
        
1340
          
1341
            
Directory:
1342
            
${DIRECTORY}
1343
            
1344
            
1345
            
Exec
1346
            
Total
1347
            
Coverage
1348
          
1349
          
1350
            
File:
1351
            
${FILENAME}
1352
            
1353
            
Lines:
1354
            
${LINES_EXEC}
1355
            
${LINES_TOTAL}
1356
            
${LINES_COVERAGE} %
1357
          
1358
          
1359
            
Date:
1360
            
${DATE}
1361
            
1362
            
Branches:
1363
            
${BRANCHES_EXEC}
1364
            
${BRANCHES_TOTAL}
1365
            
${BRANCHES_COVERAGE} %
1366
          
1367
        
1368
      
1369
    
1370
 
1371
    
1372
  
1373
 
1374
  
1375
  
1376
    
1377
      
Line
1378
      
Exec
1379
      
Source
1380
    
1381
 
1382
    ${ROWS}
1383
 
1384
  
1385
  
1386
 
1387
  
1388
    
1389
    
1390
  
1391
  
1392
 
1393
1394
 
1395
1396
''')
1397
 
1398
 
1399
#
1400
# Produce an HTML report
1401
#
1402
def print_html_report(covdata, details):
1403
    def _num_uncovered(key):
1404
        (total, covered, percent) = covdata[key].coverage()
1405
        return total - covered
1406
 
1407
    def _percent_uncovered(key):
1408
        (total, covered, percent) = covdata[key].coverage()
1409
        if covered:
1410
            return -1.0 * covered / total
1411
        else:
1412
            return total or 1e6
1413
 
1414
    def _alpha(key):
1415
        return key
1416
 
1417
    if options.output is None:
1418
        details = False
1419
    data = {}
1420
    data['HEAD'] = "Head"
1421
    data['VERSION'] = version_str()
1422
    data['TIME'] = str(int(time.time()))
1423
    data['DATE'] = datetime.date.today().isoformat()
1424
    data['ROWS'] = []
1425
    data['low_color'] = low_color
1426
    data['medium_color'] = medium_color
1427
    data['high_color'] = high_color
1428
    data['COVERAGE_MED'] = medium_coverage
1429
    data['COVERAGE_HIGH'] = high_coverage
1430
    data['CSS'] = css.substitute(
1431
        low_color=low_color, medium_color=medium_color, high_color=high_color,
1432
        covered_color=covered_color, uncovered_color=uncovered_color
1433
    )
1434
    data['DIRECTORY'] = ''
1435
 
1436
    branchTotal = 0
1437
    branchCovered = 0
1438
    options.show_branch = True
1439
    for key in covdata.keys():
1440
        (total, covered, percent) = covdata[key].coverage()
1441
        branchTotal += total
1442
        branchCovered += covered
1443
    data['BRANCHES_EXEC'] = str(branchCovered)
1444
    data['BRANCHES_TOTAL'] = str(branchTotal)
1445
    coverage = 0.0 if branchTotal == 0 else \
1446
        round(100.0 * branchCovered / branchTotal, 1)
1447
    data['BRANCHES_COVERAGE'] = str(coverage)
1448
    if coverage < medium_coverage:
1449
        data['BRANCHES_COLOR'] = low_color
1450
    elif coverage < high_coverage:
1451
        data['BRANCHES_COLOR'] = medium_color
1452
    else:
1453
        data['BRANCHES_COLOR'] = high_color
1454
 
1455
    lineTotal = 0
1456
    lineCovered = 0
1457
    options.show_branch = False
1458
    for key in covdata.keys():
1459
        (total, covered, percent) = covdata[key].coverage()
1460
        lineTotal += total
1461
        lineCovered += covered
1462
    data['LINES_EXEC'] = str(lineCovered)
1463
    data['LINES_TOTAL'] = str(lineTotal)
1464
    coverage = 0.0 if lineTotal == 0 else \
1465
        round(100.0 * lineCovered / lineTotal, 1)
1466
    data['LINES_COVERAGE'] = str(coverage)
1467
    if coverage < medium_coverage:
1468
        data['LINES_COLOR'] = low_color
1469
    elif coverage < high_coverage:
1470
        data['LINES_COLOR'] = medium_color
1471
    else:
1472
        data['LINES_COLOR'] = high_color
1473
 
1474
    # Generate the coverage output (on a per-package basis)
1475
    #source_dirs = set()
1476
    files = []
1477
    filtered_fname = ''
1478
    keys = list(covdata.keys())
1479
    keys.sort(
1480
        key=options.sort_uncovered and _num_uncovered or
1481
        options.sort_percent and _percent_uncovered or _alpha
1482
    )
1483
    for f in keys:
1484
        cdata = covdata[f]
1485
        filtered_fname = options.root_filter.sub('', f)
1486
        files.append(filtered_fname)
1487
        cdata._filename = filtered_fname
1488
        ttmp = os.path.abspath(options.output).split('.')
1489
        if len(ttmp) > 1:
1490
            cdata._sourcefile = \
1491
                '.'.join(ttmp[:-1]) + \
1492
                '.' + cdata._filename.replace('/', '_') + \
1493
                '.' + ttmp[-1]
1494
        else:
1495
            cdata._sourcefile = \
1496
                ttmp[0] + '.' + cdata._filename.replace('/', '_') + '.html'
1497
    # Define the common root directory, which may differ from options.root
1498
    # when source files share a common prefix.
1499
    if len(files) > 1:
1500
        commondir = posixpath.commonprefix(files)
1501
        if commondir != '':
1502
            data['DIRECTORY'] = commondir
1503
    else:
1504
        dir_, file_ = os.path.split(filtered_fname)
1505
        if dir_ != '':
1506
            data['DIRECTORY'] = dir_ + os.sep
1507
 
1508
    for f in keys:
1509
        cdata = covdata[f]
1510
        class_lines = 0
1511
        class_hits = 0
1512
        class_branches = 0
1513
        class_branch_hits = 0
1514
        for line in cdata.all_lines:
1515
            hits = cdata.covered.get(line, 0)
1516
            class_lines += 1
1517
            if hits > 0:
1518
                class_hits += 1
1519
            branches = cdata.branches.get(line)
1520
            if branches is None:
1521
                pass
1522
            else:
1523
                b_hits = 0
1524
                for v in branches.values():
1525
                    if v > 0:
1526
                        b_hits += 1
1527
                coverage = 100 * b_hits / len(branches)
1528
                class_branch_hits += b_hits
1529
                class_branches += len(branches)
1530
 
1531
        lines_covered = 100.0 if class_lines == 0 else \
1532
            100.0 * class_hits / class_lines
1533
        branches_covered = 100.0 if class_branches == 0 else \
1534
            100.0 * class_branch_hits / class_branches
1535
 
1536
        data['ROWS'].append(html_row(
1537
            details, cdata._sourcefile,
1538
            directory=data['DIRECTORY'],
1539
            filename=cdata._filename,
1540
            LinesExec=class_hits,
1541
            LinesTotal=class_lines,
1542
            LinesCoverage=lines_covered,
1543
            BranchesExec=class_branch_hits,
1544
            BranchesTotal=class_branches,
1545
            BranchesCoverage=branches_covered
1546
        ))
1547
    data['ROWS'] = '\n'.join(data['ROWS'])
1548
 
1549
    if data['DIRECTORY'] == '':
1550
        data['DIRECTORY'] = "."
1551
 
1552
    htmlString = root_page.substitute(**data)
1553
 
1554
    if options.output is None:
1555
        sys.stdout.write(htmlString + '\n')
1556
    else:
1557
        OUTPUT = open(options.output, 'w')
1558
        OUTPUT.write(htmlString + '\n')
1559
        OUTPUT.close()
1560
 
1561
    # Return, if no details are requested
1562
    if not details:
1563
        return
1564
 
1565
    #
1566
    # Generate an HTML file for every source file
1567
    #
1568
    for f in keys:
1569
        cdata = covdata[f]
1570
 
1571
        data['FILENAME'] = cdata._filename
1572
        data['ROWS'] = ''
1573
 
1574
        options.show_branch = True
1575
        branchTotal, branchCovered, tmp = cdata.coverage()
1576
        data['BRANCHES_EXEC'] = str(branchCovered)
1577
        data['BRANCHES_TOTAL'] = str(branchTotal)
1578
        coverage = 0.0 if branchTotal == 0 else \
1579
            round(100.0 * branchCovered / branchTotal, 1)
1580
        data['BRANCHES_COVERAGE'] = str(coverage)
1581
        if coverage < medium_coverage:
1582
            data['BRANCHES_COLOR'] = low_color
1583
        elif coverage < high_coverage:
1584
            data['BRANCHES_COLOR'] = medium_color
1585
        else:
1586
            data['BRANCHES_COLOR'] = high_color
1587
 
1588
        options.show_branch = False
1589
        lineTotal, lineCovered, tmp = cdata.coverage()
1590
        data['LINES_EXEC'] = str(lineCovered)
1591
        data['LINES_TOTAL'] = str(lineTotal)
1592
        coverage = 0.0 if lineTotal == 0 else \
1593
            round(100.0 * lineCovered / lineTotal, 1)
1594
        data['LINES_COVERAGE'] = str(coverage)
1595
        if coverage < medium_coverage:
1596
            data['LINES_COLOR'] = low_color
1597
        elif coverage < high_coverage:
1598
            data['LINES_COLOR'] = medium_color
1599
        else:
1600
            data['LINES_COLOR'] = high_color
1601
 
1602
        data['ROWS'] = []
1603
        currdir = os.getcwd()
1604
        os.chdir(root_dir)
1605
        INPUT = open(data['FILENAME'], 'r')
1606
        ctr = 1
1607
        for line in INPUT:
1608
            data['ROWS'].append(
1609
                source_row(ctr, line.rstrip(), cdata)
1610
            )
1611
            ctr += 1
1612
        INPUT.close()
1613
        os.chdir(currdir)
1614
        data['ROWS'] = '\n'.join(data['ROWS'])
1615
 
1616
        htmlString = source_page.substitute(**data)
1617
        OUTPUT = open(cdata._sourcefile, 'w')
1618
        OUTPUT.write(htmlString + '\n')
1619
        OUTPUT.close()
1620
 
1621
 
1622
def source_row(lineno, source, cdata):
1623
    rowstr = Template('''
1624
    
1625
    
${lineno}
1626
    
${linecount}
1627
    
${source}
1628
    
1629
    kwargs = {}
1630
    kwargs['lineno'] = str(lineno)
1631
    if lineno in cdata.covered:
1632
        kwargs['covclass'] = 'coveredLine'
1633
        kwargs['linecount'] = str(cdata.covered.get(lineno, 0))
1634
    elif lineno in cdata.uncovered:
1635
        kwargs['covclass'] = 'uncoveredLine'
1636
        kwargs['linecount'] = ''
1637
    else:
1638
        kwargs['covclass'] = ''
1639
        kwargs['linecount'] = ''
1640
    kwargs['source'] = html.escape(source)
1641
    return rowstr.substitute(**kwargs)
1642
 
1643
#
1644
# Generate the table row for a single file
1645
#
1646
nrows = 0
1647
 
1648
 
1649
def html_row(details, sourcefile, **kwargs):
1650
    if options.relative_anchors:
1651
        sourcefile = os.path.basename(sourcefile)
1652
    rowstr = Template('''
1653
    
1654
      
${filename}
1655
      
1656
        
1657
                
1658
                
1659
      
1660
      
${LinesCoverage} %
1661
      
${LinesExec} / ${LinesTotal}
1662
      
${BranchesCoverage} %
1663
      
${BranchesExec} / ${BranchesTotal}
1664
    
1665
''')
1666
    global nrows
    nrows += 1
    if nrows % 2 == 0:
        kwargs['altstyle'] = 'style="background-color:LightSteelBlue"'
    else:
        kwargs['altstyle'] = ''
    if details:
        kwargs['filename'] = '<a href="%s">%s</a>' % (
            sourcefile, kwargs['filename'][len(kwargs['directory']):]
        )
    else:
        kwargs['filename'] = kwargs['filename'][len(kwargs['directory']):]
    kwargs['LinesCoverage'] = round(kwargs['LinesCoverage'], 1)
    # Disable the border if the bar is too short to see the color
    if kwargs['LinesCoverage'] < 1e-7:
        kwargs['BarBorder'] = "border:white; "
    else:
        kwargs['BarBorder'] = ""
    if kwargs['LinesCoverage'] < medium_coverage:
        kwargs['LinesColor'] = low_color
        kwargs['LinesBar'] = 'red'
    elif kwargs['LinesCoverage'] < high_coverage:
        kwargs['LinesColor'] = medium_color
        kwargs['LinesBar'] = 'yellow'
    else:
        kwargs['LinesColor'] = high_color
        kwargs['LinesBar'] = 'green'

    kwargs['BranchesCoverage'] = round(kwargs['BranchesCoverage'], 1)
    if kwargs['BranchesCoverage'] < medium_coverage:
        kwargs['BranchesColor'] = low_color
        kwargs['BranchesBar'] = 'red'
    elif kwargs['BranchesCoverage'] < high_coverage:
        kwargs['BranchesColor'] = medium_color
        kwargs['BranchesBar'] = 'yellow'
    else:
        kwargs['BranchesColor'] = high_color
        kwargs['BranchesBar'] = 'green'

    return rowstr.substitute(**kwargs)
 

#
# Produce an XML report in the Cobertura format
#
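# Cobertura expects the line-rate / branch-rate attributes as fractions
# between 0.0 and 1.0 (the HTML report above works in percentages), together
# with a Unix timestamp and the generating tool's version on the root
# <coverage> element.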
def print_xml_report(covdata):
    branchTotal = 0
    branchCovered = 0
    lineTotal = 0
    lineCovered = 0

    options.show_branch = True
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        branchTotal += total
        branchCovered += covered

    options.show_branch = False
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        lineTotal += total
        lineCovered += covered

    impl = xml.dom.minidom.getDOMImplementation()
    docType = impl.createDocumentType(
        "coverage", None,
        "http://cobertura.sourceforge.net/xml/coverage-03.dtd"
    )
    doc = impl.createDocument(None, "coverage", docType)
    root = doc.documentElement
    root.setAttribute(
        "line-rate", lineTotal == 0 and '0.0' or
        str(float(lineCovered) / lineTotal)
    )
    root.setAttribute(
        "branch-rate", branchTotal == 0 and '0.0' or
        str(float(branchCovered) / branchTotal)
    )
    root.setAttribute(
        "timestamp", str(int(time.time()))
    )
    root.setAttribute(
        "version", "gcovr %s" % (version_str(),)
    )

    # Generate the <sources> element: this is either the root directory
    # (specified by --root), or the CWD.
    sources = doc.createElement("sources")
    root.appendChild(sources)

    # Generate the coverage output (on a per-package basis)
    packageXml = doc.createElement("packages")
    root.appendChild(packageXml)
    packages = {}
    source_dirs = set()

    keys = list(covdata.keys())
    keys.sort()
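    # options.root_filter strips the leading root directory from each file
    # path; whatever prefix was removed is remembered in source_dirs so it
    # can be emitted later as a <source> entry, leaving the per-class
    # "filename" attributes relative to those sources.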
    for f in keys:
        data = covdata[f]
        directory = options.root_filter.sub('', f)
        if f.endswith(directory):
            src_path = f[:-1 * len(directory)]
            if len(src_path) > 0:
                while directory.startswith(os.path.sep):
                    src_path += os.path.sep
                    directory = directory[len(os.path.sep):]
                source_dirs.add(src_path)
        else:
            # Do no truncation if the filter does not start matching at
            # the beginning of the string
            directory = f
        directory, fname = os.path.split(directory)

        package = packages.setdefault(
            directory, [doc.createElement("package"), {}, 0, 0, 0, 0]
        )
        c = doc.createElement("class")
        # The Cobertura DTD requires a methods section, which isn't
        # trivial to get from gcov (so we will leave it blank)
        c.appendChild(doc.createElement("methods"))
        lines = doc.createElement("lines")
        c.appendChild(lines)

        class_lines = 0
        class_hits = 0
        class_branches = 0
        class_branch_hits = 0
        for line in data.all_lines:
            hits = data.covered.get(line, 0)
            class_lines += 1
            if hits > 0:
                class_hits += 1
            l = doc.createElement("line")
            l.setAttribute("number", str(line))
            l.setAttribute("hits", str(hits))
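            # data.branches maps a line number to a dict of per-branch hit
            # counts.  Lines with branch data get branch="true" and a
            # Cobertura condition-coverage string such as "50% (1/2)":
            # percentage first, then taken/total branches on that line.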
            branches = data.branches.get(line)
            if branches is None:
                l.setAttribute("branch", "false")
            else:
                b_hits = 0
                for v in branches.values():
                    if v > 0:
                        b_hits += 1
                coverage = 100 * b_hits / len(branches)
                l.setAttribute("branch", "true")
                l.setAttribute(
                    "condition-coverage",
                    "%i%% (%i/%i)" % (coverage, b_hits, len(branches))
                )
                cond = doc.createElement('condition')
                cond.setAttribute("number", "0")
                cond.setAttribute("type", "jump")
                cond.setAttribute("coverage", "%i%%" % (coverage))
                class_branch_hits += b_hits
                class_branches += float(len(branches))
                conditions = doc.createElement("conditions")
                conditions.appendChild(cond)
                l.appendChild(conditions)

            lines.appendChild(l)

        className = fname.replace('.', '_')
        c.setAttribute("name", className)
        c.setAttribute("filename", os.path.join(directory, fname))
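        # Cobertura rates are fractions in [0, 1].  The "(1.0 * n or 1.0)"
        # denominator falls back to 1.0 when the count is zero, so an empty
        # class reports a rate of 0.0 instead of dividing by zero.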
        c.setAttribute(
            "line-rate",
            str(class_hits / (1.0 * class_lines or 1.0))
        )
        c.setAttribute(
            "branch-rate",
            str(class_branch_hits / (1.0 * class_branches or 1.0))
        )
        c.setAttribute("complexity", "0.0")

        package[1][className] = c
        package[2] += class_hits
        package[3] += class_lines
        package[4] += class_branch_hits
        package[5] += class_branches

    keys = list(packages.keys())
    keys.sort()
    for packageName in keys:
        packageData = packages[packageName]
        package = packageData[0]
        packageXml.appendChild(package)
        classes = doc.createElement("classes")
        package.appendChild(classes)
        classNames = list(packageData[1].keys())
        classNames.sort()
        for className in classNames:
            classes.appendChild(packageData[1][className])
        package.setAttribute("name", packageName.replace(os.sep, '.'))
        package.setAttribute(
            "line-rate", str(packageData[2] / (1.0 * packageData[3] or 1.0))
        )
        package.setAttribute(
            "branch-rate", str(packageData[4] / (1.0 * packageData[5] or 1.0))
        )
        package.setAttribute("complexity", "0.0")

    # Populate the <sources> element: this is either the root directory
    # (specified by --root), or relative directories based
    # on the filter, or the CWD
    if options.root is not None:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode(options.root.strip()))
        sources.appendChild(source)
    elif len(source_dirs) > 0:
        cwd = os.getcwd()
        for d in source_dirs:
            source = doc.createElement("source")
            if d.startswith(cwd):
                reldir = d[len(cwd):].lstrip(os.path.sep)
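            # When the source dir is an ancestor of the CWD, walk upwards
            # one ".." at a time until the joined path normalizes to the
            # source dir; e.g. cwd=/work/build, d=/work gives reldir="..".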
            elif cwd.startswith(d):
                i = 1
                while normpath(d) != \
                        normpath(os.path.join(*tuple([cwd] + ['..'] * i))):
                    i += 1
                reldir = os.path.join(*tuple(['..'] * i))
            else:
                reldir = d
            source.appendChild(doc.createTextNode(reldir.strip()))
            sources.appendChild(source)
    else:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode('.'))
        sources.appendChild(source)

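    # With --xml-pretty, re-wrap the pretty-printed output at 78 columns,
    # indenting continuation lines to match the indentation of each wrapped
    # line; otherwise emit the output with no indentation.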
    if options.prettyxml:
        import textwrap
        lines = doc.toprettyxml(" ").split('\n')
        for i in range(len(lines)):
            n = 0
            while n < len(lines[i]) and lines[i][n] == " ":
                n += 1
            lines[i] = "\n".join(textwrap.wrap(
                lines[i], 78,
                break_long_words=False,
                break_on_hyphens=False,
                subsequent_indent=" " + n * " "
            ))
        xmlString = "\n".join(lines)
        #print textwrap.wrap(doc.toprettyxml(" "), 80)
    else:
        xmlString = doc.toprettyxml(indent="")
    if options.output is None:
        sys.stdout.write(xmlString + '\n')
    else:
        OUTPUT = open(options.output, 'w')
        OUTPUT.write(xmlString + '\n')
        OUTPUT.close()
 

##
## MAIN
##

#
# Create option parser
#
parser = OptionParser()
parser.add_option(
    "--version",
    help="Print the version number, then exit",
    action="store_true",
    dest="version",
    default=False
)
parser.add_option(
    "-v", "--verbose",
    help="Print progress messages",
    action="store_true",
    dest="verbose",
    default=False
)
parser.add_option(
    '--object-directory',
    help="Specify the directory that contains the gcov data files.  gcovr "
         "must be able to identify the path between the *.gcda files and the "
         "directory where gcc was originally run.  Normally, gcovr can guess "
         "correctly.  This option overrides gcovr's normal path detection and "
         "can specify either the path from gcc to the gcda file (i.e. what "
         "was passed to gcc's '-o' option), or the path from the gcda file to "
         "gcc's original working directory.",
    action="store",
    dest="objdir",
    default=None
)
parser.add_option(
    "-o", "--output",
    help="Print output to this filename",
    action="store",
    dest="output",
    default=None
)
parser.add_option(
    "-k", "--keep",
    help="Keep the temporary *.gcov files generated by gcov.  "
         "By default, these are deleted.",
    action="store_true",
    dest="keep",
    default=False
)
parser.add_option(
    "-d", "--delete",
    help="Delete the coverage files after they are processed.  "
         "These are generated by the user's program, and by default gcovr "
         "does not remove these files.",
    action="store_true",
    dest="delete",
    default=False
)
parser.add_option(
    "-f", "--filter",
    help="Keep only the data files that match this regular expression",
    action="append",
    dest="filter",
    default=[]
)
parser.add_option(
    "-e", "--exclude",
    help="Exclude data files that match this regular expression",
    action="append",
    dest="exclude",
    default=[]
)
parser.add_option(
    "--gcov-filter",
    help="Keep only gcov data files that match this regular expression",
    action="store",
    dest="gcov_filter",
    default=None
)
parser.add_option(
    "--gcov-exclude",
    help="Exclude gcov data files that match this regular expression",
    action="append",
    dest="gcov_exclude",
    default=[]
)
parser.add_option(
    "-r", "--root",
    help="Defines the root directory for source files.  "
         "This is also used to filter the files, and to standardize "
         "the output.",
    action="store",
    dest="root",
    default=None
)
parser.add_option(
    "-x", "--xml",
    help="Generate XML instead of the normal tabular output.",
    action="store_true",
    dest="xml",
    default=False
)
parser.add_option(
    "--xml-pretty",
    help="Generate pretty XML instead of the normal dense format.",
    action="store_true",
    dest="prettyxml",
    default=False
)
parser.add_option(
    "--html",
    help="Generate HTML instead of the normal tabular output.",
    action="store_true",
    dest="html",
    default=False
)
parser.add_option(
    "--html-details",
    help="Generate HTML output for source file coverage.",
    action="store_true",
    dest="html_details",
    default=False
)
parser.add_option(
    "--html-absolute-paths",
    help="Set the paths in the HTML report to be absolute instead of relative",
    action="store_false",
    dest="relative_anchors",
    default=True
)
parser.add_option(
    "-b", "--branches",
    help="Tabulate the branch coverage instead of the line coverage.",
    action="store_true",
    dest="show_branch",
    default=None
)
parser.add_option(
    "-u", "--sort-uncovered",
    help="Sort entries by increasing number of uncovered lines.",
    action="store_true",
    dest="sort_uncovered",
    default=None
)
parser.add_option(
    "-p", "--sort-percentage",
    help="Sort entries by decreasing percentage of covered lines.",
    action="store_true",
    dest="sort_percent",
    default=None
)
parser.add_option(
    "--gcov-executable",
    help="Defines the name/path to the gcov executable [defaults to the "
         "GCOV environment variable, if present; else 'gcov'].",
    action="store",
    dest="gcov_cmd",
    default=os.environ.get('GCOV', 'gcov')
)
parser.add_option(
    "--exclude-unreachable-branches",
    help="Exclude from coverage branches which are marked to be excluded by "
         "LCOV/GCOV markers or are determined to be from lines containing "
         "only compiler-generated \"dead\" code.",
    action="store_true",
    dest="exclude_unreachable_branches",
    default=False
)
parser.add_option(
    "-g", "--use-gcov-files",
    help="Use preprocessed gcov files for analysis.",
    action="store_true",
    dest="gcov_files",
    default=False
)
parser.add_option(
    "-s", "--print-summary",
    help="Print a small report to stdout with line & branch "
         "percentage coverage",
    action="store_true",
    dest="print_summary",
    default=False
)
parser.usage = "gcovr [options]"
parser.description = \
    "A utility to run gcov and generate a simple report that summarizes " \
    "the coverage"
#
# Process options
#
options, args = parser.parse_args(args=sys.argv)
if options.version:
    sys.stdout.write(
        "gcovr %s\n"
        "\n"
        "Copyright (2013) Sandia Corporation. Under the terms of Contract\n"
        "DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government\n"
        "retains certain rights in this software.\n"
        % (version_str(), )
    )
    sys.exit(0)
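# Normalize --object-directory: collapse duplicate path separators and warn
# when the result does not normalize cleanly (e.g. ".", "..", or a trailing
# separator), since relative referencing can confuse the later search for
# gcc's original working directory.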
if options.objdir:
    tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
    while os.sep + os.sep in tmp:
        tmp = tmp.replace(os.sep + os.sep, os.sep)
    if normpath(options.objdir) != tmp:
        sys.stderr.write(
            "(WARNING) relative referencing in --object-directory.\n"
            "\tthis could cause strange errors when gcovr attempts to\n"
            "\tidentify the original gcc working directory.\n")
    if not os.path.exists(normpath(options.objdir)):
        sys.stderr.write(
            "(ERROR) Bad --object-directory option.\n"
            "\tThe specified directory does not exist.\n")
        sys.exit(1)
#
# Setup filters
#
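# Compile the filter/exclude patterns once up front; when no --filter is
# given, the --root filter doubles as the file filter.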
for i in range(0, len(options.exclude)):
    options.exclude[i] = re.compile(options.exclude[i])

if options.root is not None:
    if not options.root:
        sys.stderr.write(
            "(ERROR) empty --root option.\n"
            "\tRoot specifies the path to the root "
            "directory of your project.\n"
            "\tThis option cannot be an empty string.\n"
        )
        sys.exit(1)
    root_dir = os.path.abspath(options.root)
    options.root_filter = re.compile(re.escape(root_dir + os.sep))
else:
    options.root_filter = re.compile('')
    root_dir = starting_dir

for i in range(0, len(options.filter)):
    options.filter[i] = re.compile(options.filter[i])
if len(options.filter) == 0:
    options.filter.append(options.root_filter)

for i in range(0, len(options.gcov_exclude)):
    options.gcov_exclude[i] = re.compile(options.gcov_exclude[i])
if options.gcov_filter is not None:
    options.gcov_filter = re.compile(options.gcov_filter)
else:
    options.gcov_filter = re.compile('')
#
# Get data files
#
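# parse_args() was given the whole of sys.argv, so args[0] is the program
# name; a single remaining element means no search paths were listed and the
# scan starts from "." (or from --root when it was supplied).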
if len(args) == 1:
    if options.root is None:
        datafiles = get_datafiles(["."], options)
    else:
        datafiles = get_datafiles([options.root], options)
else:
    datafiles = get_datafiles(args[1:], options)
#
# Get coverage data
#
covdata = {}
for file_ in datafiles:
    if options.gcov_files:
        process_existing_gcov_file(file_, covdata, options)
    else:
        process_datafile(file_, covdata, options)
if options.verbose:
    sys.stdout.write(
        "Gathered coverage data for " + str(len(covdata)) + " files\n"
    )
#
# Print report
#
if options.xml or options.prettyxml:
    print_xml_report(covdata)
elif options.html:
    print_html_report(covdata, options.html_details)
else:
    print_text_report(covdata)

if options.print_summary:
    print_summary(covdata)
