OpenCores Subversion repository: openrisc
https://opencores.org/ocsvn/openrisc/openrisc/trunk
openrisc/trunk/gnu-dev/or1k-gcc/contrib/testsuite-management/validate_failures.py (rev 755)
#!/usr/bin/python

# Script to compare testsuite failures against a list of known-to-fail
# tests.

# Contributed by Diego Novillo <dnovillo@google.com>
#
# Copyright (C) 2011 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING.  If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.

"""This script provides a coarser XFAILing mechanism that requires no
detailed DejaGNU markings.  This is useful in a variety of scenarios:

- Development branches with many known failures waiting to be fixed.
- Release branches with known failures that are not considered
  important for the particular release criteria used in that branch.

The script must be executed from the toplevel build directory.  When
executed it will:

1- Determine the target built: TARGET
2- Determine the source directory: SRCDIR
3- Look for a failure manifest file in
   <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
4- Collect all the <tool>.sum files from the build tree.
5- Produce a report stating:
   a- Failures expected in the manifest but not present in the build.
   b- Failures in the build not expected in the manifest.
6- If all the build failures are expected in the manifest, it exits
   with exit code 0.  Otherwise, it exits with error code 1.
"""

import optparse
import os
import re
import sys

# Handled test results.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]

# Pattern for naming manifest files.  The first argument should be
# the toplevel GCC source directory.  The second argument is the
# target triple used during the build.
_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'

def Error(msg):
  print >>sys.stderr, '\nerror: %s' % msg
  sys.exit(1)


class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrlist: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not affect
                         the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
  """

  def __init__(self, summary_line):
    try:
      self.attrs = ''
      if '|' in summary_line:
        (self.attrs, summary_line) = summary_line.split('|', 1)
      (self.state,
       self.name,
       self.description) = re.match(r' *([A-Z]+): ([^ ]+) (.*)',
                                    summary_line).groups()
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
    except (ValueError, AttributeError):
      # re.match() returns None on lines it cannot parse, so .groups()
      # raises AttributeError; catch it alongside ValueError to report
      # the malformed line instead of crashing.
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    return self.name < other.name

  def __hash__(self):
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)


def GetMakefileValue(makefile_name, value_name):
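  """Return the value assigned to VALUE_NAME in MAKEFILE_NAME, or None.

  Only the first line starting with VALUE_NAME is considered; the text
  after the first '=' is returned with surrounding whitespace stripped.
  """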
  if os.path.exists(makefile_name):
    with open(makefile_name) as makefile:
      for line in makefile:
        if line.startswith(value_name):
          (_, value) = line.split('=', 1)
          value = value.strip()
          return value
  return None


def ValidBuildDirectory(builddir, target):
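  """Return True if BUILDDIR looks like a GCC toplevel build directory.

  The directory must exist, contain a Makefile, and contain either a
  'build-<target>' or a '<target>' subdirectory.
  """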
  if (not os.path.exists(builddir) or
      not os.path.exists('%s/Makefile' % builddir) or
      (not os.path.exists('%s/build-%s' % (builddir, target)) and
       not os.path.exists('%s/%s' % (builddir, target)))):
    return False
  return True


def IsInterestingResult(line):
  """Return True if the given line is one of the summary lines we care about."""
  line = line.strip()
  if line.startswith('#'):
    return False
  if '|' in line:
    (_, line) = line.split('|', 1)
  line = line.strip()
  for result in _VALID_TEST_RESULTS:
    if line.startswith(result):
      return True
  return False


def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  with open(sum_fname) as sum_file:
    for line in sum_file:
      if IsInterestingResult(line):
        result_set.add(TestResult(line))
  return result_set


def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty
  set.
  """
  if os.path.exists(manifest_name):
    return ParseSummary(manifest_name)
  else:
    return set()


def GetSumFiles(builddir):
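  """Return the paths of all .sum files found under BUILDDIR.

  .svn administrative directories are skipped while walking the tree.
  """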
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    if '.svn' in dirs:
      dirs.remove('.svn')
    for fname in files:
      if fname.endswith('.sum'):
        sum_files.append(os.path.join(root, fname))
  return sum_files


def GetResults(builddir):
  """Collect all the test results from .sum files under the given build
  directory."""
  sum_files = GetSumFiles(builddir)
  build_results = set()
  for sum_fname in sum_files:
    print '\t%s' % sum_fname
    build_results |= ParseSummary(sum_fname)
  return build_results


def CompareResults(manifest, actual):
  """Compare sets of results and return two sets:
     - Results present in ACTUAL but missing from MANIFEST.
     - Results present in MANIFEST but missing from ACTUAL.
  """
  # Report all the actual results not present in the manifest.
  actual_vs_manifest = set()
  for actual_result in actual:
    if actual_result not in manifest:
      actual_vs_manifest.add(actual_result)

  # Similarly for all the tests in the manifest.
  manifest_vs_actual = set()
  for expected_result in manifest:
    # Ignore tests marked flaky.
    if 'flaky' in expected_result.attrs:
      continue
    if expected_result not in actual:
      manifest_vs_actual.add(expected_result)

  return actual_vs_manifest, manifest_vs_actual


def GetBuildData(options):
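  """Read the target triple and source directory from the build Makefile.

  Returns the tuple (srcdir, target, True); aborts via Error() if the
  build directory does not look valid.
  """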
  target = GetMakefileValue('%s/Makefile' % options.build_dir, 'target=')
  srcdir = GetMakefileValue('%s/Makefile' % options.build_dir, 'srcdir =')
  if not ValidBuildDirectory(options.build_dir, target):
    Error('%s is not a valid GCC top level build directory.' %
          options.build_dir)
  print 'Source directory: %s' % srcdir
  print 'Build target:     %s' % target
  return srcdir, target, True


def PrintSummary(msg, summary):
  print '\n\n%s' % msg
  for result in sorted(summary):
    print result


def CheckExpectedResults(options):
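  """Compare the build's results against the expected-failure manifest.

  Returns True if every failure found in the build is listed in the
  manifest, False otherwise.
  """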
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  print 'Manifest:         %s' % manifest_name
  manifest = GetManifest(manifest_name)

  print 'Getting actual results from build'
  actual = GetResults(options.build_dir)

  if options.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)

  tests_ok = True
  if len(actual_vs_manifest) > 0:
    PrintSummary('Build results not in the manifest', actual_vs_manifest)
    tests_ok = False

  if len(manifest_vs_actual) > 0:
    PrintSummary('Manifest results not present in the build'
                 '\n\nNOTE: This is not a failure.  It just means that the '
                 'manifest expected\nthese tests to fail, '
                 'but they worked in this configuration.\n',
                 manifest_vs_actual)

  if tests_ok:
    print '\nSUCCESS: No unexpected failures.'

  return tests_ok


def ProduceManifest(options):
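  """Write the failure manifest for the current build and return True.

  Refuses to overwrite an existing manifest file unless --force is given.
  """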
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  if os.path.exists(manifest_name) and not options.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_name)

  actual = GetResults(options.build_dir)
  with open(manifest_name, 'w') as manifest_file:
    for result in sorted(actual):
      print result
      manifest_file.write('%s\n' % result)

  return True


def Main(argv):
  parser = optparse.OptionParser(usage=__doc__)
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--manifest', action='store_true', dest='manifest',
                    default=False, help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --manifest, it will '
                    'overwrite an existing manifest file (default = False)')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.manifest:
    retval = ProduceManifest(options)
  else:
    retval = CheckExpectedResults(options)

  if retval:
    return 0
  else:
    return 1

if __name__ == '__main__':
  retval = Main(sys.argv)
  sys.exit(retval)
