eric6/DebugClients/Python/coverage/data.py

changeset 6942
2602857055c5
parent 6219
d6c795b5ce33
child 7427
362cd1b6f81a
equal deleted inserted replaced
6941:f99d60d6b59b 6942:2602857055c5
1 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
2 # For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
3
4 """Coverage data for coverage.py."""
5
6 import glob
7 import itertools
8 import json
9 import optparse
10 import os
11 import os.path
12 import random
13 import re
14 import socket
15
16 from coverage import env
17 from coverage.backward import iitems, string_class
18 from coverage.debug import _TEST_NAME_FILE
19 from coverage.files import PathAliases
20 from coverage.misc import CoverageException, file_be_gone, isolate_module
21
22 os = isolate_module(os)
23
24
class CoverageData(object):
    """Manages collected coverage data, including file storage.

    This class is the public supported API to the data coverage.py collects
    during program execution. It includes information about what code was
    executed. It does not include information from the analysis phase, to
    determine what lines could have been executed, or what lines were not
    executed.

    .. note::

        The file format is not documented or guaranteed. It will change in
        the future, in possibly complicated ways. Do not read coverage.py
        data files directly. Use this API to avoid disruption.

    There are a number of kinds of data that can be collected:

    * **lines**: the line numbers of source lines that were executed.
      These are always available.

    * **arcs**: pairs of source and destination line numbers for transitions
      between source lines. These are only available if branch coverage was
      used.

    * **file tracer names**: the module names of the file tracer plugins that
      handled each file in the data.

    * **run information**: information about the program execution. This is
      written during "coverage run", and then accumulated during "coverage
      combine".

    Lines, arcs, and file tracer names are stored for each source file. File
    names in this API are case-sensitive, even on platforms with
    case-insensitive file systems.

    To read a coverage.py data file, use :meth:`read_file`, or
    :meth:`read_fileobj` if you have an already-opened file. You can then
    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
    or :meth:`file_tracer`. Run information is available with
    :meth:`run_infos`.

    The :meth:`has_arcs` method indicates whether arc data is available. You
    can get a list of the files in the data with :meth:`measured_files`.
    A summary of the line data is available from :meth:`line_counts`. As with
    most Python containers, you can determine if there is any data at all by
    using this object as a boolean value.


    Most data files will be created by coverage.py itself, but you can use
    methods here to create data files if you like. The :meth:`add_lines`,
    :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
    that are convenient for coverage.py. The :meth:`add_run_info` method adds
    key-value pairs to the run information.

    To add a file without any measured data, use :meth:`touch_file`.

    You write to a named file with :meth:`write_file`, or to an already opened
    file with :meth:`write_fileobj`.

    You can clear the data in memory with :meth:`erase`. Two data collections
    can be combined by using :meth:`update` on one :class:`CoverageData`,
    passing it the other.

    """

    # The data file format is JSON, with these keys:
    #
    #     * lines: a dict mapping file names to lists of line numbers
    #       executed::
    #
    #         { "file1": [17,23,45], "file2": [1,2,3], ... }
    #
    #     * arcs: a dict mapping file names to lists of line number pairs::
    #
    #         { "file1": [[17,23], [17,25], [25,26]], ... }
    #
    #     * file_tracers: a dict mapping file names to plugin names::
    #
    #         { "file1": "django.coverage", ... }
    #
    #     * runs: a list of dicts of information about the coverage.py runs
    #       contributing to the data::
    #
    #         [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
    #
    # Only one of `lines` or `arcs` will be present: with branch coverage, data
    # is stored as arcs. Without branch coverage, it is stored as lines. The
    # line data is easily recovered from the arcs: it is all the first elements
    # of the pairs that are greater than zero.

    def __init__(self, debug=None):
        """Create a CoverageData.

        `debug` is a `DebugControl` object for writing debug messages.

        """
        self._debug = debug

        # A map from canonical Python source file name to a dictionary in
        # which there's an entry for each line number that has been
        # executed:
        #
        #   { 'filename1.py': [12, 47, 1001], ... }
        #
        # None (as opposed to {}) means "no line data has been added yet";
        # see _has_lines().
        self._lines = None

        # A map from canonical Python source file name to a dictionary with an
        # entry for each pair of line numbers forming an arc:
        #
        #   { 'filename1.py': [(12,14), (47,48), ... ], ... }
        #
        # None (as opposed to {}) means "no arc data has been added yet";
        # see _has_arcs().
        self._arcs = None

        # A map from canonical source file name to a plugin module name:
        #
        #   { 'filename1.py': 'django.coverage', ... }
        #
        self._file_tracers = {}

        # A list of dicts of information about the coverage.py runs.
        self._runs = []

    def __repr__(self):
        """Return a terse summary of which kinds of data are present."""
        return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
            klass=self.__class__.__name__,
            lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
            arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
            tracers="{{{0}}}".format(len(self._file_tracers)),
            runs="[{0}]".format(len(self._runs)),
        )

    ##
    ## Reading data
    ##

    def has_arcs(self):
        """Does this data have arcs?

        Arc data is only available if branch coverage was used during
        collection.

        Returns a boolean.

        """
        return self._has_arcs()

    def lines(self, filename):
        """Get the list of lines executed for a file.

        If the file was not measured, returns None. A file might be measured,
        and have no lines executed, in which case an empty list is returned.

        If the file was executed, returns a list of integers, the line numbers
        executed in the file. The list is in no particular order.

        """
        if self._arcs is not None:
            # With arc data, lines are recovered as the positive endpoints of
            # the arcs (negative numbers are code-object entry/exit markers).
            arcs = self._arcs.get(filename)
            if arcs is not None:
                all_lines = itertools.chain.from_iterable(arcs)
                return list(set(l for l in all_lines if l > 0))
        elif self._lines is not None:
            return self._lines.get(filename)
        return None

    def arcs(self, filename):
        """Get the list of arcs executed for a file.

        If the file was not measured, returns None. A file might be measured,
        and have no arcs executed, in which case an empty list is returned.

        If the file was executed, returns a list of 2-tuples of integers. Each
        pair is a starting line number and an ending line number for a
        transition from one line to another. The list is in no particular
        order.

        Negative numbers have special meaning. If the starting line number is
        -N, it represents an entry to the code object that starts at line N.
        If the ending line number is -N, it's an exit from the code object that
        starts at line N.

        """
        if self._arcs is not None:
            if filename in self._arcs:
                return self._arcs[filename]
        return None

    def file_tracer(self, filename):
        """Get the plugin name of the file tracer for a file.

        Returns the name of the plugin that handles this file. If the file was
        measured, but didn't use a plugin, then "" is returned. If the file
        was not measured, then None is returned.

        """
        # Because the vast majority of files involve no plugin, we don't store
        # them explicitly in self._file_tracers. Check the measured data
        # instead to see if it was a known file with no plugin.
        if filename in (self._arcs or self._lines or {}):
            return self._file_tracers.get(filename, "")
        return None

    def run_infos(self):
        """Return the list of dicts of run information.

        For data collected during a single run, this will be a one-element
        list. If data has been combined, there will be one element for each
        original data file.

        """
        return self._runs

    def measured_files(self):
        """A list of all files that had been measured."""
        return list(self._arcs or self._lines or {})

    def line_counts(self, fullpath=False):
        """Return a dict summarizing the line coverage data.

        Keys are based on the file names, and values are the number of executed
        lines. If `fullpath` is true, then the keys are the full pathnames of
        the files, otherwise they are the basenames of the files.

        Returns a dict mapping file names to counts of lines.

        """
        summ = {}
        if fullpath:
            filename_fn = lambda f: f
        else:
            filename_fn = os.path.basename
        for filename in self.measured_files():
            summ[filename_fn(filename)] = len(self.lines(filename))
        return summ

    def __nonzero__(self):
        # True if any line or arc data has been collected.
        return bool(self._lines or self._arcs)

    # Python 3 truthiness hook; __nonzero__ is the Python 2 spelling.
    __bool__ = __nonzero__

    def read_fileobj(self, file_obj):
        """Read the coverage data from the given file object.

        Should only be used on an empty CoverageData object.

        """
        data = self._read_raw_data(file_obj)

        self._lines = self._arcs = None

        if 'lines' in data:
            self._lines = data['lines']
        if 'arcs' in data:
            # JSON stores arcs as 2-element lists; convert back to tuples.
            self._arcs = dict(
                (fname, [tuple(pair) for pair in arcs])
                for fname, arcs in iitems(data['arcs'])
            )
        self._file_tracers = data.get('file_tracers', {})
        self._runs = data.get('runs', [])

        self._validate()

    def read_file(self, filename):
        """Read the coverage data from `filename` into this object."""
        if self._debug and self._debug.should('dataio'):
            self._debug.write("Reading data from %r" % (filename,))
        try:
            with self._open_for_reading(filename) as f:
                self.read_fileobj(f)
        except Exception as exc:
            # Re-raise any failure as a CoverageException with the file name
            # and original error folded into the message.
            raise CoverageException(
                "Couldn't read data from '%s': %s: %s" % (
                    filename, exc.__class__.__name__, exc,
                )
            )

    # Sentinel written at the start of every data file, before the JSON
    # payload, to discourage direct parsing of the private format.
    _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"

    @classmethod
    def _open_for_reading(cls, filename):
        """Open a file appropriately for reading data."""
        return open(filename, "r")

    @classmethod
    def _read_raw_data(cls, file_obj):
        """Read the raw data from a file object."""
        go_away = file_obj.read(len(cls._GO_AWAY))
        if go_away != cls._GO_AWAY:
            raise CoverageException("Doesn't seem to be a coverage.py data file")
        return json.load(file_obj)

    @classmethod
    def _read_raw_data_file(cls, filename):
        """Read the raw data from a file, for debugging."""
        with cls._open_for_reading(filename) as f:
            return cls._read_raw_data(f)

    ##
    ## Writing data
    ##

    def add_lines(self, line_data):
        """Add measured line data.

        `line_data` is a dictionary mapping file names to dictionaries::

            { filename: { lineno: None, ... }, ...}

        """
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Adding lines: %d files, %d lines total" % (
                len(line_data), sum(len(lines) for lines in line_data.values())
            ))
        if self._has_arcs():
            raise CoverageException("Can't add lines to existing arc data")

        if self._lines is None:
            self._lines = {}
        for filename, linenos in iitems(line_data):
            if filename in self._lines:
                # Merge with existing data, de-duplicating via a set.
                new_linenos = set(self._lines[filename])
                new_linenos.update(linenos)
                linenos = new_linenos
            self._lines[filename] = list(linenos)

        self._validate()

    def add_arcs(self, arc_data):
        """Add measured arc data.

        `arc_data` is a dictionary mapping file names to dictionaries::

            { filename: { (l1,l2): None, ... }, ...}

        """
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Adding arcs: %d files, %d arcs total" % (
                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
            ))
        if self._has_lines():
            raise CoverageException("Can't add arcs to existing line data")

        if self._arcs is None:
            self._arcs = {}
        for filename, arcs in iitems(arc_data):
            if filename in self._arcs:
                # Merge with existing data, de-duplicating via a set.
                new_arcs = set(self._arcs[filename])
                new_arcs.update(arcs)
                arcs = new_arcs
            self._arcs[filename] = list(arcs)

        self._validate()

    def add_file_tracers(self, file_tracers):
        """Add per-file plugin information.

        `file_tracers` is { filename: plugin_name, ... }

        """
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))

        existing_files = self._arcs or self._lines or {}
        for filename, plugin_name in iitems(file_tracers):
            if filename not in existing_files:
                raise CoverageException(
                    "Can't add file tracer data for unmeasured file '%s'" % (filename,)
                )
            existing_plugin = self._file_tracers.get(filename)
            if existing_plugin is not None and plugin_name != existing_plugin:
                raise CoverageException(
                    "Conflicting file tracer name for '%s': %r vs %r" % (
                        filename, existing_plugin, plugin_name,
                    )
                )
            self._file_tracers[filename] = plugin_name

        self._validate()

    def add_run_info(self, **kwargs):
        """Add information about the run.

        Keywords are arbitrary, and are stored in the run dictionary. Values
        must be JSON serializable. You may use this function more than once,
        but repeated keywords overwrite each other.

        """
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Adding run info: %r" % (kwargs,))
        if not self._runs:
            self._runs = [{}]
        self._runs[0].update(kwargs)
        self._validate()

    def touch_file(self, filename, plugin_name=""):
        """Ensure that `filename` appears in the data, empty if needed.

        `plugin_name` is the name of the plugin responsible for this file. It is used
        to associate the right filereporter, etc.
        """
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Touching %r" % (filename,))
        if not self._has_arcs() and not self._has_lines():
            raise CoverageException("Can't touch files in an empty CoverageData")

        if self._has_arcs():
            where = self._arcs
        else:
            where = self._lines
        where.setdefault(filename, [])
        if plugin_name:
            # Set the tracer for this file
            self._file_tracers[filename] = plugin_name

        self._validate()

    def write_fileobj(self, file_obj):
        """Write the coverage data to `file_obj`."""

        # Create the file data.
        file_data = {}

        if self._has_arcs():
            file_data['arcs'] = self._arcs

        if self._has_lines():
            file_data['lines'] = self._lines

        if self._file_tracers:
            file_data['file_tracers'] = self._file_tracers

        if self._runs:
            file_data['runs'] = self._runs

        # Write the data to the file.
        file_obj.write(self._GO_AWAY)
        json.dump(file_data, file_obj, separators=(',', ':'))

    def write_file(self, filename):
        """Write the coverage data to `filename`."""
        if self._debug and self._debug.should('dataio'):
            self._debug.write("Writing data to %r" % (filename,))
        with open(filename, 'w') as fdata:
            self.write_fileobj(fdata)

    def erase(self):
        """Erase the data in this object."""
        self._lines = None
        self._arcs = None
        self._file_tracers = {}
        self._runs = []
        self._validate()

    def update(self, other_data, aliases=None):
        """Update this data with data from another `CoverageData`.

        If `aliases` is provided, it's a `PathAliases` object that is used to
        re-map paths to match the local machine's.

        """
        if self._has_lines() and other_data._has_arcs():
            raise CoverageException("Can't combine arc data with line data")
        if self._has_arcs() and other_data._has_lines():
            raise CoverageException("Can't combine line data with arc data")

        aliases = aliases or PathAliases()

        # _file_tracers: only have a string, so they have to agree.
        # Have to do these first, so that our examination of self._arcs and
        # self._lines won't be confused by data updated from other_data.
        for filename in other_data.measured_files():
            other_plugin = other_data.file_tracer(filename)
            filename = aliases.map(filename)
            this_plugin = self.file_tracer(filename)
            if this_plugin is None:
                if other_plugin:
                    self._file_tracers[filename] = other_plugin
            elif this_plugin != other_plugin:
                raise CoverageException(
                    "Conflicting file tracer name for '%s': %r vs %r" % (
                        filename, this_plugin, other_plugin,
                    )
                )

        # _runs: add the new runs to these runs.
        self._runs.extend(other_data._runs)

        # _lines: merge dicts.
        if other_data._has_lines():
            if self._lines is None:
                self._lines = {}
            for filename, file_lines in iitems(other_data._lines):
                filename = aliases.map(filename)
                if filename in self._lines:
                    lines = set(self._lines[filename])
                    lines.update(file_lines)
                    file_lines = list(lines)
                self._lines[filename] = file_lines

        # _arcs: merge dicts.
        if other_data._has_arcs():
            if self._arcs is None:
                self._arcs = {}
            for filename, file_arcs in iitems(other_data._arcs):
                filename = aliases.map(filename)
                if filename in self._arcs:
                    arcs = set(self._arcs[filename])
                    arcs.update(file_arcs)
                    file_arcs = list(arcs)
                self._arcs[filename] = file_arcs

        self._validate()

    ##
    ## Miscellaneous
    ##

    def _validate(self):
        """If we are in paranoid mode, validate that everything is right."""
        if env.TESTING:
            self._validate_invariants()

    def _validate_invariants(self):
        """Validate internal invariants."""
        # Only one of _lines or _arcs should exist.
        assert not(self._has_lines() and self._has_arcs()), (
            "Shouldn't have both _lines and _arcs"
        )

        # _lines should be a dict of lists of ints.
        if self._has_lines():
            for fname, lines in iitems(self._lines):
                assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
                assert all(isinstance(x, int) for x in lines), (
                    "_lines[%r] shouldn't be %r" % (fname, lines)
                )

        # _arcs should be a dict of lists of pairs of ints.
        if self._has_arcs():
            for fname, arcs in iitems(self._arcs):
                assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
                assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
                    "_arcs[%r] shouldn't be %r" % (fname, arcs)
                )

        # _file_tracers should have only non-empty strings as values.
        for fname, plugin in iitems(self._file_tracers):
            assert isinstance(fname, string_class), (
                "Key in _file_tracers shouldn't be %r" % (fname,)
            )
            assert plugin and isinstance(plugin, string_class), (
                "_file_tracers[%r] shoudn't be %r" % (fname, plugin)
            )

        # _runs should be a list of dicts.
        for val in self._runs:
            assert isinstance(val, dict)
            for key in val:
                assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)

    def add_to_hash(self, filename, hasher):
        """Contribute `filename`'s data to the `hasher`.

        `hasher` is a `coverage.misc.Hasher` instance to be updated with
        the file's data. It should only get the results data, not the run
        data.

        """
        if self._has_arcs():
            hasher.update(sorted(self.arcs(filename) or []))
        else:
            hasher.update(sorted(self.lines(filename) or []))
        hasher.update(self.file_tracer(filename))

    ##
    ## Internal
    ##

    def _has_lines(self):
        """Do we have data in self._lines?"""
        return self._lines is not None

    def _has_arcs(self):
        """Do we have data in self._arcs?"""
        return self._arcs is not None
610
611
class CoverageDataFiles(object):
    """Manage the use of coverage data files."""

    def __init__(self, basename=None, warn=None, debug=None):
        """Create a CoverageDataFiles to manage data files.

        `warn` is the warning function to use.

        `basename` is the name of the file to use for storing data.
        Defaults to ".coverage" in the current directory.

        `debug` is a `DebugControl` object for writing debug messages.

        """
        self.warn = warn
        self.debug = debug

        # Construct the file name that will be used for data storage.
        self.filename = os.path.abspath(basename or ".coverage")

    def erase(self, parallel=False):
        """Erase the data from the file storage.

        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.

        """
        if self.debug and self.debug.should('dataio'):
            self.debug.write("Erasing data file %r" % (self.filename,))
        file_be_gone(self.filename)
        if parallel:
            # Parallel-mode files are "<basename>.<suffix>"; remove them all.
            data_dir, local = os.path.split(self.filename)
            localdot = local + '.*'
            pattern = os.path.join(os.path.abspath(data_dir), localdot)
            for filename in glob.glob(pattern):
                if self.debug and self.debug.should('dataio'):
                    self.debug.write("Erasing parallel data file %r" % (filename,))
                file_be_gone(filename)

    def read(self, data):
        """Read the coverage data from the file, into `data`, if the file exists."""
        if os.path.exists(self.filename):
            data.read_file(self.filename)

    def write(self, data, suffix=None):
        """Write the collected coverage data to a file.

        `suffix` is a suffix to append to the base file name. This can be used
        for multiple or parallel execution, so that many coverage data files
        can exist simultaneously. A dot will be used to join the base name and
        the suffix.

        """
        filename = self.filename
        if suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information. We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            extra = ""
            if _TEST_NAME_FILE:                             # pragma: debugging
                with open(_TEST_NAME_FILE) as f:
                    test_name = f.read()
                extra = "." + test_name
            # host + pid + random six digits makes collisions very unlikely.
            dice = random.Random(os.urandom(8)).randint(0, 999999)
            suffix = "%s%s.%s.%06d" % (socket.gethostname(), extra, os.getpid(), dice)

        if suffix:
            filename += "." + suffix
        data.write_file(filename)

    def combine_parallel_data(self, data, aliases=None, data_paths=None, strict=False):
        """Combine a number of data files together.

        Treat `self.filename` as a file prefix, and combine the data from all
        of the data files starting with that prefix plus a dot.

        If `aliases` is provided, it's a `PathAliases` object that is used to
        re-map paths to match the local machine's.

        If `data_paths` is provided, it is a list of directories or files to
        combine. Directories are searched for files that start with
        `self.filename` plus dot as a prefix, and those files are combined.

        If `data_paths` is not provided, then the directory portion of
        `self.filename` is used as the directory to search for data files.

        Every data file found and combined is then deleted from disk. If a file
        cannot be read, a warning will be issued, and the file will not be
        deleted.

        If `strict` is true, and no files are found to combine, an error is
        raised.

        """
        # Because of the os.path.abspath in the constructor, data_dir will
        # never be an empty string.
        data_dir, local = os.path.split(self.filename)
        localdot = local + '.*'

        data_paths = data_paths or [data_dir]
        files_to_combine = []
        for p in data_paths:
            if os.path.isfile(p):
                files_to_combine.append(os.path.abspath(p))
            elif os.path.isdir(p):
                pattern = os.path.join(os.path.abspath(p), localdot)
                files_to_combine.extend(glob.glob(pattern))
            else:
                raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))

        if strict and not files_to_combine:
            raise CoverageException("No data to combine")

        files_combined = 0
        for f in files_to_combine:
            new_data = CoverageData(debug=self.debug)
            try:
                new_data.read_file(f)
            except CoverageException as exc:
                if self.warn:
                    # The CoverageException has the file name in it, so just
                    # use the message as the warning.
                    self.warn(str(exc))
            else:
                # Only delete the source file once it has been merged in.
                data.update(new_data, aliases=aliases)
                files_combined += 1
                if self.debug and self.debug.should('dataio'):
                    self.debug.write("Deleting combined data file %r" % (f,))
                file_be_gone(f)

        if strict and not files_combined:
            raise CoverageException("No usable data files")
744
745
def canonicalize_json_data(data):
    """Canonicalize our JSON data so it can be compared.

    Sorts the per-file line and arc lists of `data` in place.

    """
    for section in ('lines', 'arcs'):
        for fname in data.get(section, {}):
            data[section][fname] = sorted(data[section][fname])
752
753
def pretty_data(data):
    """Format data as JSON, but as nicely as possible.

    Returns a string.

    """
    # Basic JSON dump, keys sorted for reproducibility.
    dumped = json.dumps(data, indent=4, sort_keys=True)
    # Collapse two-number pairs back onto a single line.
    compacted = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", dumped)
    # Strip trailing whitespace, which messes with tests.
    return re.sub(r"(?m)\s+$", "", compacted)
767
768
def debug_main(args):
    """Dump the raw data from data files.

    Run this as::

        $ python -m coverage.data [FILE]

    """
    opt_parser = optparse.OptionParser()
    opt_parser.add_option(
        "-c", "--canonical", action="store_true",
        help="Sort data into a canonical order",
    )
    opts, leftover = opt_parser.parse_args(args)

    # Any non-option arguments are data files; default to ".coverage".
    for filename in (leftover or [".coverage"]):
        print("--- {0} ------------------------------".format(filename))
        raw = CoverageData._read_raw_data_file(filename)
        if opts.canonical:
            canonicalize_json_data(raw)
        print(pretty_data(raw))
790
791
if __name__ == '__main__':
    # Allow running this module as a script to dump raw data files.
    import sys
    debug_main(sys.argv[1:])

eric ide

mercurial