Diffstat (limited to 'Tools/CodeCoverage')
-rw-r--r--  Tools/CodeCoverage/README                        |  22
-rw-r--r--  Tools/CodeCoverage/amber.png                     | bin 0 -> 127 bytes
-rw-r--r--  Tools/CodeCoverage/cov.py                        | 201
-rw-r--r--  Tools/CodeCoverage/emerald.png                   | bin 0 -> 127 bytes
-rw-r--r--  Tools/CodeCoverage/gcov.css                      | 116
-rw-r--r--  Tools/CodeCoverage/glass.png                     | bin 0 -> 127 bytes
-rwxr-xr-x  Tools/CodeCoverage/regenerate-coverage-display   | 382
-rw-r--r--  Tools/CodeCoverage/ruby.png                      | bin 0 -> 127 bytes
-rwxr-xr-x  Tools/CodeCoverage/run-generate-coverage-data    | 240
-rw-r--r--  Tools/CodeCoverage/snow.png                      | bin 0 -> 127 bytes
10 files changed, 961 insertions, 0 deletions
diff --git a/Tools/CodeCoverage/README b/Tools/CodeCoverage/README
new file mode 100644
index 0000000..7a85527
--- /dev/null
+++ b/Tools/CodeCoverage/README
@@ -0,0 +1,22 @@
+Generate coverage on Mac
+
+Call Tools/Scripts/generate-coverage-data
+
+or run the steps below by hand:
+
+
+# delete stale coverage data from previous runs
+find . -name '*.gcda' -delete
+
+# build with coverage instrumentation (linking -framework CoreFoundation might suffice instead of AppKit)
+Tools/Scripts/build-webkit GCC_GENERATE_TEST_COVERAGE_FILES=YES GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES EXTRA_LINK=" -ftest-coverage -fprofile-arcs" OTHER_CFLAGS=" -MD " OTHER_LDFLAGS=" -ftest-coverage -fprofile-arcs -framework AppKit"
+Tools/Scripts/run-webkit-tests
+Tools/Scripts/run-javascriptcore-tests GCC_GENERATE_TEST_COVERAGE_FILES=YES GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES EXTRA_LINK=" -ftest-coverage -fprofile-arcs" OTHER_CFLAGS=" -MD " OTHER_LDFLAGS=" -ftest-coverage -fprofile-arcs -framework AppKit"
+
+
+# Collect the coverage data for this run
+Tools/CodeCoverage/run-generate-coverage-data <RUN_ID> ../coverage-results/
+
+
+# Generate the HTML report
+Tools/CodeCoverage/regenerate-coverage-display ../coverage-results/ ../coverage-results/html
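The glue between the two tools is one CSV file per run, written into the results directory by cov.py (below) and read back by regenerate-coverage-display: the first row carries the run id and a timestamp, every following row a source path with its instrumented and covered line counts. A sketch with a made-up run id, paths, and numbers:

    r12345,1189168000.0
    /WebCore/platform/Logging.cpp,201,118
    /JavaScriptCore/kjs/ustring.h,96,80

Next to the CSV, cov.py also writes an <RUN_ID>.annotated/ tree containing a gcov-style annotated copy of every covered file; the HTML generator reads these to render the per-file pages.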
diff --git a/Tools/CodeCoverage/amber.png b/Tools/CodeCoverage/amber.png
new file mode 100644
index 0000000..ee5d920
--- /dev/null
+++ b/Tools/CodeCoverage/amber.png
Binary files differ
diff --git a/Tools/CodeCoverage/cov.py b/Tools/CodeCoverage/cov.py
new file mode 100644
index 0000000..443e601
--- /dev/null
+++ b/Tools/CodeCoverage/cov.py
@@ -0,0 +1,201 @@
+# Copyright (C) 2004, 2005, 2006 Nathaniel Smith
+# Copyright (C) 2006, 2007 Holger Hans Peter Freyther
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import csv
+import time
+import os.path
+import shutil
+
+def analyze_coverage(possible_gcov_files, source_files, runid, data_dir, base):
+
+ if not os.path.exists(data_dir):
+ os.makedirs(data_dir)
+
+ output = open(os.path.join(data_dir, runid + ".csv"), "w")
+ w = csv.writer(output)
+ # First row: id and time
+ w.writerow([runid, time.time()])
+
+ results = scan_gcov_files(possible_gcov_files, source_files)
+ annotated_dir = os.path.join(data_dir, runid + ".annotated")
+ if os.path.exists(annotated_dir):
+ shutil.rmtree(annotated_dir)
+
+ keys = results.keys()
+ keys.sort()
+ for path in keys:
+ (total, covered, annotated_data) = results[path]
+ path = path[path.find(base)+len(base):]
+ # Rest of the rows: filename, total_lines, covered_lines
+ w.writerow([path, total, covered])
+
+ if path[:1] == "/":
+ path = path[1:]
+ annotated_path = os.path.join(annotated_dir, path)
+ try:
+ os.makedirs(os.path.dirname(annotated_path))
+ except OSError:
+ pass
+ a = open(annotated_path, "w")
+ a.write(annotated_data)
+ a.close()
+
+
+# zecke's rewrite
+STATE_NOT_CODE = -1
+STATE_NOT_SEEN = -2
+STATE_TEST_CODE = -3
+
+def find_gcov(f, possible_gcovs):
+ """
+ Find .gcov files that could be of interest for us
+ """
+ try:
+ return possible_gcovs[f]
+    except KeyError:
+ return []
+
+
+def parse_source_file(file):
+ """
+ Parse one source file and return a list of lines
+ """
+ f_source_list = []
+ init_state = STATE_NOT_SEEN
+ in_test_code = False
+ nesting = 0
+
+ for line in open(file, "r"):
+ code = line.split(":", 2)[-1]
+        if not in_test_code and code.startswith("#ifdef BUILD_UNIT_TESTS"):
+            in_test_code = True
+        if in_test_code and code.startswith("#if"):
+            nesting += 1
+        if in_test_code and code.startswith("#endif"):
+            nesting -= 1
+            if not nesting:
+                in_test_code = False
+ if in_test_code:
+ init_state = STATE_TEST_CODE
+ else:
+ init_state = STATE_NOT_SEEN
+ f_source_list.append([init_state, line.split(":", 1)[1]])
+
+ return f_source_list
+
+# Runner-up, 3rd annual "write Python that looks like Perl" competition,
+# Well, not really. It doesn't even use regexps.
+# He is right so I'm cleaning it up (zecke)
+def scan_gcov_files(possible_gcov_files, source_files):
+ """Takes a list of gcov filenames and a list of source filenames.
+
+ The gcov files should have names of the form foo.o##foo.cc.gcov, as
+ created by 'gcov -l'.
+
+ Returns a dict mapping source filenames to tuples
+ (total_lines, tested_lines, gcov_annotated_source)
+ which are a number, a number, and a very long string, respectively.
+
+ The fun bit is that we merge .gcov output generated by different object
+ files; this way we can provide accurate information for header files and
+ for monotone's current unit test system."""
+ results = {}
+ for f in source_files:
+ possible_gcovs = find_gcov(f, possible_gcov_files)
+ base_name = os.path.splitext(os.path.basename(f))[0]
+        (garbage, extension) = os.path.splitext(f)
+        if len(possible_gcovs) == 0:
+            print "No gcov files found for: '%s' but it was compiled" % f
+            # Compiled but never exercised: record source files with zero
+            # covered lines so they still show up in the report.
+            if extension in [".cc", ".c", ".moc", ".cpp", ".cxx", ".m", ".mm"]:
+                lines = open(f, "r").readlines()
+                results[f] = (len(lines), 0, "".join(lines))
+            continue
+
+        if len(possible_gcovs) > 1:
+            print "More than one gcov file for %s %d" % (f, len(possible_gcovs))
+        base_gcov_lines = parse_source_file(possible_gcovs[0])
+
+        # Now we will try hard to merge the results with the others.
+        # Our requirement is that each gcov file has the same number of
+        # lines as the original file.
+ for cov_file in possible_gcovs:
+ lines = open(cov_file, "r").readlines()
+
+            # e.g. with phonon we have visualisation.h and we cannot know
+            # which header file (folder name) it is referring to. This is a
+            # gcov limitation and I have no workaround yet. We just hope we
+            # will pick the right header file...
+            if len(lines) != len(base_gcov_lines):
+                print "Error: Base %s and Target %s have a different number of lines" % (possible_gcovs[0], cov_file)
+ continue
+
+ # now do the merging of the file. If it has the same basename
+ # and the same number of lines things might work out
+ # In the future take a look at the header of the file
+ i = 0
+ for line in lines:
+ accumulator = base_gcov_lines[i]
+ if accumulator[0] != STATE_TEST_CODE:
+ info = line.split(":", 1)[0]
+ if info.endswith("-"):
+ if accumulator[0] == STATE_NOT_SEEN:
+ accumulator[0] = STATE_NOT_CODE
+ else:
+ if info.endswith("#"):
+ num = 0
+ else:
+ num = int(info)
+ if accumulator[0] in (STATE_NOT_SEEN, STATE_NOT_CODE):
+ accumulator[0] = 0
+ accumulator[0] += num
+ i += 1
+
+        # post-processing of this file
+ (total_lines, total_covered) = (0, 0)
+ annotated_lines = []
+ for state, line in base_gcov_lines:
+ if state == STATE_NOT_SEEN:
+ desc = "?????"
+ elif state == STATE_TEST_CODE:
+ desc = "+"
+ elif state == STATE_NOT_CODE:
+ desc = "-"
+ elif state == 0:
+ desc = "#####"
+ total_lines += 1
+ else:
+ desc = str(state)
+ total_lines += 1
+ total_covered += 1
+ annotated_lines.append(":".join([desc.rjust(9), line]))
+ results[f] = (total_lines, total_covered, "".join(annotated_lines))
+ return results
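cov.py is imported and driven by run-generate-coverage-data (further down). A minimal sketch of calling it by hand looks roughly like the following; the mapping, run id, and paths are invented for illustration, and normally collect_cov() in run-generate-coverage-data builds the gcov mapping for you.

    import cov

    # Map each source file to the .gcov files produced for it (made-up paths).
    possible_gcovs = {
        "/home/buildbot/WebKit/WebCore/platform/Logging.cpp":
            ["/home/buildbot/WebKit/WebKitBuild/Logging.gcda##Logging.cpp.gcov"],
    }
    sources = possible_gcovs.keys()

    # Writes r12345.csv and an r12345.annotated/ tree into ../coverage-results.
    cov.analyze_coverage(possible_gcovs, sources, "r12345",
                         "../coverage-results", "/home/buildbot/WebKit")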
diff --git a/Tools/CodeCoverage/emerald.png b/Tools/CodeCoverage/emerald.png
new file mode 100644
index 0000000..0e60294
--- /dev/null
+++ b/Tools/CodeCoverage/emerald.png
Binary files differ
diff --git a/Tools/CodeCoverage/gcov.css b/Tools/CodeCoverage/gcov.css
new file mode 100644
index 0000000..71ca080
--- /dev/null
+++ b/Tools/CodeCoverage/gcov.css
@@ -0,0 +1,116 @@
+body {
+ color: black; background-color: white;
+ font-family: Helvetica,Arial,sans-serif;
+ margin: 0; padding: 0em;
+ text-align: center;
+}
+
+.title {
+ text-align:center;
+ font-weight:bold;
+ font-style:italic;
+ font-size:1.8em;
+ padding:10px;
+}
+
+.ruler {
+ height:3px;
+ background-color:#638AD6;
+ margin-left:10px;
+ margin-right:10px;
+}
+
+.headerItem {
+ text-align:right;
+ font-weight:bold;
+}
+
+.headerValue {
+ text-align:left;
+ font-weight:bold;
+ color:#638AD6;
+}
+
+.tableHead {
+ text-align:center;
+ font-weight:bold;
+ background-color:#638AD6;
+ color:white;
+
+}
+
+.coverFile {
+ font-family: Courier;
+ background-color:#DEE7FF;
+ padding:3px;
+ width:70%;
+}
+
+.coverBar {
+ background-color:#DEE7FF;
+ padding:3px;
+ width:5%;
+}
+
+.coverBarOutline {
+}
+
+.coverPerHi {
+ font-family: Times;
+ text-align:center;
+ font-weight:bold;
+ background-color:lightgreen;
+ padding:3px;
+ width:5%;
+}
+
+.coverNumHi {
+ font-family: Times;
+ text-align:right;
+ background-color:lightgreen;
+ padding:3px;
+ width:25%;
+}
+
+.coverPerMed {
+ font-family: Times;
+ text-align:center;
+ font-weight:bold;
+ background-color:yellow;
+ padding:3px;
+}
+
+.coverNumMed {
+ font-family: Times;
+ text-align:right;
+ background-color:yellow;
+ padding:3px;
+}
+
+.coverPerLo {
+ font-family: Times;
+ text-align:center;
+ font-weight:bold;
+ background-color:red;
+ padding:3px;
+}
+
+.coverNumLo {
+ font-family: Times;
+ text-align:right;
+ background-color:red;
+ padding:3px;
+}
+
+.lineNum {
+ background-color:#EFE384;
+}
+
+.lineCov {
+ background-color:#CED7FF;
+}
+
+.lineNoCov {
+ background-color:#FF6131;
+}
+
diff --git a/Tools/CodeCoverage/glass.png b/Tools/CodeCoverage/glass.png
new file mode 100644
index 0000000..a4ba373
--- /dev/null
+++ b/Tools/CodeCoverage/glass.png
Binary files differ
diff --git a/Tools/CodeCoverage/regenerate-coverage-display b/Tools/CodeCoverage/regenerate-coverage-display
new file mode 100755
index 0000000..c25b412
--- /dev/null
+++ b/Tools/CodeCoverage/regenerate-coverage-display
@@ -0,0 +1,382 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2004, 2005, 2006 Nathaniel Smith
+# Copyright (C) 2007 Holger Hans Peter Freyther
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# HTML output inspired by the output of lcov as found on the GStreamer
+# site. I assume this is not copyrightable.
+#
+
+
+#
+# Read all CSV files and
+# Create an overview file
+#
+#
+
+
+import sys
+import csv
+import glob
+import time
+import os
+import os.path
+import datetime
+import shutil
+
+os.environ["TTFPATH"] = ":".join(["/usr/share/fonts/truetype/" + d
+ for d in "ttf-bitstream-vera",
+ "freefont",
+ "msttcorefonts"])
+
+level_LOW = 10
+level_MEDIUM = 70
+
+def copy_files(dest_dir):
+ """
+ Copy the CSS and the png's to the destination directory
+ """
+ images = ["amber.png", "emerald.png", "glass.png", "ruby.png", "snow.png"]
+ css = "gcov.css"
+ (base_path, name) = os.path.split(__file__)
+ base_path = os.path.abspath(base_path)
+
+ shutil.copyfile(os.path.join(base_path,css), os.path.join(dest_dir,css))
+ map(lambda x: shutil.copyfile(os.path.join(base_path,x), os.path.join(dest_dir,x)), images)
+
+def sumcov(cov):
+ return "%.2f%% (%s/%s)" % (cov[1] * 100.0 / (cov[0] or 1), cov[1], cov[0])
+
+def create_page(dest_dir, name):
+ index = open(os.path.join(dest_dir, name), "w")
+ index.write("""<HTML>
+ <HEAD>
+ <TITLE>WebKit test coverage information</TITLE>
+ <link rel="stylesheet" type="text/css" href="gcov.css">
+ </HEAD>
+ <BODY>
+ """)
+ return index
+
+def generate_header(file, last_time, total_lines, total_executed, path, image):
+ product = "WebKit"
+ date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(last_time))
+ covered_lines = sumcov((total_lines, total_executed))
+
+ file.write("""<table width="100%%" border=0 cellspacing=0 cellpadding=0>
+ <tr><td class="title">GCOV code coverage report</td></tr>
+ <tr><td class="ruler"><img src="glass.png" width=3 height=3 alt=""></td></tr>
+
+ <tr>
+ <td width="100%%">
+ <table cellpadding=1 border=0 width="100%%">
+ <tr>
+ <td class="headerItem" width="20%%">Current&nbsp;view:</td>
+ <td class="headerValue" width="80%%" colspan=4>%(path)s</td>
+ </tr>
+ <tr>
+ <td class="headerItem" width="20%%">Test:</td>
+ <td class="headerValue" width="80%%" colspan=4>%(product)s</td>
+ </tr>
+ <tr>
+ <td class="headerItem" width="20%%">Date:</td>
+ <td class="headerValue" width="20%%">%(date)s</td>
+ <td width="20%%"></td>
+ <td class="headerItem" width="20%%">Instrumented&nbsp;lines:</td>
+ <td class="headerValue" width="20%%">%(total_lines)s</td>
+ </tr>
+ <tr>
+ <td class="headerItem" width="20%%">Code&nbsp;covered:</td>
+ <td class="headerValue" width="20%%">%(covered_lines)s</td>
+ <td width="20%%"></td>
+ <td class="headerItem" width="20%%">Executed&nbsp;lines:</td>
+ <td class="headerValue" width="20%%">%(total_executed)s</td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ <tr><td class="ruler"><img src="glass.png" width=3 height=3 alt=""></td></tr>
+ </table>""" % vars())
+ # disabled for now <tr><td><img src="%(image)s"></td></tr>
+
+def generate_table_item(file, name, total_lines, covered_lines):
+ covered_precise = (covered_lines*100.0)/(total_lines or 1.0)
+ covered = int(round(covered_precise))
+ remainder = 100-covered
+ (image,perClass,numClass) = coverage_icon(covered_precise)
+ site = "%s.html" % name.replace(os.path.sep,'__')
+ file.write("""
+ <tr>
+ <td class="coverFile"><a href="%(site)s">%(name)s</a></td>
+ <td class="coverBar" align="center">
+ <table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="%(image)s" width=%(covered)s height=10 alt="%(covered_precise).2f"><img src="snow.png" width=%(remainder)s height=10 alt="%(covered_precise).2f"></td></tr></table>
+ </td>
+ <td class="%(perClass)s">%(covered_precise).2f&nbsp;%%</td>
+ <td class="%(numClass)s">%(covered_lines)s&nbsp;/&nbsp;%(total_lines)s&nbsp;lines</td>
+ </tr>
+ """ % vars())
+
+def generate_table_header_start(file):
+ file.write("""<center>
+ <table width="80%%" cellpadding=2 cellspacing=1 border=0>
+
+ <tr>
+ <td width="50%%"><br></td>
+ <td width="15%%"></td>
+ <td width="15%%"></td>
+ <td width="20%%"></td>
+ </tr>
+
+ <tr>
+ <td class="tableHead">Directory&nbsp;name</td>
+ <td class="tableHead" colspan=3>Coverage</td>
+ </tr>
+ """)
+
+def coverage_icon(percent):
+ if percent < level_LOW:
+ return ("ruby.png", "coverPerLo", "coverNumLo")
+ elif percent < level_MEDIUM:
+ return ("amber.png", "coverPerMed", "coverNumMed")
+ else:
+ return ("emerald.png", "coverPerHi", "coverNumHi")
+
+def replace(text, *pairs):
+ """
+ From pydoc... almost identical at least
+ """
+ from string import split, join
+ while pairs:
+ (a,b) = pairs[0]
+ text = join(split(text, a), b)
+ pairs = pairs[1:]
+ return text
+
+def escape(text):
+ """
+    Escape the string so that it is valid HTML.
+ """
+ return replace(text,
+ ('&', '&amp;'),
+ ('<', '&lt;' ),
+ ('>', '&gt;' ) )
+
+def generate_table_header_end(file):
+ file.write("""</table>
+ </center>""")
+
+def write_title_page(dest_dir, last_time, last_tot_lines, last_tot_covered, dir_series):
+ """
+    Write the index.html with an overview of each directory.
+ """
+ index= create_page(dest_dir, "index.html")
+ generate_header(index, last_time, last_tot_lines, last_tot_covered, "directory", "images/Total.png")
+ # Create the directory overview
+ generate_table_header_start(index)
+ dirs = dir_series.keys()
+ dirs.sort()
+ for dir in dirs:
+ (dir_files, total_lines, covered_lines,_) = dir_series[dir][-1]
+ generate_table_item(index, dir, total_lines, covered_lines)
+ generate_table_header_end(index)
+
+ index.write("""</BODY></HTML>""")
+ index.close()
+
+def write_directory_site(dest_dir, dir_name, last_time, dir_series, file_series):
+ escaped_dir = dir_name.replace(os.path.sep,'__')
+ site = create_page(dest_dir, "%s.html" % escaped_dir)
+ (_,tot_lines,tot_covered,files) = dir_series[dir_name][-1]
+ generate_header(site, last_time, tot_lines, tot_covered, "directory - %s" % dir_name, "images/%s.png" % escaped_dir)
+
+ files.sort()
+
+ generate_table_header_start(site)
+ for file in files:
+ (lines,covered) = file_series[file][-1]
+ generate_table_item(site, file, lines, covered)
+
+ generate_table_header_end(site)
+ site.write("""</BODY></HTML>""")
+ site.close()
+
+def write_file_site(dest_dir, file_name, last_time, data_dir, last_id, file_series):
+ escaped_name = file_name.replace(os.path.sep,'__')
+ site = create_page(dest_dir, "%s.html" % escaped_name)
+ (tot_lines,tot_covered) = file_series[file_name][-1]
+ generate_header(site, last_time, tot_lines, tot_covered, "file - %s" % file_name, "images/%s.png" % escaped_name)
+
+ path = "%s/%s.annotated%s" % (data_dir,last_id,file_name)
+
+    # In contrast to lcov we want to show files that have been compiled
+    # but have not been tested at all. For those, the data directory holds
+    # the plain source with 0 covered lines instead of real gcov output.
+    # To tell the two apart we check the first line: genuine gcov output
+    # starts with
+    #         -: 0:Source:
+ try:
+ file = open(path, "r")
+    except IOError:
+ return
+ all_lines = file.read().split("\n")
+
+    # Convert the gcov file to HTML if we have a chance to do so:
+    # scan each line, see whether it was covered or not, and escape the
+    # text.
+ if len(all_lines) == 0 or not "-: 0:Source:" in all_lines[0]:
+        site.write("<p>The file was not exercised</p>")
+ else:
+ site.write("""</br><table cellpadding=0 cellspacing=0 border=0>
+ <tr>
+ <td><br></td>
+ </tr>
+ <tr>
+ <td><pre class="source">
+ """)
+ for line in all_lines:
+ split_line = line.split(':',2)
+ # e.g. at the EOF
+ if len(split_line) == 1:
+ continue
+ line_number = split_line[1].strip()
+ if line_number == "0":
+ continue
+ covered = 15*" "
+ end = ""
+ if "#####" in split_line[0]:
+ covered = '<span class="lineNoCov">%15s' % "0"
+ end = "</span>"
+ elif split_line[0].strip() != "-":
+ covered = '<span class="lineCov">%15s' % split_line[0].strip()
+ end = "</span>"
+
+ escaped_line = escape(split_line[2])
+ str = '<span class="lineNum">%(line_number)10s </span>%(covered)s: %(escaped_line)s%(end)s\n' % vars()
+ site.write(str)
+ site.write("</pre></td></tr></table>")
+ site.write("</BODY></HTML>")
+ site.close()
+
+def main(progname, args):
+ if len(args) != 2:
+ sys.exit("Usage: %s DATADIR OUTDIR" % progname)
+
+ branch = "WebKit from trunk"
+ datadir, outdir = args
+
+ # First, load in all data from the data directory.
+ data = []
+ for datapath in glob.glob(os.path.join(datadir, "*.csv")):
+ data.append(read_csv(datapath))
+ # Sort by time
+ data.sort()
+
+ # Calculate time series for each file.
+ times = [sample[0] for sample in data]
+ times = [datetime.datetime.utcfromtimestamp(t) for t in times]
+
+ all_files = {}
+ all_dirs = {}
+ for sample in data:
+ t, i, tot_line, tot_cover, per_file, per_dir = sample
+ all_files.update(per_file)
+ all_dirs.update(per_dir)
+ total_series = []
+ file_serieses = dict([[k, [(0, 0)] * len(times)] for k in all_files.keys()])
+ dir_serieses = dict([[k, [(0, 0, 0, [])] * len(times)] for k in all_dirs.keys()])
+ data_idx = 0
+ for sample in data:
+ t, i, tot_line, tot_cover, per_file, per_dir = sample
+ total_series.append([tot_line, tot_cover])
+ for f, covinfo in per_file.items():
+ file_serieses[f][data_idx] = covinfo
+ for f, covinfo in per_dir.items():
+ dir_serieses[f][data_idx] = covinfo
+ data_idx += 1
+
+
+ # Okay, ready to start outputting. First make sure our directories
+ # exist.
+ if not os.path.exists(outdir):
+ os.makedirs(outdir)
+ rel_imgdir = "images"
+ imgdir = os.path.join(outdir, rel_imgdir)
+ if not os.path.exists(imgdir):
+ os.makedirs(imgdir)
+
+
+ # And look up the latest revision id, and coverage information
+ last_time, last_id, last_tot_lines, last_tot_covered = data[-1][:4]
+
+ # Now start generating our html file
+ copy_files(outdir)
+ write_title_page(outdir, last_time, last_tot_lines, last_tot_covered, dir_serieses)
+
+ dir_keys = dir_serieses.keys()
+ dir_keys.sort()
+ for dir_name in dir_keys:
+ write_directory_site(outdir, dir_name, last_time, dir_serieses, file_serieses)
+
+ file_keys = file_serieses.keys()
+ for file_name in file_keys:
+ write_file_site(outdir, file_name, last_time, datadir, last_id, file_serieses)
+
+def read_csv(path):
+ r = csv.reader(open(path, "r"))
+ # First line is id, time
+ for row in r:
+ id, time_str = row
+ break
+ time = int(float(time_str))
+ # Rest of lines are path, total_lines, covered_lines
+ per_file = {}
+ per_dir = {}
+ grand_total_lines, grand_covered_lines = 0, 0
+ for row in r:
+ path, total_lines_str, covered_lines_str = row
+ total_lines = int(total_lines_str)
+ covered_lines = int(covered_lines_str)
+ grand_total_lines += total_lines
+ grand_covered_lines += covered_lines
+ per_file[path] = [total_lines, covered_lines]
+
+ # Update dir statistics
+ dirname = os.path.dirname(path)
+ if not dirname in per_dir:
+ per_dir[dirname] = (0,0,0,[])
+ (dir_files,dir_total_lines,dir_covered_lines, files) = per_dir[dirname]
+ dir_files += 1
+ dir_total_lines += total_lines
+ dir_covered_lines += covered_lines
+ files.append(path)
+ per_dir[dirname] = (dir_files,dir_total_lines,dir_covered_lines,files)
+ return [time, id, grand_total_lines, grand_covered_lines, per_file, per_dir]
+
+if __name__ == "__main__":
+ import sys
+ main(sys.argv[0], sys.argv[1:])
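read_csv() above turns each per-run CSV back into one sample; the shape it returns is easiest to see with made-up paths and numbers (illustrative only, not real output):

    # [time, id, grand_total_lines, grand_covered_lines, per_file, per_dir]
    sample = [1189168000, "r12345", 297, 198,
              # per_file: path -> [total_lines, covered_lines]
              {"/WebCore/platform/Logging.cpp": [201, 118],
               "/JavaScriptCore/kjs/ustring.h": [96, 80]},
              # per_dir: dirname -> (file_count, total_lines, covered_lines, [paths])
              {"/WebCore/platform": (1, 201, 118, ["/WebCore/platform/Logging.cpp"]),
               "/JavaScriptCore/kjs": (1, 96, 80, ["/JavaScriptCore/kjs/ustring.h"])}]

main() sorts these samples by time and renders the HTML pages from the newest one: the per-directory tuples feed write_directory_site() and the per-file pairs feed write_file_site().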
diff --git a/Tools/CodeCoverage/ruby.png b/Tools/CodeCoverage/ruby.png
new file mode 100644
index 0000000..a582d35
--- /dev/null
+++ b/Tools/CodeCoverage/ruby.png
Binary files differ
diff --git a/Tools/CodeCoverage/run-generate-coverage-data b/Tools/CodeCoverage/run-generate-coverage-data
new file mode 100755
index 0000000..a87da1d
--- /dev/null
+++ b/Tools/CodeCoverage/run-generate-coverage-data
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2004, 2005, 2006 Nathaniel Smith
+# Copyright (C) 2007 Holger Hans Peter Freyther
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os, sys
+
+# from BitBake
+def mkdirhier(dir):
+    """Create a directory hierarchy like 'mkdir -p'; unlike os.makedirs,
+    do not complain if the directory already exists.
+    """
+ try:
+ os.makedirs(dir)
+ except OSError, e:
+ if e.errno != 17: raise e
+
+def collect_base(src,match_array):
+ """
+    Collect all files below src whose extension is listed in match_array.
+ """
+
+ sources = []
+ for root, dirs, files in os.walk(src):
+ if ".svn" in root:
+ continue
+
+ for file in files:
+ base,ext = os.path.splitext(file)
+ if ext in match_array:
+ sources.append( os.path.join(root, file) )
+
+ return sources
+
+def collect_depends(src):
+ return collect_base(src, [".d"])
+
+def parse_dependency_file(src, base_dir, black_list):
+ """
+    Parse the .d dependency files produced by gcc (-MD).
+
+    Wow, for once os.path.join is doing the right thing: a dependency may
+    be a relative path, and os.path.join(dirname of the .d file, dep) gives
+    us its actual location.
+ """
+ file = open(src)
+ file = file.read()
+ file = file.replace('\\', '').replace('\n', '')
+
+    # We now have the 'object: dependencies' pair split apart
+ ar = file.split(':', 1)
+ obj = ar[0].strip()
+ dir = os.path.dirname(obj)
+ deps = ar[1].split(' ')
+
+ # Remove files outside WebKit, make path absolute
+ deps = filter(lambda x: base_dir in x, deps)
+ deps = map(lambda x: os.path.abspath(os.path.join(dir, x)), deps)
+ return (obj, dir, deps)
+
+def collect_cov(base_path,targets):
+ """
+    Collect gcov files. collect_sources is not used, as it also creates
+    directories and needs to do substitution. Instead we build a mapping
+    from each source file to the gcov files of interest: we could have
+    bytestream.h in many different subdirectories, and we would end up
+    with bla.cpp##bytestream.h without knowing which bytestream.h was
+    actually tested.
+ """
+ def find_source_file(root,cov_file):
+ """ Find a Source line or crash
+
+ '#Users#ich#projekte#src#threadmessage.cpp###space#dports#include#qt3#qstring.h.gcov'
+ '#Users#ich#projekte#src#threadmessage.cpp##..#^#src#threadmessage.cpp.gcov'
+
+ ### is absolute path
+ ##..#^# is relative path... well a gcov bug as well
+ ## normal split file in the same directory
+ """
+ if '###' in cov_file:
+ split = cov_file.split('###')
+ if not len(split) == 2:
+                raise ValueError("Unexpected split result")
+ filepath = split[1][:-5].replace('#',os.path.sep)
+ return os.path.join(os.path.sep,filepath)
+ elif '##..#^#' in cov_file:
+ split = cov_file.split('##..#^#')
+ if not len(split) == 2:
+                raise ValueError("Unexpected split result")
+ filepath = split[1][:-5].replace('#',os.path.sep)
+ return os.path.abspath(os.path.join(root,os.path.pardir,os.path.pardir,filepath))
+ elif '##' in cov_file:
+ split = cov_file.split('##')
+ if not len(split) == 2:
+                raise ValueError("Unexpected split result")
+ filepath = split[1][:-5].replace('#',os.path.sep)
+ return os.path.abspath(os.path.join(root,filepath))
+ elif '#' in cov_file:
+ # wow a not broken gcov on OSX
+ basename=os.path.basename(cov_file).replace('#',os.path.sep)[:-5]
+ return os.path.abspath(os.path.join(root,basename))
+
+ else:
+            raise ValueError("No source found %s" % cov_file)
+
+ def sanitize_path(path):
+ """
+        Fix up paths like /usr/lib/gcc/i486-linux-gnu/4.1.2/^/^/^/^/include/c++/4.1.2/bits/stl_pair.h
+        once again. According to gcov, '^' marks a relative path component,
+        so we rebuild a real path from it. Whether '..' actually gets
+        replaced by '^' seems to depend on the gcov version.
+ """
+ import os
+ split = path.split(os.path.sep)
+ str = ""
+ for part in split:
+ if part == '':
+ str = os.path.sep
+ elif part == '^':
+ str = "%s..%s" % (str,os.path.sep)
+ else:
+ str = "%s%s%s" % (str,part,os.path.sep)
+ return os.path.abspath(str)
+
+
+ gcov = {}
+ for root, dirs, files in os.walk(base_path):
+ if ".svn" in root:
+ continue
+ for file in files:
+ base,ext = os.path.splitext(file)
+ if ext in [".gcov"]:
+ try:
+ cov = os.path.join(root, file)
+ src = find_source_file( root, cov )
+ src = sanitize_path( src )
+
+ if not src in gcov:
+ gcov[src] = []
+ gcov[src].append( cov )
+ except Exception,e:
+ print "Exception on ", e
+ #import sys
+ #sys.exit(0)
+ pass
+
+ #print gcov
+ return gcov
+
+def generate_covs(candidates):
+ """
+ Generate gcov files in the right directory
+
+    candidates contains the directories we have used when
+ building. Each directory contains a set of files we will
+ try to generate gcov files for.
+ """
+ print candidates.keys()
+ for dir in candidates.keys():
+ print "Trying in %s" % (dir)
+ for dep in candidates[dir].keys():
+ cmd = "cd %s; gcov -p -l %s" % (dir, dep)
+ os.system("%s > /dev/null 2>&1 " % cmd)
+
+
+def analyze_coverage(sources,data,dirs,runid,base):
+ """
+    sources  the source files to analyze (absolute paths)
+    data     where to put the resulting data
+    dirs     the directories to search for gcov files
+    runid    identifier of this coverage run
+    base     the base directory; files outside base are ignored
+ """
+ import cov
+ print base
+ gcov = collect_cov(base,dirs)
+ result = cov.analyze_coverage(gcov, sources, runid, data, base)
+ print result
+
+if __name__ == "__main__":
+ #global targets
+ if not len(sys.argv) == 3:
+        print "This script needs two parameters"
+        print "Call it with run-generate-coverage-data RUNID RESULTS_DIR"
+ sys.exit(-1)
+ runid = sys.argv[1]
+ results = sys.argv[2]
+
+    # create directories for our results
+ mkdirhier(results)
+
+    print "Collecting sources and preparing the data tree"
+ base_dir = os.path.abspath(os.path.curdir)
+ depends = collect_depends(base_dir)
+ candidates = map(lambda x: parse_dependency_file(x,base_dir,[]), depends)
+
+    # Build the set of source files from the candidates. This is a poor
+    # man's set: a two-level dict, one level for the build directories and
+    # one for the dependencies seen in each of them.
+ dirs = {}
+ files = {}
+ for (_,dir,deps) in candidates:
+ if not dir in dirs:
+ dirs[dir] = {}
+ for dep in deps:
+ if not dep in dirs[dir]:
+ dirs[dir][dep] = dep
+ if not dep in files:
+ files[dep] = dep
+
+ sources = files.keys()
+
+ print "Found %d candidates" % (len(sources))
+ print "Will run inefficient generation of gcov files now"
+ generate_covs(dirs)
+
+ print "Analyzing Gcov"
+ analyze_coverage(sources, results, dirs.keys(), runid, base_dir)
+ print "Done"
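The trickiest part above is find_source_file(), which undoes the file-name mangling of 'gcov -p -l'. A small sketch of the '###' (absolute path) case, using an invented mangled name in the same style as the docstring examples, shows the idea:

    import os

    # 'gcov -p -l' encodes the including source and the covered file in one
    # name; '#' stands for the path separator and '###' introduces an
    # absolute path (this name is made up for illustration).
    mangled = "#WebKit#WebCore#dom#Document.cpp###usr#include#c++#4.0#string.gcov"
    covered = mangled.split("###")[1]
    covered = covered[:-len(".gcov")].replace("#", os.path.sep)
    print os.path.join(os.path.sep, covered)    # -> /usr/include/c++/4.0/string

The plain '##' and '##..#^#' forms work the same way, except that the decoded path is resolved relative to the directory the .gcov file was found in (going up two levels for the '..#^' form).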
diff --git a/Tools/CodeCoverage/snow.png b/Tools/CodeCoverage/snow.png
new file mode 100644
index 0000000..a4ba373
--- /dev/null
+++ b/Tools/CodeCoverage/snow.png
Binary files differ