From: <bi...@fr...> - 2006-01-17 14:28:17
CVS Root:   /cvs/gstreamer
Module:     gst-media-test
Changes by: bilboed
Date:       Tue Jan 17 2006  06:26:10 PST

Log message:
* compare.py:
  New file: comparison functions for test results.
* output.py:
  Simplified and beautified main page output.
  Really use data from previous run.
  Output summary of comparison.
  Implement call of output.py on its own.

Modified files:
    .           : ChangeLog output.py
Added files:
    .           : compare.py

Links:
http://freedesktop.org/cgi-bin/viewcvs.cgi/gstreamer/gst-media-test/ChangeLog.diff?r1=1.13&r2=1.14
http://freedesktop.org/cgi-bin/viewcvs.cgi/gstreamer/gst-media-test/compare.py?rev=1.1&content-type=text/vnd.viewcvs-markup
http://freedesktop.org/cgi-bin/viewcvs.cgi/gstreamer/gst-media-test/output.py.diff?r1=1.5&r2=1.6

====Begin Diffs====

Index: ChangeLog
===================================================================
RCS file: /cvs/gstreamer/gst-media-test/ChangeLog,v
retrieving revision 1.13
retrieving revision 1.14
diff -u -d -r1.13 -r1.14
--- ChangeLog	17 Jan 2006 11:31:38 -0000	1.13
+++ ChangeLog	17 Jan 2006 14:25:58 -0000	1.14
@@ -1,5 +1,15 @@
 2006-01-17  Edward Hervey  <ed...@fl...>
 
+	* compare.py:
+	  New file: comparison functions for test results.
+	* output.py:
+	  Simplified and beautified main page output.
+	  Really use data from previous run.
+	  Output summary of comparison.
+	  Implement call of output.py on its own.
+
+2006-01-17  Edward Hervey  <ed...@fl...>
+
 	* runtests.py:
 	  Separate debug/backtrace files for the different test runs, otherwise
 	  they overwrite each other, and the results won't contain the correct

--- NEW FILE: compare.py ---
#
# compare.py
# Comparison functions for test results

import common

def compare_test_result(results, presults):
    """
    Compares the results of ONE given test between two runs.
    results holds the current run's per-file info, presults the
    previous run's.
    Returns a tuple of:
    _ list of new file names (in results but not in presults)
    _ list of removed file names (in presults but not in results)
    _ list of improved file names
    _ list of regressed file names
    _ list of file names whose percentage didn't change
    """
    improved = []
    regressed = []
    ident = []
    newfile = results.keys()
    oldfile = presults.keys()
    new = [filn for filn in newfile if not filn in oldfile]
    removed = [filn for filn in oldfile if not filn in newfile]
    both = [filn for filn in newfile if filn in oldfile]
    for filn in both:
        perc = results[filn][common.INFO_PERCENTAGE]
        pperc = presults[filn][common.INFO_PERCENTAGE]
        if perc > pperc:
            improved.append(filn)
        elif perc < pperc:
            regressed.append(filn)
        else:
            ident.append(filn)
    return (new, removed, improved, regressed, ident)
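For the curious, here is a minimal sketch of driving the new
compare_test_result() by hand. The two dicts below are made up for
illustration; real ones come out of the runtests.py pickles, keyed on
filename, with the score reachable through common.INFO_PERCENTAGE:

    import common
    import compare

    # Hypothetical per-file info, previous run vs. current run
    presults = {"a.avi": {common.INFO_PERCENTAGE: 50},
                "b.ogg": {common.INFO_PERCENTAGE: 90}}
    results = {"a.avi": {common.INFO_PERCENTAGE: 75},
               "c.mpg": {common.INFO_PERCENTAGE: 10}}

    new, removed, improved, regressed, ident = \
        compare.compare_test_result(results, presults)
    # new       == ["c.mpg"]   (only in the current run)
    # removed   == ["b.ogg"]   (only in the previous run)
    # improved  == ["a.avi"]   (percentage went up)
    # regressed == [] and ident == []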
Index: output.py
===================================================================
RCS file: /cvs/gstreamer/gst-media-test/output.py,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -d -r1.5 -r1.6
--- output.py	16 Jan 2006 18:19:37 -0000	1.5
+++ output.py	17 Jan 2006 14:25:58 -0000	1.6
@@ -8,6 +8,8 @@
 import sys
 import string
 import common
+import compare
+import cPickle
 from common import safe_url
 from urllib import quote
 from BeautifulSoup import BeautifulSoup, Tag
@@ -280,31 +282,26 @@
 
     return [firstrow, secondrow]
 
-def get_single_test_resume_soup(testname, result):
+def test_summary_tuple(result):
     """
-    Return the Tag for the resume of the given test results
+    Return a tuple of:
+    _ total number of files
+    _ number of expected failures
+    _ number of succeeded tests
+    _ number of failed tests
+    _ number of crashed tests
     """
-    top = Tag("div")
-
-    toph1 = Tag("h1").append("Results for test %s" % testname)
-    top.append(toph1)
-    for prefix in ["all", "bad", "good", "expected", "crashed"]:
-        subpage = "%s-%s.html" % (string.replace(testname, "/", "_"),
-                                  prefix)
-        moreinfo = Tag("a", [("href", subpage)]).append("%s files results" % prefix)
-        top.append(Tag("p").append(moreinfo))
-    # Find out how many tests have crashed, failed, passed through
+    total = len(result.keys())
+    expected = 0
     crashed = 0
     failed = 0
     success = 0
-    expected = 0
-
-    for filename in result.keys():
-        res = result[filename][common.INFO_RESULT]
+    for filn in result.keys():
+        res = result[filn][common.INFO_RESULT]
         if res == common.TEST_CRASHED:
             crashed = crashed + 1
+            # a crash also counts as a failure in the summary
+            failed = failed + 1
         elif res in [common.TEST_FAILURE, common.TEST_TIMEDOUT]:
             failed = failed + 1
         elif res == common.TEST_SUCCESS:
@@ -312,30 +309,75 @@
         elif res == common.TEST_EXPECTED:
             expected = expected + 1
 
-    total = len(result.keys())
-    realtotal = total - expected
-    if not realtotal:
-        notestp = Tag("p")
-        notestp.append("No test");
-        top.append(notestp)
-        return top
-    crashedp = Tag("p")
-    crashedp.append("Number of Tests that crashed : %d (%2f%%)" % (crashed, float(crashed)/float(realtotal) * 100.0))
-
-    failedp = Tag("p")
-    failedp.append("Number of Tests that failed : %d (%2f%%)" % (failed, float(failed)/float(realtotal) * 100.0))
-
-    successp = Tag("p")
-    successp.append("Number of Tests that finished with success : %d (%2f%%)" % (success, float(success)/float(realtotal) * 100.0))
-
-    expectedp = Tag("p")
-    expectedp.append("Number of Tests that finished with expected failure : %d (%2f%%)" % (expected, float(expected)/float(total) * 100.0))
-
-    top.append(expectedp)
-    top.append(crashedp)
-    top.append(failedp)
-    top.append(successp)
+    return (total, expected, success, failed, crashed)
+
+def get_single_test_resume_soup(testname, result, pres):
+    """
+    Return the Tag for the resume of the given test results
+    """
+    top = Tag("div")
+
+    toph1 = Tag("h1").append("Results for test %s" % testname)
+    top.append(toph1)
+
+    # Results summary table
+    counts = test_summary_tuple(result)
+    names = ["all", "expected", "good", "bad", "crashed"]
+    descs = ["All Files",
+             "-Expected failures",
+             "-Succeeded tests",
+             "-Failed tests",
+             "--Crashed tests"]
+    bgcolours = ["#0000ff",
+                 "#0000ff",
+                 "#00ff00",
+                 "#ff0000",
+                 "#ff0000"]
+
+    resulttable = Tag("table", [("width", "95%")])
+    resulttable.append(Tag("th").append("Results"))
+    resulttable.append(Tag("th").append("Count"))
+    resulttable.append(Tag("th").append("Percentage"))
+    for count, name, desc, bgcolour in zip(counts, names, descs, bgcolours):
+        line = Tag("tr")
+        # Name, linking to the per-category subpage
+        td = Tag("td")
+        subpage = "%s-%s.html" % (string.replace(testname, "/", "_"),
+                                  name)
+        td.append(Tag("a", [("href", subpage)]).append(desc))
+        line.append(td)
+        # Count
+        line.append(Tag("td", [("width", "10%"), ("align", "right")]).append(count))
+        # Percentage
+        line.append(Tag("td", [("width", "10%"), ("align", "right")]).append("%2.2f%%" % (float(count * 100) / float(counts[0]))))
+        resulttable.append(line)
+    top.append(resulttable)
+
+    # If pres: table of differences against the previous run
+    if pres:
+        prestable = Tag("table", [("width", "95%")])
+        prestable.append(Tag("th").append("Difference from previous run"))
+        compcount = [len(x) for x in compare.compare_test_result(result, pres)[:-1]]
+        comptitle = ["New files",
+                     "Removed files",
+                     "Improved files",
+                     "Regressions"]
+        for count, title in zip(compcount, comptitle):
+            line = Tag("tr")
+            # Name
+            td = Tag("td")
+            td.append(title)
+            line.append(td)
+            # Count
+            line.append(Tag("td", [("width", "10%"), ("align", "right")]).append(count))
+            prestable.append(line)
+        top.append(prestable)
 
     return top
@@ -420,18 +462,21 @@
 
     soup = create_blank_soup("GStreamer Media Test Suite")
     for testname, result in results:
-        soup.append(get_single_test_resume_soup(testname, result))
-        output_everything_test_html(testname, result, directory)
         if previousres and pdict.has_key(testname):
-            soup.append(get_single_test_compare_resume_soup(testname, result, pdict[testname]))
-            output_compare_test_html(testname, result, directory, pdict[testname])
+            pres = pdict[testname]
+        else:
+            pres = None
+        soup.append(get_single_test_resume_soup(testname, result, pres))
+        output_everything_test_html(testname, result, directory)
+        if pres:
+            output_compare_test_html(testname, result, directory, pres)
 
     outputfile = file(os.path.join(directory, filename), "w+")
     outputfile.write(soup.prettify())
     outputfile.close()
 
 if __name__ == "__main__":
-    parser = OptionParser()
+    parser = OptionParser(usage="usage: %prog [options] <pickled-results>")
     parser.add_option("-c", "--compare",
                       dest="compare",
                       help="Result pickle from previous run",
                       default=None)
@@ -440,10 +485,21 @@
 
     (options, args) = parser.parse_args(sys.argv[1:])
 
-    # extract from pickles
    if len (args) < 1:
        parser.print_help()
        sys.exit()
+    elif not options.directory:
+        parser.print_help()
+        print "You need to specify a directory for the test output files"
+        sys.exit()
+
+    # extract from pickles
+    res = cPickle.load(file(args[0], "r"))
+    if options.compare:
+        pres = cPickle.load(file(options.compare, "r"))
+    else:
+        pres = None
+
+    # call output_full_html
+    output_full_html(res, os.path.abspath(options.directory),
+                     previousres=pres)
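With the new __main__ block, output.py can now be run on its own
against a pickled result file, along the lines of
"python output.py -d <outputdir> -c previous.pickle results.pickle"
(assuming the directory option is exposed as -d/--directory; its
add_option call is not visible in this diff). The same thing works
from Python; a minimal sketch, with hypothetical pickle names:

    import os
    import cPickle
    import output

    # Pickles as written by runtests.py (the names here are made up)
    res = cPickle.load(file("current.pickle", "r"))
    pres = cPickle.load(file("previous.pickle", "r"))

    # Writes the main HTML page plus the per-test subpages into report/
    output.output_full_html(res, os.path.abspath("report"),
                            previousres=pres)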