Diffstat (limited to 'WebKitTools/TestResultServer/model/jsonresults.py')
-rwxr-xr-x | WebKitTools/TestResultServer/model/jsonresults.py | 127 |
1 files changed, 109 insertions, 18 deletions
diff --git a/WebKitTools/TestResultServer/model/jsonresults.py b/WebKitTools/TestResultServer/model/jsonresults.py
index d86fbcd..e5eb7f7 100755
--- a/WebKitTools/TestResultServer/model/jsonresults.py
+++ b/WebKitTools/TestResultServer/model/jsonresults.py
@@ -40,8 +40,11 @@ JSON_RESULTS_BUILD_NUMBERS = "buildNumbers"
 JSON_RESULTS_TESTS = "tests"
 JSON_RESULTS_RESULTS = "results"
 JSON_RESULTS_TIMES = "times"
+JSON_RESULTS_PASS = "P"
+JSON_RESULTS_NO_DATA = "N"
+JSON_RESULTS_MIN_TIME = 1
 JSON_RESULTS_VERSION = 3
-JSON_RESULTS_MAX_BUILDS = 750
+JSON_RESULTS_MAX_BUILDS = 1500
 
 
 class JsonResults(object):
@@ -171,8 +174,6 @@ class JsonResults(object):
 
             # Merge this build into aggreagated results.
             cls._merge_one_build(aggregated_json, incremental_json, index)
-            logging.debug("Merged build %s, merged json: %s.",
-                build_number, aggregated_json)
 
         return True
 
@@ -210,18 +211,26 @@ class JsonResults(object):
             incremental_json: incremental json object.
         """
 
-        for test_name in incremental_json:
-            incremental_test = incremental_json[test_name]
+        all_tests = (set(aggregated_json.iterkeys()) |
+                     set(incremental_json.iterkeys()))
+        for test_name in all_tests:
             if test_name in aggregated_json:
                 aggregated_test = aggregated_json[test_name]
+                if test_name in incremental_json:
+                    incremental_test = incremental_json[test_name]
+                    results = incremental_test[JSON_RESULTS_RESULTS]
+                    times = incremental_test[JSON_RESULTS_TIMES]
+                else:
+                    results = [[1, JSON_RESULTS_NO_DATA]]
+                    times = [[1, 0]]
+
                 cls._insert_item_run_length_encoded(
-                    incremental_test[JSON_RESULTS_RESULTS],
-                    aggregated_test[JSON_RESULTS_RESULTS])
+                    results, aggregated_test[JSON_RESULTS_RESULTS])
                 cls._insert_item_run_length_encoded(
-                    incremental_test[JSON_RESULTS_TIMES],
-                    aggregated_test[JSON_RESULTS_TIMES])
+                    times, aggregated_test[JSON_RESULTS_TIMES])
+                cls._normalize_results_json(test_name, aggregated_json)
             else:
-                aggregated_json[test_name] = incremental_test
+                aggregated_json[test_name] = incremental_json[test_name]
 
     @classmethod
     def _insert_item_run_length_encoded(cls, incremental_item, aggregated_item):
@@ -238,17 +247,69 @@ class JsonResults(object):
                 aggregated_item[0][0] = min(
                     aggregated_item[0][0] + item[0], JSON_RESULTS_MAX_BUILDS)
             else:
-                # The test item values need to be summed from continuous runs.
-                # If there is an older item (not most recent one) whose value is
-                # same as the one to insert, then we should remove the old item
-                # from aggregated list.
-                for i in reversed(range(1, len(aggregated_item))):
-                    if item[1] == aggregated_item[i][1]:
-                        aggregated_item.pop(i)
-
                 aggregated_item.insert(0, item)
 
     @classmethod
+    def _normalize_results_json(cls, test_name, aggregated_json):
+        """ Prune tests where all runs pass or tests that no longer exist and
+        truncate all results to JSON_RESULTS_MAX_BUILDS.
+
+        Args:
+            test_name: Name of the test.
+            aggregated_json: The JSON object with all the test results for
+                this builder.
+        """
+
+        aggregated_test = aggregated_json[test_name]
+        aggregated_test[JSON_RESULTS_RESULTS] = \
+            cls._remove_items_over_max_number_of_builds(
+                aggregated_test[JSON_RESULTS_RESULTS])
+        aggregated_test[JSON_RESULTS_TIMES] = \
+            cls._remove_items_over_max_number_of_builds(
+                aggregated_test[JSON_RESULTS_TIMES])
+
+        is_all_pass = cls._is_results_all_of_type(
+            aggregated_test[JSON_RESULTS_RESULTS], JSON_RESULTS_PASS)
+        is_all_no_data = cls._is_results_all_of_type(
+            aggregated_test[JSON_RESULTS_RESULTS], JSON_RESULTS_NO_DATA)
+
+        max_time = max(
+            [time[1] for time in aggregated_test[JSON_RESULTS_TIMES]])
+        # Remove all passes/no-data from the results to reduce noise and
+        # filesize. If a test passes every run, but
+        # takes >= JSON_RESULTS_MIN_TIME to run, don't throw away the data.
+        if (is_all_no_data or
+            (is_all_pass and max_time < JSON_RESULTS_MIN_TIME)):
+            del aggregated_json[test_name]
+
+    @classmethod
+    def _remove_items_over_max_number_of_builds(cls, encoded_list):
+        """Removes items from the run-length encoded list after the final
+        item that exceeds the max number of builds to track.
+
+        Args:
+            encoded_results: run-length encoded results. An array of arrays, e.g.
+                [[3,'A'],[1,'Q']] encodes AAAQ.
+        """
+        num_builds = 0
+        index = 0
+        for result in encoded_list:
+            num_builds = num_builds + result[0]
+            index = index + 1
+            if num_builds > JSON_RESULTS_MAX_BUILDS:
+                return encoded_list[:index]
+
+        return encoded_list
+
+    @classmethod
+    def _is_results_all_of_type(cls, results, type):
+        """Returns whether all the results are of the given type
+        (e.g. all passes).
+        """
+
+        return len(results) == 1 and results[0][1] == type
+
+    @classmethod
     def _check_json(cls, builder, json):
         """Check whether the given json is valid.
 
@@ -363,3 +424,33 @@ class JsonResults(object):
             return None
 
         return file
+
+    @classmethod
+    def get_test_list(cls, builder, json_file_data):
+        """Get list of test names from aggregated json file data.
+
+        Args:
+            json_file_data: json file data that has all test-data and
+                non-test-data.
+
+        Returns:
+            json file with test name list only. The json format is the same
+            as the one saved in datastore, but all non-test-data and test detail
+            results are removed.
+        """
+
+        logging.debug("Loading test results json...")
+        json = cls._load_json(json_file_data)
+        if not json:
+            return None
+
+        logging.debug("Checking test results json...")
+        if not cls._check_json(builder, json):
+            return None
+
+        test_list_json = {}
+        tests = json[builder][JSON_RESULTS_TESTS]
+        test_list_json[builder] = {
+            "tests": dict.fromkeys(tests, {})}
+
+        return cls._generate_file_data(test_list_json)