diff --git a/catapult_build/build_steps.py b/catapult_build/build_steps.py
index d6755d93bc..dd14e7dbd5 100644
--- a/catapult_build/build_steps.py
+++ b/catapult_build/build_steps.py
@@ -37,7 +37,9 @@
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
- '--channel=stable'
+ '--channel=stable',
+ '--timeout-sec=120',
+ '--timeout-retries=2'
],
'outputs_presentation_json': True,
'disabled': ['android'],
@@ -48,7 +50,9 @@
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
- '--channel=canary'
+ '--channel=canary',
+ '--timeout-sec=120',
+ '--timeout-retries=2'
],
'outputs_presentation_json': True,
'disabled': ['android'],
@@ -112,7 +116,8 @@
'path': 'catapult_build/fetch_telemetry_deps_and_run_tests',
'additional_args': [
'--browser=reference',
- '--start-xvfb'
+ '--start-xvfb',
+ '-v',
],
'uses_sandbox_env': True,
'disabled': ['android'],
@@ -123,7 +128,8 @@
'additional_args': [
'--browser=reference',
'--device=android',
- '--jobs=1'
+ '--jobs=1',
+ '-v',
],
'uses_sandbox_env': True,
'disabled': ['win', 'mac', 'linux']
@@ -134,6 +140,7 @@
'additional_args': [
'BrowserTest',
'--browser=reference',
+ '-v',
],
'uses_sandbox_env': True,
'disabled': ['android', 'linux'], # TODO(nedn): enable this on linux
@@ -145,6 +152,7 @@
'--no-install-hooks',
'--no-use-local-chrome',
'--channel=stable',
+ '--timeout-sec=900',
],
'outputs_presentation_json': True,
'disabled': ['android'],
@@ -156,6 +164,7 @@
'--no-install-hooks',
'--no-use-local-chrome',
'--channel=canary',
+ '--timeout-sec=900',
],
'outputs_presentation_json': True,
'disabled': ['android'],
diff --git a/catapult_build/dev_server.py b/catapult_build/dev_server.py
index d9145949ee..d109f2c8f8 100644
--- a/catapult_build/dev_server.py
+++ b/catapult_build/dev_server.py
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+from __future__ import print_function
+
import argparse
import json
import os
@@ -82,6 +84,15 @@ def post(self, *args, **kwargs): # pylint: disable=unused-argument
self.app.server.please_exit(exit_code)
return self.response.write('')
+class TestsErrorHandler(webapp2.RequestHandler):
+ def post(self, *args, **kwargs):
+ del args, kwargs
+ msg = self.request.body
+ sys.stderr.write(msg + '\n')
+ exit_code = 1
+ if hasattr(self.app.server, 'please_exit'):
+ self.app.server.please_exit(exit_code)
+ return self.response.write('')
class DirectoryListingHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
@@ -198,7 +209,9 @@ def _InitFromArgs(self, args):
Route('/%s/notify_test_result' % pd.GetName(),
TestResultHandler),
Route('/%s/notify_tests_completed' % pd.GetName(),
- TestsCompletedHandler)
+ TestsCompletedHandler),
+ Route('/%s/notify_test_error' % pd.GetName(),
+ TestsErrorHandler)
]
for pd in self.pds:
@@ -276,6 +289,7 @@ def ServeForever():
# allow CTRL+C to shutdown
return 255
+ print("Exiting dev server")
if len(exit_code_attempt) == 1:
return exit_code_attempt[0]
# The serve_forever returned for some reason separate from
diff --git a/catapult_build/run_dev_server_tests.py b/catapult_build/run_dev_server_tests.py
index 871cd3d75c..e2afac96e8 100644
--- a/catapult_build/run_dev_server_tests.py
+++ b/catapult_build/run_dev_server_tests.py
@@ -3,6 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+from __future__ import print_function
+
import argparse
import json
import logging
@@ -12,8 +14,11 @@
import subprocess
import sys
import tempfile
+import threading
import time
+from collections import namedtuple
+
from hooks import install
from py_utils import binary_manager
@@ -31,6 +36,8 @@
# Default port to run on if not auto-assigning from OS
DEFAULT_PORT = '8111'
+_TIMEOUT_RETURNCODE = 124
+
# Mapping of sys.platform -> platform-specific names and paths.
PLATFORM_MAPPING = {
'linux2': {
@@ -49,16 +56,20 @@
'omaha': 'mac',
'prefix': 'Mac',
'zip_prefix': 'mac',
- 'chromepath': ('chrome-mac/Chromium.app/Contents/MacOS/Chromium'),
- 'version_path': 'chrome-mac/Chromium.app/Contents/Versions/',
+ 'chromepath': ('chrome-mac/Chrome.app/Contents/MacOS/Chrome'),
+ 'version_path': 'chrome-mac/Chrome.app/Contents/Versions/',
'additional_paths': [
- ('chrome-mac/Chromium.app/Contents/Versions/%VERSION%/'
- 'Chromium Helper.app/Contents/MacOS/Chromium Helper'),
+ ('chrome-mac/Chrome.app/Contents/Versions/%VERSION%/'
+ 'Chrome Helper.app/Contents/MacOS/Chrome Helper'),
],
},
}
+class ChromeNotFound(Exception):
+ pass
+
+
def IsDepotToolsPath(path):
return os.path.isfile(os.path.join(path, 'gclient'))
@@ -108,33 +119,59 @@ def GetLocalChromePath(path_from_command_line):
return None
-def Main(argv):
- try:
- parser = argparse.ArgumentParser(
- description='Run dev_server tests for a project.')
- parser.add_argument('--chrome_path', type=str,
- help='Path to Chrome browser binary.')
- parser.add_argument('--no-use-local-chrome',
- dest='use_local_chrome', action='store_false')
- parser.add_argument(
- '--no-install-hooks', dest='install_hooks', action='store_false')
- parser.add_argument('--tests', type=str,
- help='Set of tests to run (tracing or perf_insights)')
- parser.add_argument('--channel', type=str, default='stable',
- help='Chrome channel to run (stable or canary)')
- parser.add_argument('--presentation-json', type=str,
- help='Recipe presentation-json output file path')
- parser.set_defaults(install_hooks=True)
- parser.set_defaults(use_local_chrome=True)
- args = parser.parse_args(argv[1:])
-
- if args.install_hooks:
- install.InstallHooks()
+ChromeInfo = namedtuple('ChromeInfo', 'path, version')
- user_data_dir = tempfile.mkdtemp()
- tmpdir = None
- xvfb_process = None
+def GetChromeInfo(args):
+ """Finds chrome either locally or remotely and returns path and version info.
+
+ Version is not reported if local chrome is used.
+ """
+ if args.use_local_chrome:
+ chrome_path = GetLocalChromePath(args.chrome_path)
+ if not chrome_path:
+ raise ChromeNotFound('Could not find chrome locally. You can supply it '
+ 'manually using --chrome_path')
+ return ChromeInfo(path=chrome_path, version=None)
+ else:
+ channel = args.channel
+ if sys.platform == 'linux2' and channel == 'canary':
+ channel = 'dev'
+ assert channel in ['stable', 'beta', 'dev', 'canary']
+
+ binary = 'chrome'
+ print('Fetching the', channel, binary, 'binary via the binary_manager.')
+ chrome_manager = binary_manager.BinaryManager([CHROME_BINARIES_CONFIG])
+ arch, os_name = dependency_util.GetOSAndArchForCurrentDesktopPlatform()
+ chrome_path, version = chrome_manager.FetchPathWithVersion(
+ '%s_%s' % (binary, channel), arch, os_name)
+ print('Finished fetching the', binary, 'binary to', chrome_path)
+ return ChromeInfo(path=chrome_path, version=version)
+
+
+def KillProcess(process):
+ """Kills process on all platform, including windows."""
+ if sys.platform == 'win32':
+ # Use taskkill on Windows to make sure process and all its subprocesses are
+ # killed.
+ subprocess.call(['taskkill', '/F', '/T', '/PID', str(process.pid)])
+ else:
+ process.kill()
+
+
+def RunTests(args, chrome_path):
+ """Runs tests and returns dev server return code.
+
+ Returns _TIMEOUT_RETURNCODE if tests exceed args.timeout_sec.
+ """
+ user_data_dir = None
+ xvfb_process = None
+ chrome_process = None
+ server_process = None
+ timer = None
+ test_start_time = time.time()
+ try:
+ user_data_dir = tempfile.mkdtemp()
server_path = os.path.join(os.path.dirname(
os.path.abspath(__file__)), os.pardir, 'bin', 'run_dev_server')
# TODO(anniesullie): Make OS selection of port work on Windows. See #1235.
@@ -145,7 +182,7 @@ def Main(argv):
server_command = [server_path, '--no-install-hooks', '--port', port]
if sys.platform.startswith('win'):
server_command = ['python.exe'] + server_command
- print "Starting dev_server..."
+ print('Starting dev_server...')
server_process = subprocess.Popen(
server_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
bufsize=1)
@@ -154,40 +191,9 @@ def Main(argv):
output = server_process.stderr.readline()
port = re.search(
r'Now running on http://127.0.0.1:([\d]+)', output).group(1)
-
- chrome_info = None
- if args.use_local_chrome:
- chrome_path = GetLocalChromePath(args.chrome_path)
- if not chrome_path:
- logging.error('Could not find path to chrome.')
- sys.exit(1)
- chrome_info = 'with command `%s`' % chrome_path
- else:
- channel = args.channel
- if sys.platform == 'linux2':
- print ('Using chromium instead of chrome on linux due to ' +
- 'https://crbug.com/998338.')
- binary = 'chromium'
- if channel == 'canary':
- # Linux does not have canary.
- channel = 'dev'
- else:
- binary = 'chrome'
-
- assert channel in ['stable', 'beta', 'dev', 'canary']
-
- print ('Fetching the {0} {1}'.format(channel, binary) +
- ' binary via the binary_manager.')
-
- chrome_manager = binary_manager.BinaryManager([CHROME_BINARIES_CONFIG])
- arch, os_name = dependency_util.GetOSAndArchForCurrentDesktopPlatform()
- chrome_path, version = chrome_manager.FetchPathWithVersion(
- '{0}_{1}'.format(binary, channel), arch, os_name)
- print 'Finished fetching the chrome binary to %s' % chrome_path
- if xvfb.ShouldStartXvfb():
- print 'Starting xvfb...'
- xvfb_process = xvfb.StartXvfb()
- chrome_info = 'version %s from channel %s' % (version, channel)
+ if xvfb.ShouldStartXvfb():
+ print('Starting xvfb...')
+ xvfb_process = xvfb.StartXvfb()
chrome_command = [
chrome_path,
'--user-data-dir=%s' % user_data_dir,
@@ -196,49 +202,132 @@ def Main(argv):
'--no-first-run',
'--noerrdialogs',
'--window-size=1280,1024',
+ '--enable-logging', '--v=1',
+ '--enable-features=ForceWebRequestProxyForTest',
+ '--enable-blink-features=CustomElementsV0,'
+ 'HTMLImportsStyleApplication,ShadowDOMV0',
('http://localhost:%s/%s/tests.html?' % (port, args.tests)) +
'headless=true&testTypeToRun=all',
]
- print "Starting Chrome %s..." % chrome_info
+ print('Starting Chrome at path %s...' % chrome_path)
chrome_process = subprocess.Popen(
chrome_command, stdout=sys.stdout, stderr=sys.stderr)
- print 'chrome process command: %s' % ' '.join(chrome_command)
- print "Waiting for tests to finish..."
+ print('Chrome process command:', ' '.join(chrome_command))
+ print('Waiting for tests to finish...')
+
+ def KillServer():
+ print('Timeout reached. Killing dev server...')
+ KillProcess(server_process)
+
+ timer = threading.Timer(args.timeout_sec, KillServer)
+ timer.start()
server_out, server_err = server_process.communicate()
- print "Killing Chrome..."
- if sys.platform == 'win32':
- # Use taskkill on Windows to make sure Chrome and all subprocesses are
- # killed.
- subprocess.call(['taskkill', '/F', '/T', '/PID', str(chrome_process.pid)])
- else:
- chrome_process.kill()
- if server_process.returncode != 0:
- logging.error('Tests failed!')
- logging.error('Server stdout:\n%s', server_out)
- logging.error('Server stderr:\n%s', server_err)
+ timed_out = not timer.is_alive()
+ timer.cancel()
+
+ # There is a very unlikely case where you see server saying "ALL_PASSED"
+ # but the test still saying "timed out". This usually happens because the
+ # server takes about a second to exit after printing "ALL_PASSED", and it
+ # can time out within that time. Looking at the server returncode can help
+ # here. The timeout should be increased if we're hitting this case.
+ print("Server return code:", server_process.returncode)
+
+ logging.error('Server stdout:\n%s', server_out)
+ logging.error('Server stderr:\n%s', server_err)
+
+ if timed_out:
+ print('Tests did not finish before', args.timeout_sec, 'seconds')
+ return _TIMEOUT_RETURNCODE
else:
- print server_out
- if args.presentation_json:
- with open(args.presentation_json, 'w') as recipe_out:
- # Add a link to the buildbot status for the step saying which version
- # of Chrome the test ran on. The actual linking feature is not used,
- # but there isn't a way to just add text.
- link_name = 'Chrome Version %s' % version
- presentation_info = {'links': {link_name: CHROME_CONFIG_URL}}
- json.dump(presentation_info, recipe_out)
+ if server_process.returncode == 0:
+ print("Tests passed in %.2f seconds." % (time.time() - test_start_time))
+ else:
+ logging.error('Tests failed!')
+ return server_process.returncode
+
finally:
+ if timer:
+ timer.cancel()
+    if server_process and server_process.poll() is None:
+ # Dev server is still running. Kill it.
+ print('Killing dev server...')
+ KillProcess(server_process)
+ if chrome_process:
+ print('Killing Chrome...')
+ KillProcess(chrome_process)
# Wait for Chrome to be killed before deleting temp Chrome dir. Only have
# this timing issue on Windows.
if sys.platform == 'win32':
time.sleep(5)
- if tmpdir:
+ if user_data_dir:
+ chrome_debug_logs = os.path.join(user_data_dir, 'chrome_debug.log')
+ if os.path.exists(chrome_debug_logs):
+ with open(chrome_debug_logs) as f:
+ print('-------- chrome_debug.log --------')
+ sys.stdout.write(f.read())
+ print('-------- ---------------- --------')
+ print('Chrome debug logs printed from', chrome_debug_logs)
try:
- shutil.rmtree(tmpdir)
shutil.rmtree(user_data_dir)
except OSError as e:
- logging.error('Error cleaning up temp dirs %s and %s: %s',
- tmpdir, user_data_dir, e)
+ logging.error('Error cleaning up temp dirs %s: %s', user_data_dir, e)
if xvfb_process:
- xvfb_process.kill()
+ KillProcess(xvfb_process)
+
- sys.exit(server_process.returncode)
+def Main(argv):
+ parser = argparse.ArgumentParser(
+ description='Run dev_server tests for a project.')
+ parser.add_argument('--chrome_path', type=str,
+ help='Path to Chrome browser binary.')
+ parser.add_argument('--no-use-local-chrome',
+ dest='use_local_chrome', action='store_false',
+ help='Use chrome binary fetched from cloud storage '
+ 'instead of chrome available on the system.')
+ parser.add_argument(
+ '--no-install-hooks', dest='install_hooks', action='store_false')
+ parser.add_argument('--tests', type=str,
+ help='Set of tests to run (tracing or perf_insights)')
+ parser.add_argument('--channel', type=str, default='stable',
+ help='Chrome channel to run (stable or canary)')
+ parser.add_argument('--presentation-json', type=str,
+ help='Recipe presentation-json output file path')
+ parser.add_argument('--timeout-sec', type=float, default=float('inf'),
+ help='Timeout for running all tests, in seconds')
+ parser.add_argument('--timeout-retries', type=int, default=0,
+ help='Number of times to retry if tests time out.'
+                      ' Default 0 (no retries)')
+ parser.set_defaults(install_hooks=True)
+ parser.set_defaults(use_local_chrome=True)
+ args = parser.parse_args(argv[1:])
+
+ if args.install_hooks:
+ install.InstallHooks()
+
+ chrome_info = GetChromeInfo(args)
+ print('Using chrome at path', chrome_info.path)
+ if not args.use_local_chrome:
+    print('Chrome version', chrome_info.version, '| channel', args.channel)
+ attempts_left = max(0, args.timeout_retries) + 1
+ return_code = None
+ while attempts_left:
+ print(attempts_left, 'attempts left. Running tests...')
+ return_code = RunTests(args, chrome_info.path)
+ if return_code == _TIMEOUT_RETURNCODE:
+ attempts_left -= 1
+ continue
+ else:
+ break
+ else:
+ logging.error('Tests timed out every time. Retried %d times.',
+ args.timeout_retries)
+ return_code = 1
+ if args.presentation_json:
+ with open(args.presentation_json, 'w') as recipe_out:
+ # Add a link to the buildbot status for the step saying which version
+ # of Chrome the test ran on. The actual linking feature is not used,
+ # but there isn't a way to just add text.
+ link_name = 'Chrome Version %s' % chrome_info.version
+ presentation_info = {'links': {link_name: CHROME_CONFIG_URL}}
+ json.dump(presentation_info, recipe_out)
+ sys.exit(return_code)
diff --git a/common/bin/update_chrome_reference_binaries b/common/bin/update_chrome_reference_binaries.py
similarity index 61%
rename from common/bin/update_chrome_reference_binaries
rename to common/bin/update_chrome_reference_binaries.py
index c62ea78203..f38232678b 100755
--- a/common/bin/update_chrome_reference_binaries
+++ b/common/bin/update_chrome_reference_binaries.py
@@ -14,7 +14,6 @@
import argparse
import collections
-import csv
import logging
import os
import shutil
@@ -30,21 +29,28 @@
from dependency_manager import base_config
-def BuildNotFoundError(error_string):
- raise ValueError(error_string)
-
_CHROME_BINARIES_CONFIG = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..', 'common',
'py_utils', 'py_utils', 'chrome_binaries.json')
-CHROME_GS_BUCKET = 'chrome-unsigned'
+_CHROME_GS_BUCKET = 'chrome-unsigned'
+_CHROMIUM_GS_BUCKET = 'chromium-browser-snapshots'
+
+# How many commit positions to search below and above omaha branch position to
+# find closest chromium build snapshot. The value 10 is chosen because it looks
+# more than sufficient from manual inspection of the bucket.
+_CHROMIUM_SNAPSHOT_SEARCH_WINDOW = 10
# Remove a platform name from this list to disable updating it.
# Add one to enable updating it. (Must also update _PLATFORM_MAP.)
_PLATFORMS_TO_UPDATE = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64',
'android_k_armeabi-v7a', 'android_l_arm64-v8a',
'android_l_armeabi-v7a', 'android_n_armeabi-v7a',
- 'android_n_arm64-v8a']
+                        'android_n_arm64-v8a']
+
+# Add platforms here if you also want to update chromium binary for it.
+# Must add chromium_info for it in _PLATFORM_MAP.
+_CHROMIUM_PLATFORMS = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64']
# Remove a channel name from this list to disable updating it.
# Add one to enable updating it.
@@ -57,65 +63,78 @@ def BuildNotFoundError(error_string):
'dev': ['linux'], 'canary': ['mac', 'win']}
-# Chromium binaries are only updated for linux builds.
-# See https://crbug.com/973847 for context.
-_CHROMIUM_PLATFORM = 'linux_x86_64'
-_CHROMIUM_CHANNELS_TO_UPDATE = ['stable', 'dev']
-_CHROMIUM_GS_BUCKET = 'chromium-browser-snapshots'
-_CHROMIUM_GS_BUILD_DIR = 'Linux_x64'
-_CHROMIUM_ZIP_NAME = 'chrome-linux.zip'
-# How many commit positions to search below and above omaha branch position to
-# find closest chromium build snapshot. The value 10 is chosen because it looks
-# more than sufficient from manual inspection of the bucket.
-_CHROMIUM_SNAPSHOT_SEARCH_WINDOW = 10
-
# All of the information we need to update each platform.
# omaha: name omaha uses for the platforms.
# zip_name: name of the zip file to be retrieved from cloud storage.
# gs_build: name of the Chrome build platform used in cloud storage.
+# chromium_info: information needed to update chromium (optional).
# destination: Name of the folder to download the reference build to.
UpdateInfo = collections.namedtuple('UpdateInfo',
- 'omaha, gs_folder, gs_build, zip_name')
+ 'omaha, gs_folder, gs_build, chromium_info, zip_name')
+# build_dir: name of the build directory in _CHROMIUM_GS_BUCKET.
+# zip_name: name of the zip file to be retrieved from cloud storage.
+ChromiumInfo = collections.namedtuple('ChromiumInfo', 'build_dir, zip_name')
_PLATFORM_MAP = {'mac_x86_64': UpdateInfo(omaha='mac',
gs_folder='desktop-*',
gs_build='mac64',
+ chromium_info=ChromiumInfo(
+ build_dir='Mac',
+ zip_name='chrome-mac.zip'),
zip_name='chrome-mac.zip'),
'win_x86': UpdateInfo(omaha='win',
gs_folder='desktop-*',
gs_build='win-clang',
+ chromium_info=ChromiumInfo(
+ build_dir='Win',
+ zip_name='chrome-win.zip'),
zip_name='chrome-win-clang.zip'),
'win_AMD64': UpdateInfo(omaha='win',
gs_folder='desktop-*',
gs_build='win64-clang',
+ chromium_info=ChromiumInfo(
+ build_dir='Win_x64',
+ zip_name='chrome-win.zip'),
zip_name='chrome-win64-clang.zip'),
'linux_x86_64': UpdateInfo(omaha='linux',
gs_folder='desktop-*',
gs_build='linux64',
+ chromium_info=ChromiumInfo(
+ build_dir='Linux_x64',
+ zip_name='chrome-linux.zip'),
zip_name='chrome-linux64.zip'),
'android_k_armeabi-v7a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm',
+ chromium_info=None,
zip_name='Chrome.apk'),
'android_l_arm64-v8a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm_64',
+ chromium_info=None,
zip_name='ChromeModern.apk'),
'android_l_armeabi-v7a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm',
+ chromium_info=None,
zip_name='Chrome.apk'),
'android_n_armeabi-v7a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm',
+ chromium_info=None,
zip_name='Monochrome.apk'),
'android_n_arm64-v8a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm_64',
+ chromium_info=None,
zip_name='Monochrome.apk'),
}
+VersionInfo = collections.namedtuple('VersionInfo',
+ 'version, branch_base_position')
+
+
def _ChannelVersionsMap(channel):
rows = _OmahaReportVersionInfo(channel)
omaha_versions_map = _OmahaVersionsMap(rows, channel)
@@ -136,7 +155,8 @@ def _OmahaReportVersionInfo(channel):
def _OmahaVersionsMap(rows, channel):
platforms = _OMAHA_PLATFORMS.get(channel, [])
if (len(rows) < 1 or
- not rows[0][0:3] == ['os', 'channel', 'current_version']):
+ rows[0][0:3] != ['os', 'channel', 'current_version'] or
+      len(rows[0]) < 8 or rows[0][7] != 'branch_base_position'):
raise ValueError(
'Omaha report is not in the expected form: %s.' % rows)
versions_map = {}
@@ -145,78 +165,29 @@ def _OmahaVersionsMap(rows, channel):
raise ValueError(
'Omaha report contains a line with the channel %s' % row[1])
if row[0] in platforms:
- versions_map[row[0]] = row[2]
+ versions_map[row[0]] = VersionInfo(version=row[2],
+ branch_base_position=int(row[7]))
logging.warn('versions map: %s' % versions_map)
if not all(platform in versions_map for platform in platforms):
raise ValueError(
- 'Omaha report did not contain all desired platforms for channel %s' % channel)
+ 'Omaha report did not contain all desired platforms '
+ 'for channel %s' % channel)
return versions_map
-def _QueuePlatformUpdate(platform, version, config, channel):
- """ platform: the name of the platform for the browser to
- be downloaded & updated from cloud storage. """
- platform_info = _PLATFORM_MAP[platform]
- filename = platform_info.zip_name
- # remote_path example: desktop-*/30.0.1595.0/precise32/chrome-precise32.zip
- remote_path = '%s/%s/%s/%s' % (
- platform_info.gs_folder, version, platform_info.gs_build, filename)
- if not cloud_storage.Exists(CHROME_GS_BUCKET, remote_path):
- cloud_storage_path = 'gs://%s/%s' % (CHROME_GS_BUCKET, remote_path)
- raise BuildNotFoundError(
- 'Failed to find %s build for version %s at path %s.' % (
- platform, version, cloud_storage_path))
- reference_builds_folder = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'chrome_telemetry_build',
- 'reference_builds', channel)
- if not os.path.exists(reference_builds_folder):
- os.makedirs(reference_builds_folder)
- local_dest_path = os.path.join(reference_builds_folder, filename)
- cloud_storage.Get(CHROME_GS_BUCKET, remote_path, local_dest_path)
- _ModifyBuildIfNeeded(local_dest_path, platform)
- config.AddCloudStorageDependencyUpdateJob(
- 'chrome_%s' % channel, platform, local_dest_path, version=version,
- execute_job=False)
+RemotePath = collections.namedtuple('RemotePath', 'bucket, path')
-def _ModifyBuildIfNeeded(location, platform):
- """Hook to modify the build before saving it for Telemetry to use.
-
- This can be used to remove various utilities that cause noise in a
- test environment. Right now, it is just used to remove Keystone,
- which is a tool used to autoupdate Chrome.
- """
- if platform == 'mac_x86_64':
- _RemoveKeystoneFromBuild(location)
- return
-
- if 'mac' in platform:
- raise NotImplementedError(
- 'Platform <%s> sounds like it is an OSX version. If so, we may need to '
- 'remove Keystone from it per crbug.com/932615. Please edit this script'
- ' and teach it what needs to be done :).')
+def _ResolveChromeRemotePath(platform_info, version_info):
+ # Path example: desktop-*/30.0.1595.0/precise32/chrome-precise32.zip
+ return RemotePath(bucket=_CHROME_GS_BUCKET,
+ path=('%s/%s/%s/%s' % (platform_info.gs_folder,
+ version_info.version,
+ platform_info.gs_build,
+ platform_info.zip_name)))
-def _RemoveKeystoneFromBuild(location):
- """Removes the Keystone autoupdate binary from the chrome mac zipfile."""
- logging.info('Removing keystone from mac build at %s' % location)
- temp_folder = tempfile.mkdtemp(prefix='RemoveKeystoneFromBuild')
- try:
- subprocess.check_call(['unzip', '-q', location, '-d', temp_folder])
- keystone_folder = os.path.join(
- temp_folder, 'chrome-mac', 'Google Chrome.app', 'Contents',
- 'Frameworks', 'Google Chrome Framework.framework', 'Frameworks',
- 'KeystoneRegistration.framework')
- shutil.rmtree(keystone_folder)
- os.remove(location)
- subprocess.check_call(['zip', '--quiet', '--recurse-paths', '--symlinks',
- location, 'chrome-mac'],
- cwd=temp_folder)
- finally:
- shutil.rmtree(temp_folder)
-
-
-def _FindClosestChromiumSnapshot(base_position):
+def _FindClosestChromiumSnapshot(base_position, build_dir):
"""Returns the closest chromium snapshot available in cloud storage.
Chromium snapshots are pulled from _CHROMIUM_BUILD_DIR in CHROMIUM_GS_BUCKET.
@@ -239,7 +210,7 @@ def _FindClosestChromiumSnapshot(base_position):
available_positions = []
for position_prefix in range(min_position_prefix, max_position_prefix + 1):
- query = '%s/%d*' % (_CHROMIUM_GS_BUILD_DIR, position_prefix)
+ query = '%s/%d*' % (build_dir, position_prefix)
try:
ls_results = cloud_storage.ListDirs(_CHROMIUM_GS_BUCKET, query)
except cloud_storage.NotFoundError:
@@ -248,12 +219,12 @@ def _FindClosestChromiumSnapshot(base_position):
continue
for entry in ls_results:
- # entry looks like "/Linux_x64/${commit_position}/".
+ # entry looks like '/Linux_x64/${commit_position}/'.
position = int(entry.split('/')[2])
available_positions.append(position)
if len(available_positions) == 0:
- raise ValueError("No chromium build found +/-%d commit positions of %d" %
+ raise ValueError('No chromium build found +/-%d commit positions of %d' %
(_CHROMIUM_SNAPSHOT_SEARCH_WINDOW, base_position))
distance_function = lambda position: abs(position - base_position)
@@ -261,73 +232,125 @@ def _FindClosestChromiumSnapshot(base_position):
return min_distance_snapshot
-def _GetLinuxOmahaInfo():
- """Returns a dict of channel -> its omaha info on linux as a csv dict. """
- url = 'https://omahaproxy.appspot.com/all?os=linux'
- reader = csv.DictReader(urllib2.urlopen(url))
- channel_to_info_map = {}
- for row in reader:
- channel_to_info_map[row['channel']] = row
- return channel_to_info_map
+def _ResolveChromiumRemotePath(channel, platform, version_info):
+ platform_info = _PLATFORM_MAP[platform]
+ branch_base_position = version_info.branch_base_position
+ omaha_version = version_info.version
+ build_dir = platform_info.chromium_info.build_dir
+ # Look through chromium-browser-snapshots for closest match.
+ closest_snapshot = _FindClosestChromiumSnapshot(
+ branch_base_position, build_dir)
+ if closest_snapshot != branch_base_position:
+ print ('Channel %s corresponds to commit position ' % channel +
+ '%d on %s, ' % (branch_base_position, platform) +
+ 'but closest chromium snapshot available on ' +
+ '%s is %d' % (_CHROMIUM_GS_BUCKET, closest_snapshot))
+ return RemotePath(bucket=_CHROMIUM_GS_BUCKET,
+ path = ('%s/%s/%s' % (build_dir, closest_snapshot,
+ platform_info.chromium_info.zip_name)))
+
+
+def _QueuePlatformUpdate(binary, platform, version_info, config, channel):
+ """ platform: the name of the platform for the browser to
+ be downloaded & updated from cloud storage. """
+ platform_info = _PLATFORM_MAP[platform]
+ if binary == 'chrome':
+ remote_path = _ResolveChromeRemotePath(platform_info, version_info)
+ elif binary == 'chromium':
+ remote_path = _ResolveChromiumRemotePath(channel, platform, version_info)
+ else:
+ raise ValueError('binary must be \'chrome\' or \'chromium\'')
+
+ if not cloud_storage.Exists(remote_path.bucket, remote_path.path):
+ cloud_storage_path = 'gs://%s/%s' % (remote_path.bucket, remote_path.path)
+ logging.warn('Failed to find %s build for version %s at path %s.' % (
+ platform, version_info.version, cloud_storage_path))
+ logging.warn('Skipping this update for this platform/channel.')
+ return
+
+ reference_builds_folder = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), 'chrome_telemetry_build',
+ 'reference_builds', binary, channel)
+ if not os.path.exists(reference_builds_folder):
+ os.makedirs(reference_builds_folder)
+ local_dest_path = os.path.join(reference_builds_folder,
+ platform,
+ platform_info.zip_name)
+ cloud_storage.Get(remote_path.bucket, remote_path.path, local_dest_path)
+ _ModifyBuildIfNeeded(binary, local_dest_path, platform)
+ config.AddCloudStorageDependencyUpdateJob('%s_%s' % (binary, channel),
+ platform, local_dest_path, version=version_info.version,
+ execute_job=False)
+
+
+def _ModifyBuildIfNeeded(binary, location, platform):
+ """Hook to modify the build before saving it for Telemetry to use.
+
+ This can be used to remove various utilities that cause noise in a
+ test environment. Right now, it is just used to remove Keystone,
+ which is a tool used to autoupdate Chrome.
+ """
+ if binary != 'chrome':
+ return
+
+ if platform == 'mac_x86_64':
+ _RemoveKeystoneFromBuild(location)
+ return
+
+ if 'mac' in platform:
+ raise NotImplementedError(
+ 'Platform <%s> sounds like it is an OSX version. If so, we may need to '
+ 'remove Keystone from it per crbug.com/932615. Please edit this script'
+ ' and teach it what needs to be done :).')
+
+
+def _RemoveKeystoneFromBuild(location):
+ """Removes the Keystone autoupdate binary from the chrome mac zipfile."""
+ logging.info('Removing keystone from mac build at %s' % location)
+ temp_folder = tempfile.mkdtemp(prefix='RemoveKeystoneFromBuild')
+ try:
+ subprocess.check_call(['unzip', '-q', location, '-d', temp_folder])
+ keystone_folder = os.path.join(
+ temp_folder, 'chrome-mac', 'Google Chrome.app', 'Contents',
+ 'Frameworks', 'Google Chrome Framework.framework', 'Frameworks',
+ 'KeystoneRegistration.framework')
+ shutil.rmtree(keystone_folder)
+ os.remove(location)
+ subprocess.check_call(['zip', '--quiet', '--recurse-paths', '--symlinks',
+ location, 'chrome-mac'],
+ cwd=temp_folder)
+ finally:
+ shutil.rmtree(temp_folder)
-def _UpdateChromiumLinuxBuilds(config):
- omaha_info = _GetLinuxOmahaInfo()
- for channel in _CHROMIUM_CHANNELS_TO_UPDATE:
- if channel not in omaha_info:
- raise ValueError(
- 'Omaha report did not contain linux information for channel %s'
- % channel)
-
- branch_base_position = int(omaha_info[channel]['branch_base_position'])
- current_version = config.GetVersion(
- 'chromium_%s' % channel, _CHROMIUM_PLATFORM)
- omaha_version = omaha_info[channel]['current_version']
- if current_version and current_version == omaha_version:
- print 'Chromium %s channel is already up to date.' % channel
- continue
- # Look through chromium-browser-snapshots for closest match.
- closest_snapshot = _FindClosestChromiumSnapshot(branch_base_position)
- if closest_snapshot != branch_base_position:
- print ('Channel %s corresponds to commit position ' % channel +
- '%d on %s, ' % (branch_base_position, _CHROMIUM_PLATFORM) +
- 'but closest chromium snapshot available on ' +
- '%s is %d' % (_CHROMIUM_GS_BUCKET, closest_snapshot))
-
- remote_path = '%s/%s/%s' % (_CHROMIUM_GS_BUILD_DIR, closest_snapshot,
- _CHROMIUM_ZIP_NAME)
- reference_builds_folder = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'chrome_telemetry_build',
- 'reference_builds', 'chromium_' + channel)
- if not os.path.exists(reference_builds_folder):
- os.makedirs(reference_builds_folder)
- local_path = os.path.join(reference_builds_folder, _CHROMIUM_ZIP_NAME)
-
- print 'Downloading Chromium at %d on linux_x86_64' % (closest_snapshot)
- cloud_storage.Get(_CHROMIUM_GS_BUCKET, remote_path, local_path)
- config.AddCloudStorageDependencyUpdateJob(
- 'chromium_%s' % channel, 'linux_x86_64', local_path,
- version=omaha_version, execute_job=False)
+def _NeedsUpdate(config, binary, channel, platform, version_info):
+ channel_version = version_info.version
+ print 'Checking %s (%s channel) on %s' % (binary, channel, platform)
+ current_version = config.GetVersion('%s_%s' % (binary, channel), platform)
+ print 'current: %s, channel: %s' % (current_version, channel_version)
+ if current_version and current_version == channel_version:
+ print 'Already up to date.'
+ return False
+ return True
def UpdateBuilds(args):
config = base_config.BaseConfig(_CHROME_BINARIES_CONFIG, writable=True)
- if args.update_chrome:
- logging.info("Preparing updates for chrome builds.")
- for channel in _CHANNELS_TO_UPDATE:
- channel_versions_map = _ChannelVersionsMap(channel)
- for platform in channel_versions_map:
- print 'Downloading Chrome (%s channel) on %s' % (channel, platform)
- current_version = config.GetVersion('chrome_%s' % channel, platform)
- channel_version = channel_versions_map.get(platform)
- print 'current: %s, channel: %s' % (current_version, channel_version)
- if current_version and current_version == channel_version:
- continue
- _QueuePlatformUpdate(platform, channel_version, config, channel)
- if args.update_chromium:
- logging.info("Preparing updates for chromium builds.")
- _UpdateChromiumLinuxBuilds(config)
+ for channel in _CHANNELS_TO_UPDATE:
+ channel_versions_map = _ChannelVersionsMap(channel)
+ for platform in channel_versions_map:
+ version_info = channel_versions_map.get(platform)
+ if args.update_chrome:
+ if _NeedsUpdate(config, 'chrome', channel, platform, version_info):
+ _QueuePlatformUpdate('chrome', platform, version_info, config,
+ channel)
+ if args.update_chromium and platform in _CHROMIUM_PLATFORMS:
+ if _NeedsUpdate(config, 'chromium', channel, platform, version_info):
+ _QueuePlatformUpdate('chromium', platform, version_info,
+ config, channel)
+
+ print 'Updating builds with downloaded binaries'
config.ExecuteUpdateJobs(force=True)
diff --git a/common/py_utils/py_utils/chrome_binaries.json b/common/py_utils/py_utils/chrome_binaries.json
index 00e4f63a2e..66caf0c908 100644
--- a/common/py_utils/py_utils/chrome_binaries.json
+++ b/common/py_utils/py_utils/chrome_binaries.json
@@ -6,22 +6,22 @@
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"mac_x86_64": {
- "cloud_storage_hash": "381a491e14ab523b8db4cdf3c993713678237af8",
+ "cloud_storage_hash": "805bcd36abcadd252fc433b1b440edc9c1d1abaf",
"download_path": "bin/reference_builds/chrome-mac64.zip",
"path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome",
- "version_in_cs": "77.0.3822.0"
+ "version_in_cs": "80.0.3951.6"
},
"win_AMD64": {
- "cloud_storage_hash": "600ee522c410efe1de2f593c0efc32ae113a7d99",
+ "cloud_storage_hash": "7ad4063151c506f73b350665ca8e189e18565a3a",
"download_path": "bin\\reference_build\\chrome-win64-clang.zip",
"path_within_archive": "chrome-win64-clang\\chrome.exe",
- "version_in_cs": "77.0.3822.0"
+ "version_in_cs": "80.0.3951.4"
},
"win_x86": {
- "cloud_storage_hash": "5b79a181bfbd94d8288529b0da1defa3ef097197",
+ "cloud_storage_hash": "9ccaf1f26fc1f9d4e46258504a2de0f3808c1cf0",
"download_path": "bin\\reference_build\\chrome-win32-clang.zip",
"path_within_archive": "chrome-win32-clang\\chrome.exe",
- "version_in_cs": "77.0.3822.0"
+ "version_in_cs": "80.0.3951.4"
}
}
},
@@ -30,10 +30,10 @@
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"linux_x86_64": {
- "cloud_storage_hash": "61d68a6b00f25c964f5162f5251962468c886f3a",
+ "cloud_storage_hash": "0db52435a728bbb0343791e275efd52904d059d6",
"download_path": "bin/reference_build/chrome-linux64.zip",
"path_within_archive": "chrome-linux64/chrome",
- "version_in_cs": "76.0.3809.21"
+ "version_in_cs": "79.0.3945.16"
}
}
},
@@ -42,53 +42,77 @@
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"android_k_armeabi-v7a": {
- "cloud_storage_hash": "28b913c720d56a30c092625c7862f00175a316c7",
+ "cloud_storage_hash": "fcd18925f0929d38273c860e6fa4d1c3064b2037",
"download_path": "bin/reference_build/android_k_armeabi-v7a/ChromeStable.apk",
- "version_in_cs": "75.0.3770.67"
+ "version_in_cs": "78.0.3904.62"
},
"android_l_arm64-v8a": {
- "cloud_storage_hash": "4b953c33c61f94c2198e8001d0d8142c6504a875",
+ "cloud_storage_hash": "f2a8fd07fa7e082835a3c4ac228e66dc0dc89fee",
"download_path": "bin/reference_build/android_l_arm64-v8a/ChromeStable.apk",
- "version_in_cs": "75.0.3770.67"
+ "version_in_cs": "78.0.3904.62"
},
"android_l_armeabi-v7a": {
- "cloud_storage_hash": "28b913c720d56a30c092625c7862f00175a316c7",
+ "cloud_storage_hash": "fcd18925f0929d38273c860e6fa4d1c3064b2037",
"download_path": "bin/reference_build/android_l_armeabi-v7a/ChromeStable.apk",
- "version_in_cs": "75.0.3770.67"
+ "version_in_cs": "78.0.3904.62"
},
"android_n_arm64-v8a": {
- "cloud_storage_hash": "84152ba8f7a25cacc79d588ed827ea75f0e4ab94",
+ "cloud_storage_hash": "46943be19af7dd4dd70930d1838e7058a4a91235",
"download_path": "bin/reference_build/android_n_arm64-v8a/Monochrome.apk",
- "version_in_cs": "75.0.3770.67"
+ "version_in_cs": "78.0.3904.62"
},
"android_n_armeabi-v7a": {
- "cloud_storage_hash": "656bb9e3982d0d35decd5347ced2c320a7267f33",
+ "cloud_storage_hash": "628c0a492ac8c465b6da47909b3d1c92769da771",
"download_path": "bin/reference_build/android_n_armeabi-v7a/Monochrome.apk",
- "version_in_cs": "75.0.3770.67"
+ "version_in_cs": "78.0.3904.62"
},
"linux_x86_64": {
- "cloud_storage_hash": "dee8469e8dcd8453efd33f3a00d7ea302a126a4b",
+ "cloud_storage_hash": "6428da5968a0e69b84ee4525f8886517a45e4c92",
"download_path": "bin/reference_build/chrome-linux64.zip",
"path_within_archive": "chrome-linux64/chrome",
- "version_in_cs": "75.0.3770.80"
+ "version_in_cs": "78.0.3904.70"
},
"mac_x86_64": {
- "cloud_storage_hash": "16a43a1e794bb99ec1ebcd40569084985b3c6626",
+ "cloud_storage_hash": "40096f095b8f8b3694219c23b3f7254a60ca35e0",
"download_path": "bin/reference_builds/chrome-mac64.zip",
"path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome",
- "version_in_cs": "75.0.3770.80"
+ "version_in_cs": "78.0.3904.70"
},
"win_AMD64": {
- "cloud_storage_hash": "1ec52bd4164f2d93c53113a093dae9e041eb2d73",
+ "cloud_storage_hash": "7fcc267926ac55afe6fc28bc14eb252c98e20e08",
"download_path": "bin\\reference_build\\chrome-win64-clang.zip",
"path_within_archive": "chrome-win64-clang\\chrome.exe",
- "version_in_cs": "75.0.3770.80"
+ "version_in_cs": "78.0.3904.70"
},
"win_x86": {
- "cloud_storage_hash": "0f9eb991ba618dc61f2063ea252f44be94c2252e",
+ "cloud_storage_hash": "d6fdf2a4858bf9ddcbdb97b29b68863dfa3574f7",
"download_path": "bin\\reference_build\\chrome-win-clang.zip",
"path_within_archive": "chrome-win-clang\\chrome.exe",
- "version_in_cs": "75.0.3770.80"
+ "version_in_cs": "78.0.3904.70"
+ }
+ }
+ },
+ "chromium_canary": {
+ "cloud_storage_base_folder": "binary_dependencies",
+ "cloud_storage_bucket": "chrome-telemetry",
+ "file_info": {
+ "mac_x86_64": {
+ "cloud_storage_hash": "6502438babd29256ae0407c818123d7d25b439c4",
+ "download_path": "bin/reference_builds/chrome-mac.zip",
+ "path_within_archive": "chrome-mac/Chromium.app/Contents/MacOS/Chromium",
+ "version_in_cs": "80.0.3951.6"
+ },
+ "win_AMD64": {
+ "cloud_storage_hash": "e177a29aa1bc1d86dae31fc80bca293011e8ff51",
+ "download_path": "bin\\reference_build\\chrome-win.zip",
+ "path_within_archive": "chrome-win\\chrome.exe",
+ "version_in_cs": "80.0.3951.4"
+ },
+ "win_x86": {
+ "cloud_storage_hash": "4f1bfd18c5cc386cb966ab48bf174d34ae9596ee",
+ "download_path": "bin\\reference_build\\chrome-win32-clang.zip",
+ "path_within_archive": "chrome-win32-clang\\chrome.exe",
+ "version_in_cs": "80.0.3951.4"
}
}
},
@@ -97,10 +121,10 @@
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"linux_x86_64": {
- "cloud_storage_hash": "af3848b34d925ce9d13ea91ce416e0fd46a1a102",
- "download_path": "bin/reference_build/chromium-linux.zip",
+ "cloud_storage_hash": "5821ab5c8693c87b5a02c2684bb45e08ba901960",
+ "download_path": "bin/reference_build/chrome-linux.zip",
"path_within_archive": "chrome-linux/chrome",
- "version_in_cs": "79.0.3921.0"
+ "version_in_cs": "79.0.3945.16"
}
}
},
@@ -109,12 +133,30 @@
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"linux_x86_64": {
- "cloud_storage_hash": "c3d855e65c71eec418c6caf21184e73c90c86aa0",
- "download_path": "bin/reference_build/chromium-linux.zip",
+ "cloud_storage_hash": "eb82b5d41759b6eeb2e61ef1a702be31aadf71c5",
+ "download_path": "bin/reference_build/chrome-linux.zip",
"path_within_archive": "chrome-linux/chrome",
- "version_in_cs": "77.0.3865.90"
+ "version_in_cs": "78.0.3904.70"
+ },
+ "mac_x86_64": {
+ "cloud_storage_hash": "0eb8d99f6ea6e1ff5bd9607d5be3e0eb29a9a497",
+ "download_path": "bin/reference_builds/chrome-mac.zip",
+ "path_within_archive": "chrome-mac/Chromium.app/Contents/MacOS/Chromium",
+ "version_in_cs": "78.0.3904.70"
+ },
+ "win_AMD64": {
+ "cloud_storage_hash": "c28d9e5bd2229164731fc7725293e361d9a850df",
+ "download_path": "bin\\reference_build\\chrome-win.zip",
+ "path_within_archive": "chrome-win\\chrome.exe",
+ "version_in_cs": "78.0.3904.70"
+ },
+ "win_x86": {
+ "cloud_storage_hash": "5af50c744ace488341a79e5f8d208ddaee04c5e7",
+ "download_path": "bin\\reference_build\\chrome-win-clang.zip",
+ "path_within_archive": "chrome-win-clang\\chrome.exe",
+ "version_in_cs": "78.0.3904.70"
}
}
}
}
-}
+}
\ No newline at end of file
diff --git a/dashboard/cron.yaml b/dashboard/cron.yaml
index 8aff23491e..d4f09963d7 100644
--- a/dashboard/cron.yaml
+++ b/dashboard/cron.yaml
@@ -6,6 +6,10 @@ cron:
url: /internal/cron/ts_mon/send
schedule: every 1 minutes
+- description: Fix sheriff configs that get out of sync from tests
+ url: /cron/update_sheriff
+ schedule: every 48 hours
+
- description: Mark alerts as recovered.
url: /mark_recovered_alerts
schedule: every 6 hours
diff --git a/dashboard/dashboard/add_histograms_queue.py b/dashboard/dashboard/add_histograms_queue.py
index cc234da16e..a80263e6c6 100644
--- a/dashboard/dashboard/add_histograms_queue.py
+++ b/dashboard/dashboard/add_histograms_queue.py
@@ -40,6 +40,7 @@
reserved_infos.CATAPULT_REVISIONS.name: 'r_catapult_git',
reserved_infos.ANGLE_REVISIONS.name: 'r_angle_git',
reserved_infos.WEBRTC_REVISIONS.name: 'r_webrtc_git',
+ reserved_infos.WEBRTC_INTERNAL_REVISIONS.name: 'r_webrtc_internal_cl',
reserved_infos.FUCHSIA_GARNET_REVISIONS.name: 'r_fuchsia_garnet_git',
reserved_infos.FUCHSIA_PERIDOT_REVISIONS.name: 'r_fuchsia_peridot_git',
reserved_infos.FUCHSIA_TOPAZ_REVISIONS.name: 'r_fuchsia_topaz_git',
@@ -198,10 +199,6 @@ def _AddRowsFromData(params, revision, parent_test, legacy_parent_tests):
data_dict = params['data']
test_key = parent_test.key
- all_tests = [parent_test] + legacy_parent_tests.values()
- yield [a.UpdateSheriffAsync() for a in all_tests]
- yield ndb.put_multi_async(all_tests)
-
stat_names_to_test_keys = {k: v.key for k, v in
legacy_parent_tests.items()}
rows = CreateRowEntities(
diff --git a/dashboard/dashboard/add_histograms_test.py b/dashboard/dashboard/add_histograms_test.py
index f760653a83..a0d51f027e 100644
--- a/dashboard/dashboard/add_histograms_test.py
+++ b/dashboard/dashboard/add_histograms_test.py
@@ -244,41 +244,6 @@ def testPost_Succeeds(self, mock_process_test, mock_graph_revisions):
mock_graph_revisions.assert_called_once_with(mock.ANY)
self.assertEqual(len(mock_graph_revisions.mock_calls[0][1][0]), len(rows))
- @mock.patch.object(
- add_histograms_queue.graph_revisions, 'AddRowsToCacheAsync',
- mock.MagicMock())
- @mock.patch.object(
- add_histograms_queue.find_anomalies, 'ProcessTestsAsync',
- mock.MagicMock())
- def testPost_Succeeds_SheriffUpdated(self):
- hs = _CreateHistogram(
- master='master', bot='bot', benchmark='benchmark', commit_position=123,
- benchmark_description='Benchmark description.', samples=[1, 2, 3])
- data = json.dumps(hs.AsDicts())
-
- self.PostAddHistogram({'data': data})
- self.ExecuteTaskQueueTasks('/add_histograms_queue',
- add_histograms.TASK_QUEUE_NAME)
-
- t = utils.TestKey('master/bot/benchmark/hist').get()
- self.assertIsNone(t.sheriff)
-
- sheriff.Sheriff(
- id='my_sheriff1', email='a@chromium.org', patterns=[
- '*/*/*/hist', '*/*/*/hist_avg']).put()
-
- hs = _CreateHistogram(
- master='master', bot='bot', benchmark='benchmark', commit_position=124,
- benchmark_description='Benchmark description.', samples=[1, 2, 3])
- data = json.dumps(hs.AsDicts())
-
- self.PostAddHistogram({'data': data})
- self.ExecuteTaskQueueTasks('/add_histograms_queue',
- add_histograms.TASK_QUEUE_NAME)
-
- t = utils.TestKey('master/bot/benchmark/hist').get()
- self.assertIsNotNone(t.sheriff)
-
@mock.patch.object(
add_histograms_queue.graph_revisions, 'AddRowsToCacheAsync')
@mock.patch.object(add_histograms_queue.find_anomalies, 'ProcessTestsAsync')
diff --git a/dashboard/dashboard/add_point.py b/dashboard/dashboard/add_point.py
index cd85a0ba2a..7dd5034744 100644
--- a/dashboard/dashboard/add_point.py
+++ b/dashboard/dashboard/add_point.py
@@ -18,6 +18,7 @@
from google.appengine.ext import ndb
from dashboard import post_data_handler
+from dashboard.api import api_auth
from dashboard.common import datastore_hooks
from dashboard.common import histogram_helpers
from dashboard.common import math_utils
@@ -125,7 +126,14 @@ def post(self):
"""
datastore_hooks.SetPrivilegedRequest()
if not self._CheckIpAgainstWhitelist():
- return
+ try:
+ api_auth.Authorize()
+ except api_auth.ApiAuthException as error:
+ logging.error('Auth error: %s', error)
+ self.ReportError(
+ 'IP address %s not in IP whitelist!' % self.request.remote_addr,
+ 403)
+ return
data_str = self.request.get('data')
if not data_str:
@@ -378,8 +386,8 @@ def _FlattenTrace(test_suite_name, chart_name, trace_name, trace,
BadRequestError: The data wasn't valid.
"""
if '@@' in chart_name:
- tir_label, chart_name = chart_name.split('@@')
- chart_name = chart_name + '/' + tir_label
+ grouping_label, chart_name = chart_name.split('@@')
+ chart_name = chart_name + '/' + grouping_label
value, error = _ExtractValueAndError(trace)
diff --git a/dashboard/dashboard/add_point_test.py b/dashboard/dashboard/add_point_test.py
index 2146f73df9..891034cfd5 100644
--- a/dashboard/dashboard/add_point_test.py
+++ b/dashboard/dashboard/add_point_test.py
@@ -21,6 +21,7 @@
from dashboard import add_point
from dashboard import add_point_queue
from dashboard import units_to_direction
+from dashboard.api import api_auth
from dashboard.common import layered_cache
from dashboard.common import testing_common
from dashboard.common import utils
@@ -186,6 +187,45 @@ def setUp(self):
testing_common.SetIpWhitelist([_WHITELISTED_IP])
self.SetCurrentUser('foo@bar.com', is_admin=True)
+ @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTestsAsync')
+ def testPost_OauthUser_Authorized(self, mock_process_test):
+ sheriff.Sheriff(
+ id='my_sheriff1', email='a@chromium.org', patterns=['*/*/*/dom']).put()
+ data_param = json.dumps([
+ {
+ 'master': 'ChromiumPerf',
+ 'bot': 'win7',
+ 'test': 'dromaeo/dom',
+ 'revision': 12345,
+ 'value': 22.4,
+ 'error': 1.23,
+ 'supplemental_columns': {
+ 'r_webkit': 1355,
+ 'a_extra': 'hello',
+ 'd_median': 22.2,
+ },
+ },
+ {
+ 'master': 'ChromiumPerf',
+ 'bot': 'win7',
+ 'test': 'dromaeo/jslib',
+ 'revision': 12345,
+ 'value': 44.3,
+ }
+ ])
+ self.SetCurrentUserOAuth(testing_common.INTERNAL_USER)
+ self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_WHITELIST[0])
+ self.Post('/add_point', {'data': data_param})
+ self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME)
+
+ # Verify everything was added to the database correctly
+ rows = graph_data.Row.query().fetch(limit=_FETCH_LIMIT)
+ self.assertEqual(2, len(rows))
+
+ # Verify that an anomaly processing was called.
+ tests = graph_data.TestMetadata.query().fetch(limit=_FETCH_LIMIT)
+ mock_process_test.assert_called_once_with([tests[1].key])
+
@mock.patch.object(add_point_queue.find_anomalies, 'ProcessTestsAsync')
def testPost(self, mock_process_test):
"""Tests all basic functionality of a POST request."""
@@ -1536,13 +1576,13 @@ def testFlattenTrace_SanitizesTraceName(self):
'foo', 'bar', 'http://example.com', trace)
self.assertEqual(row['test'], 'foo/bar/http___example.com')
- def testFlattenTrace_FlattensInteractionRecordLabelToFivePartName(self):
- """Tests whether a TIR label will appear between chart and trace name."""
+ def testFlattenTrace_FlattensGroupingLabelToFivePartName(self):
+ """Tests whether a grouping label appears between chart and trace name."""
trace = self._SampleTrace()
trace.update({
'name': 'bar',
'page': 'https://abc.xyz/',
- 'tir_label': 'baz'
+ 'grouping_label': 'baz'
})
row = add_point._FlattenTrace('foo', 'baz@@bar', 'https://abc.xyz/', trace)
self.assertEqual(row['test'], 'foo/bar/baz/https___abc.xyz_')
diff --git a/dashboard/dashboard/common/histogram_helpers.py b/dashboard/dashboard/common/histogram_helpers.py
index 74cfc23ed5..75009dca16 100644
--- a/dashboard/dashboard/common/histogram_helpers.py
+++ b/dashboard/dashboard/common/histogram_helpers.py
@@ -102,7 +102,7 @@ def ComputeTestPath(hist):
is_summary = list(
hist.diagnostics.get(reserved_infos.SUMMARY_KEYS.name, []))
- tir_label = GetTIRLabelFromHistogram(hist)
+ grouping_label = GetGroupingLabelFromHistogram(hist)
is_ref = hist.diagnostics.get(reserved_infos.IS_REFERENCE_BUILD.name)
if is_ref and len(is_ref) == 1:
@@ -115,17 +115,18 @@ def ComputeTestPath(hist):
story_name = None
return ComputeTestPathFromComponents(
- hist.name, tir_label=tir_label, story_name=story_name,
+ hist.name, grouping_label=grouping_label, story_name=story_name,
is_summary=is_summary, is_ref=is_ref)
def ComputeTestPathFromComponents(
- hist_name, tir_label=None, story_name=None, is_summary=None, is_ref=False):
+ hist_name, grouping_label=None, story_name=None, is_summary=None,
+ is_ref=False):
path = hist_name
- if tir_label and (
+ if grouping_label and (
not is_summary or reserved_infos.STORY_TAGS.name in is_summary):
- path += '/' + tir_label
+ path += '/' + grouping_label
if story_name and not is_summary:
escaped_story_name = EscapeName(story_name)
@@ -138,7 +139,7 @@ def ComputeTestPathFromComponents(
return path
-def GetTIRLabelFromHistogram(hist):
+def GetGroupingLabelFromHistogram(hist):
tags = hist.diagnostics.get(reserved_infos.STORY_TAGS.name) or []
tags_to_use = [t.split(':') for t in tags if ':' in t]
diff --git a/dashboard/dashboard/common/histogram_helpers_test.py b/dashboard/dashboard/common/histogram_helpers_test.py
index 458d3b9bc1..b90afad450 100644
--- a/dashboard/dashboard/common/histogram_helpers_test.py
+++ b/dashboard/dashboard/common/histogram_helpers_test.py
@@ -19,19 +19,19 @@ class HistogramHelpersTest(testing_common.TestCase):
def setUp(self):
super(HistogramHelpersTest, self).setUp()
- def testGetTIRLabelFromHistogram_NoTags_ReturnsEmpty(self):
+ def testGetGroupingLabelFromHistogram_NoTags_ReturnsEmpty(self):
hist = histogram_module.Histogram('hist', 'count')
- self.assertEqual('', histogram_helpers.GetTIRLabelFromHistogram(hist))
+ self.assertEqual('', histogram_helpers.GetGroupingLabelFromHistogram(hist))
- def testGetTIRLabelFromHistogram_NoValidTags_ReturnsEmpty(self):
+ def testGetGroupingLabelFromHistogram_NoValidTags_ReturnsEmpty(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
generic_set.GenericSet(['foo', 'bar']))
- self.assertEqual('', histogram_helpers.GetTIRLabelFromHistogram(hist))
+ self.assertEqual('', histogram_helpers.GetGroupingLabelFromHistogram(hist))
- def testGetTIRLabelFromHistogram_ValidTags_SortsByKey(self):
+ def testGetGroupingLabelFromHistogram_ValidTags_SortsByKey(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
@@ -39,7 +39,8 @@ def testGetTIRLabelFromHistogram_ValidTags_SortsByKey(self):
generic_set.GenericSet(
['z:last', 'ignore', 'a:first', 'me', 'm:middle']))
self.assertEqual(
- 'first_middle_last', histogram_helpers.GetTIRLabelFromHistogram(hist))
+ 'first_middle_last',
+ histogram_helpers.GetGroupingLabelFromHistogram(hist))
def testComputeTestPathWithStory(self):
hist = histogram_module.Histogram('hist', 'count')
@@ -51,7 +52,7 @@ def testComputeTestPathWithStory(self):
test_path = histogram_helpers.ComputeTestPath(hist)
self.assertEqual('hist/http___story', test_path)
- def testComputeTestPathWithTIRLabel(self):
+ def testComputeTestPathWithGroupingLabel(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
diff --git a/dashboard/dashboard/common/utils.py b/dashboard/dashboard/common/utils.py
index 9b9eaa5d19..ba823ed1cf 100644
--- a/dashboard/dashboard/common/utils.py
+++ b/dashboard/dashboard/common/utils.py
@@ -36,7 +36,7 @@
OAUTH_SCOPES = (
'https://www.googleapis.com/auth/userinfo.email',
)
-OAUTH_ENDPOINTS = ['/api/', '/add_histograms']
+OAUTH_ENDPOINTS = ['/api/', '/add_histograms', '/add_point']
_AUTOROLL_DOMAINS = (
'chops-service-accounts.iam.gserviceaccount.com',
@@ -293,14 +293,14 @@ class ParseTelemetryMetricFailed(Exception):
def ParseTelemetryMetricParts(test_path):
- """Parses a test path and returns the tir_label, measurement, and story.
+ """Parses a test path and returns the grouping_label, measurement, and story.
Args:
test_path_parts: A test path.
Returns:
- A tuple of (tir_label, measurement, story), or None if this doesn't appear
- to be a telemetry test.
+ A tuple of (grouping_label, measurement, story), or None if this doesn't
+ appear to be a telemetry test.
"""
test_path_parts = test_path.split('/')
metric_parts = test_path_parts[3:]
@@ -312,7 +312,7 @@ def ParseTelemetryMetricParts(test_path):
if len(metric_parts) == 2:
return '', metric_parts[0], metric_parts[1]
- # 3 part structure, so there's a TIR label in there.
+ # 3 part structure, so there's a grouping label in there.
# ie. M/B/S/timeToFirstMeaningfulPaint_avg/load_tools/load_tools_weather
if len(metric_parts) == 3:
return metric_parts[1], metric_parts[0], metric_parts[2]
diff --git a/dashboard/dashboard/cron_update_sheriff.py b/dashboard/dashboard/cron_update_sheriff.py
new file mode 100644
index 0000000000..e4b159f03b
--- /dev/null
+++ b/dashboard/dashboard/cron_update_sheriff.py
@@ -0,0 +1,63 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+from google.appengine.ext import deferred
+from google.appengine.ext import ndb
+
+from dashboard.common import datastore_hooks
+from dashboard.common import request_handler
+from dashboard.models import anomaly_config
+from dashboard.models import graph_data
+from dashboard.models import sheriff as sheriff_module
+
+
+_TASK_QUEUE_NAME = 'deprecate-tests-queue'
+_TESTS_PER_QUERY = 100
+
+
+class CronUpdateSheriffHandler(request_handler.RequestHandler):
+ def get(self):
+ datastore_hooks.SetPrivilegedRequest()
+ _QueryTestsTask(start_cursor=None)
+
+ def post(self):
+ datastore_hooks.SetPrivilegedRequest()
+ _QueryTestsTask(start_cursor=None)
+
+
+
+@ndb.synctasklet
+def _QueryTestsTask(start_cursor=None, sheriffs=None, anomaly_configs=None):
+ if not sheriffs:
+ sheriffs = yield sheriff_module.Sheriff.query().fetch_async()
+
+ if not anomaly_configs:
+ anomaly_configs = yield anomaly_config.AnomalyConfig.query().fetch_async()
+
+ q = graph_data.TestMetadata.query()
+ q.filter(graph_data.TestMetadata.has_rows == True)
+ q.order(graph_data.TestMetadata.key)
+ keys, next_cursor, more = q.fetch_page(
+ _TESTS_PER_QUERY, start_cursor=start_cursor, keys_only=True)
+
+ if more:
+ deferred.defer(
+ _QueryTestsTask, start_cursor=next_cursor, _queue=_TASK_QUEUE_NAME)
+
+ yield [_DoTestUpdateSheriff(k, sheriffs, anomaly_configs) for k in keys]
+
+
+@ndb.tasklet
+def _DoTestUpdateSheriff(test_key, sheriffs, anomaly_configs):
+ test = yield test_key.get_async()
+
+ changed = yield test.UpdateSheriffAsync(
+ sheriffs=sheriffs, anomaly_configs=anomaly_configs)
+
+ if changed:
+ yield test.put_async()
diff --git a/dashboard/dashboard/cron_update_sheriff_test.py b/dashboard/dashboard/cron_update_sheriff_test.py
new file mode 100644
index 0000000000..bade0370a1
--- /dev/null
+++ b/dashboard/dashboard/cron_update_sheriff_test.py
@@ -0,0 +1,82 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import webapp2
+import webtest
+
+from dashboard import cron_update_sheriff
+from dashboard.common import testing_common
+from dashboard.common import utils
+from dashboard.models import anomaly_config
+from dashboard.models import sheriff as sheriff_module
+
+
+_TESTS = [
+ ['ChromiumPerf'],
+ ['mac'],
+ {
+ 'SunSpider': {
+ 'Total': {
+ 't': {},
+ 't_ref': {},
+ },
+ },
+ 'OtherTest': {
+ 'OtherMetric': {
+ 'foo1': {},
+ 'foo2': {},
+ },
+ },
+ }
+]
+
+
+class CronSheriffUpdateTest(testing_common.TestCase):
+
+ def setUp(self):
+ super(CronSheriffUpdateTest, self).setUp()
+ app = webapp2.WSGIApplication([(
+ '/cron/update_sheriff', cron_update_sheriff.CronUpdateSheriffHandler)])
+ self.testapp = webtest.TestApp(app)
+ cron_update_sheriff._TESTS_PER_QUERY = 1
+
+ def testPost_UpdatesSheriff(self):
+ testing_common.AddTests(*_TESTS)
+
+ sheriff_module.Sheriff(
+ id='s1', email='a@chromium.org', patterns=[
+ '*/*/SunSpider/Total']).put()
+
+ t = utils.TestKey('ChromiumPerf/mac/SunSpider/Total').get()
+ self.assertIsNone(t.sheriff)
+ self.assertIsNone(t.overridden_anomaly_config)
+
+ self.testapp.post('/cron/update_sheriff')
+ self.ExecuteDeferredTasks(cron_update_sheriff._TASK_QUEUE_NAME)
+
+ t = utils.TestKey('ChromiumPerf/mac/SunSpider/Total').get()
+ self.assertIsNotNone(t.sheriff)
+ self.assertIsNone(t.overridden_anomaly_config)
+
+ def testPost_UpdatesAnomalyConfig(self):
+ testing_common.AddTests(*_TESTS)
+
+ anomaly_config.AnomalyConfig(
+ id='anomaly_config1', config='',
+ patterns=['ChromiumPerf/mac/SunSpider/Total']).put()
+
+ t = utils.TestKey('ChromiumPerf/mac/SunSpider/Total').get()
+ self.assertIsNone(t.sheriff)
+ self.assertIsNone(t.overridden_anomaly_config)
+
+ self.testapp.post('/cron/update_sheriff')
+ self.ExecuteDeferredTasks(cron_update_sheriff._TASK_QUEUE_NAME)
+
+ t = utils.TestKey('ChromiumPerf/mac/SunSpider/Total').get()
+ self.assertIsNone(t.sheriff)
+ self.assertIsNotNone(t.overridden_anomaly_config)
diff --git a/dashboard/dashboard/dispatcher.py b/dashboard/dashboard/dispatcher.py
index e0a632fb4b..782574c87e 100644
--- a/dashboard/dashboard/dispatcher.py
+++ b/dashboard/dashboard/dispatcher.py
@@ -19,6 +19,7 @@
from dashboard import bug_details
from dashboard import buildbucket_job_status
from dashboard import create_health_report
+from dashboard import cron_update_sheriff
from dashboard import debug_alert
from dashboard import delete_test_data
from dashboard import deprecate_tests
@@ -104,6 +105,7 @@
buildbucket_job_status.BuildbucketJobStatusHandler),
('/create_health_report', create_health_report.CreateHealthReportHandler),
('/configs/update', sheriff_config_poller.ConfigsUpdateHandler),
+ ('/cron/update_sheriff', cron_update_sheriff.CronUpdateSheriffHandler),
('/debug_alert', debug_alert.DebugAlertHandler),
('/delete_expired_entities',
layered_cache_delete_expired.LayeredCacheDeleteExpiredHandler),
diff --git a/dashboard/dashboard/models/graph_data.py b/dashboard/dashboard/models/graph_data.py
index 00c81ff57f..0f74ad9cf9 100644
--- a/dashboard/dashboard/models/graph_data.py
+++ b/dashboard/dashboard/models/graph_data.py
@@ -279,10 +279,11 @@ def __init__(self, *args, **kwargs):
@ndb.synctasklet
def UpdateSheriff(self):
- yield self.UpdateSheriffAsync()
+ r = yield self.UpdateSheriffAsync()
+ raise ndb.Return(r)
@ndb.tasklet
- def UpdateSheriffAsync(self):
+ def UpdateSheriffAsync(self, sheriffs=None, anomaly_configs=None):
"""This method is called before a TestMetadata is put into the datastore.
Here, we check the key to make sure it is valid and check the sheriffs and
@@ -297,7 +298,11 @@ def UpdateSheriffAsync(self):
# Set the sheriff to the first sheriff (alphabetically by sheriff name)
# that has a test pattern that matches this test.
- sheriffs = yield sheriff_module.Sheriff.query().fetch_async()
+ old_sheriff = self.sheriff
+ old_anomaly_config = self.overridden_anomaly_config
+
+ if not sheriffs:
+ sheriffs = yield sheriff_module.Sheriff.query().fetch_async()
self.sheriff = None
for sheriff_entity in sheriffs:
for pattern in sheriff_entity.patterns:
@@ -311,7 +316,8 @@ def UpdateSheriffAsync(self):
# that more specifically matches the test are given higher priority.
# ie. */*/*/foo is chosen over */*/*/*
self.overridden_anomaly_config = None
- anomaly_configs = yield anomaly_config.AnomalyConfig.query().fetch_async()
+ if not anomaly_configs:
+ anomaly_configs = yield anomaly_config.AnomalyConfig.query().fetch_async()
anomaly_data_list = []
for e in anomaly_configs:
for p in e.patterns:
@@ -321,6 +327,10 @@ def UpdateSheriffAsync(self):
if anomaly_config_to_use:
self.overridden_anomaly_config = anomaly_config_to_use.key
+ raise ndb.Return(
+ self.sheriff != old_sheriff or
+ self.overridden_anomaly_config != old_anomaly_config)
+
def CreateCallback(self):
"""Called when the entity is first saved."""
if len(self.key.id().split('/')) > 3:
diff --git a/dashboard/dashboard/pinpoint/elements/jobs-page/new-job-fab.html b/dashboard/dashboard/pinpoint/elements/jobs-page/new-job-fab.html
index 8a0d9eb568..d55ca7c6e5 100644
--- a/dashboard/dashboard/pinpoint/elements/jobs-page/new-job-fab.html
+++ b/dashboard/dashboard/pinpoint/elements/jobs-page/new-job-fab.html
@@ -42,7 +42,44 @@
}
paper-dialog {
- width: 30em;
+ width: 50em;
+ }
+
+ .divider {
+ width: 100%;
+ border-bottom: 1px dashed black;
+ line-height: 0.1em;
+ margin: 2em 0em;
+ text-align: center;
+ font-size: 1.5em;
+ font-weight: lighter;
+ }
+
+ .divider span {
+ background: white;
+ padding: 0 1em;
+ }
+
+ .horizontal {
+ display: inline-flex;
+ width: 100%;
+ }
+
+ .column {
+ width: 50%;
+ }
+
+ .rightPad {
+ margin-right: 0.25em;
+ }
+
+ #chartDropdown {
+ width: 75%;
+ margin-right: 0.5em;
+ }
+
+ #statisticDropdown {
+ width: 25%;
}
.error {
@@ -99,14 +136,18 @@
Run a try job
+
-
+
-
+
+ Benchmark Configuration
+
+
Try Job
@@ -114,17 +155,6 @@ Run a try job
-
-
-
-
-
-
-
-
-
-
-
Run a try job
-
-
-
-
+
+
+
-
-
-
-
+
-
-
-
+
+ Job Configuration
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
@@ -183,6 +267,8 @@ Run a try job
'gpu_perftests', 'load_library_perf_tests', 'media_perftests',
'net_perftests', 'tracing_perftests', 'vr_common_perftests'];
+ const _STATISTICS = ['avg', 'count', 'max', 'min', 'std', 'sum'];
+
Polymer({
is: 'new-job-fab',
@@ -194,6 +280,7 @@ Run a try job
value: false
},
bug_id: String,
+ chart: String,
commitsList: Object,
commitRangeLoading: {
type: Boolean,
@@ -211,6 +298,7 @@ Run a try job
},
extra_test_args: String,
patch: String,
+ statistic: String,
start_git_hash: {
type: String,
value: 'HEAD'
@@ -260,6 +348,10 @@ Run a try job
this.$.try_dialog.close();
},
+ isStoryOrTagsSet(story, storyTags) {
+ return story !== '' || storyTags !== '';
+ },
+
isBisectJob(comparisonMode) {
return comparisonMode == 'performance';
},
@@ -301,6 +393,12 @@ Run a try job
if (!this.$.storyTagsDropdown.disabled) {
this.$.storyTagsDropdown.tryReselectQuery();
}
+ if (!this.$.chartDropdown.disabled) {
+ this.$.chartDropdown.tryReselectQuery();
+ }
+ if (!this.$.statisticDropdown.disabled) {
+ this.$.statisticDropdown.tryReselectQuery();
+ }
},
jobChanged(job) {
@@ -317,6 +415,8 @@ Run a try job
this.$.benchmarkDropdown.query = job.arguments.benchmark;
this.$.storyCasesDropdown.query = job.arguments.story;
this.$.storyTagsDropdown.query = job.arguments.story_tags;
+ this.$.chartDropdown.query = job.arguments.chart;
+ this.$.statisticDropdown.query = job.arguments.statistic;
if (this.config) {
this.$.botDropdown.tryReselectQuery();
}
@@ -326,6 +426,8 @@ Run a try job
if (this.benchmarkConfig) {
this.$.storyCasesDropdown.tryReselectQuery();
this.$.storyTagsDropdown.tryReselectQuery();
+ this.$.chartDropdown.tryReselectQuery();
+ this.$.statisticDropdown.tryReselectQuery();
}
},
@@ -371,6 +473,29 @@ Run a try job
return vals;
},
+ measurements(benchmarkConfig) {
+ if (!benchmarkConfig) {
+ return [];
+ }
+
+ const hasStatName = function(m) {
+ for (const s of _STATISTICS) {
+ if (m.endsWith(s)) {
+ return true;
+ }
+ }
+ return false;
+ };
+
+ return this.arrayToAutocompleteList(
+ benchmarkConfig.measurements.filter(m => !hasStatName(m)));
+ },
+
+ statistics(benchmarkConfig) {
+ const s = [''];
+ return this.arrayToAutocompleteList(s.concat(_STATISTICS));
+ },
+
storyCases(benchmarkConfig) {
if (!benchmarkConfig) {
return [];
@@ -385,7 +510,7 @@ Run a try job
this.storyCases(benchmarkConfig).length > 0);
},
- storyTags(benchmarkConfig) {
+ storyTagsFromConfig(benchmarkConfig) {
if (!benchmarkConfig) {
return [];
}
@@ -397,7 +522,7 @@ Run a try job
validTags(benchmark, benchmarkConfig) {
return (
this.validBenchmark(benchmark) &&
- this.storyTags(benchmarkConfig).length > 0);
+ this.storyTagsFromConfig(benchmarkConfig).length > 0);
},
telemetryIsolate(configuration, benchmark) {
diff --git a/dashboard/dashboard/pinpoint/handlers/refresh_jobs.py b/dashboard/dashboard/pinpoint/handlers/refresh_jobs.py
index 1e8980edb0..a6ba8a1d6d 100644
--- a/dashboard/dashboard/pinpoint/handlers/refresh_jobs.py
+++ b/dashboard/dashboard/pinpoint/handlers/refresh_jobs.py
@@ -18,8 +18,8 @@
from dashboard.pinpoint.models import job as job_module
_JOB_CACHE_KEY = 'pinpoint_refresh_jobs_%s'
-_JOB_MAX_RETRIES = 3
-_JOB_FROZEN_THRESHOLD = datetime.timedelta(hours=6)
+_JOB_MAX_RETRIES = 5
+_JOB_FROZEN_THRESHOLD = datetime.timedelta(hours=1)
def _FindFrozenJobs():
diff --git a/dashboard/dashboard/pinpoint/models/change/change.py b/dashboard/dashboard/pinpoint/models/change/change.py
index be49e0e488..d005182604 100644
--- a/dashboard/dashboard/pinpoint/models/change/change.py
+++ b/dashboard/dashboard/pinpoint/models/change/change.py
@@ -124,7 +124,7 @@ def FromUrl(cls, url):
def FromDict(cls, data):
commits = tuple(commit_module.Commit.FromDict(commit)
for commit in data['commits'])
- if 'patch' in data:
+ if data.get('patch') is not None:
patch = patch_module.GerritPatch.FromDict(data['patch'])
else:
patch = None
@@ -250,7 +250,7 @@ def _FindMidpoints(commits_a, commits_b):
commit_midpoint = commit_module.Commit.Midpoint(commit_a, commit_b)
commits_midpoint.append(commit_midpoint)
- if commit_a == commit_midpoint != commit_b:
+ if commit_a == commit_midpoint and commit_midpoint != commit_b:
# Commits are adjacent.
# Add any DEPS changes to the commit lists.
deps_a = commit_a.Deps()
diff --git a/dashboard/dashboard/pinpoint/models/change/change_test.py b/dashboard/dashboard/pinpoint/models/change/change_test.py
index dafd708297..5e67f507fc 100644
--- a/dashboard/dashboard/pinpoint/models/change/change_test.py
+++ b/dashboard/dashboard/pinpoint/models/change/change_test.py
@@ -158,6 +158,23 @@ def testFromDictWithAllFields(self):
expected = Change(chromium=123, catapult=456, patch=True)
self.assertEqual(c, expected)
+ def testFromDictWithNonePatch(self):
+ self.get_change.return_value = {
+ 'id': 'repo~branch~id',
+ 'revisions': {'abc123': {}}
+ }
+
+ c = change.Change.FromDict({
+ 'commits': (
+ {'repository': 'chromium', 'git_hash': 'commit_123'},
+ {'repository': 'catapult', 'git_hash': 'commit_456'},
+ ),
+ 'patch': None,
+ })
+
+ expected = Change(chromium=123, catapult=456, patch=False)
+ self.assertEqual(c, expected)
+
class MidpointTest(test.TestCase):
diff --git a/dashboard/dashboard/pinpoint/models/change/commit.py b/dashboard/dashboard/pinpoint/models/change/commit.py
index 283d75a75b..aca059a944 100644
--- a/dashboard/dashboard/pinpoint/models/change/commit.py
+++ b/dashboard/dashboard/pinpoint/models/change/commit.py
@@ -54,6 +54,10 @@ def ParseDateWithUTCOffset(date_string):
class Commit(collections.namedtuple('Commit', ('repository', 'git_hash'))):
"""A git repository pinned to a particular commit."""
+ def __init__(self, *args, **kwargs):
+ super(Commit, self).__init__(*args, **kwargs)
+ self._repository_url = None
+
def __str__(self):
"""Returns an informal short string representation of this Commit."""
return self.repository + '@' + self.git_hash[:7]
@@ -66,7 +70,10 @@ def id_string(self):
@property
def repository_url(self):
"""The HTTPS URL of the repository as passed to `git clone`."""
- return repository_module.RepositoryUrl(self.repository)
+ cached_url = getattr(self, '_repository_url', None)
+ if not cached_url:
+ self._repository_url = repository_module.RepositoryUrl(self.repository)
+ return self._repository_url
def Deps(self):
"""Return the DEPS of this Commit.
@@ -162,7 +169,9 @@ def FromDep(cls, dep):
"""
repository = repository_module.RepositoryName(
dep.repository_url, add_if_missing=True)
- return cls(repository, dep.git_hash)
+ commit = cls(repository, dep.git_hash)
+ commit._repository_url = dep.repository_url
+ return commit
@classmethod
def FromData(cls, data):
@@ -218,12 +227,24 @@ def FromDict(cls, data):
try:
# If they send in something like HEAD, resolve to a hash.
repository_url = repository_module.RepositoryUrl(repository)
- result = gitiles_service.CommitInfo(repository_url, git_hash)
- git_hash = result['commit']
+
+ try:
+        # If it's already in the cache, then we've resolved this recently, and we
+ # don't go resolving the data from the gitiles service.
+ result = commit_cache.Get(git_hash)
+ except KeyError:
+ result = gitiles_service.CommitInfo(repository_url, git_hash)
+ git_hash = result['commit']
except gitiles_service.NotFoundError as e:
raise KeyError(str(e))
commit = cls(repository, git_hash)
+ commit._repository_url = repository_url
+
+    # If this is something like HEAD, cache this for a short time so that we
+ # avoid hammering gitiles.
+ if not gitiles_service.IsHash(data['git_hash']):
+ commit.CacheCommitInfo(result, memcache_timeout=30*60)
return commit
@@ -259,7 +280,7 @@ def GetOrCacheCommitInfo(self):
self.repository_url, self.git_hash)
return self.CacheCommitInfo(commit_info)
- def CacheCommitInfo(self, commit_info):
+ def CacheCommitInfo(self, commit_info, memcache_timeout=None):
url = self.repository_url + '/+/' + commit_info['commit']
author = commit_info['author']['email']
@@ -268,7 +289,9 @@ def CacheCommitInfo(self, commit_info):
subject = commit_info['message'].split('\n', 1)[0]
message = commit_info['message']
- commit_cache.Put(self.id_string, url, author, created, subject, message)
+ commit_cache.Put(
+ self.id_string, url, author, created, subject, message,
+ memcache_timeout=memcache_timeout)
return {
'url': url,
diff --git a/dashboard/dashboard/pinpoint/models/change/commit_cache.py b/dashboard/dashboard/pinpoint/models/change/commit_cache.py
index 96ec6b0ff2..71909eda2e 100644
--- a/dashboard/dashboard/pinpoint/models/change/commit_cache.py
+++ b/dashboard/dashboard/pinpoint/models/change/commit_cache.py
@@ -8,8 +8,9 @@
from google.appengine.ext import ndb
+_MEMCACHE_TIMEOUT = 60 * 60 * 24 * 30
+
-@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT)
def Get(id_string):
"""Retrieve cached commit or patch details from the Datastore.
@@ -19,7 +20,7 @@ def Get(id_string):
Returns:
A dict with the fields {'url', 'author', created', 'subject', 'message'}.
"""
- entity = ndb.Key(Commit, id_string).get()
+ entity = ndb.Key(Commit, id_string).get(use_datastore=False)
if not entity:
raise KeyError('Commit or Patch not found in the Datastore:\n' + id_string)
@@ -32,8 +33,9 @@ def Get(id_string):
}
-@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT)
-def Put(id_string, url, author, created, subject, message):
+def Put(
+ id_string, url, author, created, subject, message,
+ memcache_timeout=_MEMCACHE_TIMEOUT):
"""Add commit or patch details to the Datastore cache.
Args:
@@ -44,14 +46,29 @@ def Put(id_string, url, author, created, subject, message):
subject: The title/subject line of the Commit or Patch.
message: The Commit message.
"""
- Commit(url=url, author=author, created=created,
- subject=subject, message=message, id=id_string).put()
+ if not memcache_timeout:
+ memcache_timeout = _MEMCACHE_TIMEOUT
+
+ Commit(
+ url=url,
+ author=author,
+ created=created,
+ subject=subject,
+ message=message,
+ id=id_string).put(use_datastore=False, memcache_timeout=memcache_timeout)
class Commit(ndb.Model):
+ # Never write/read from Datastore.
+ _use_datastore = False
+
+ # Rely on this model being cached only in memory or memcache.
_use_memcache = True
_use_cache = True
- _memcache_timeout = 60 * 60 * 24
+
+  # Cache the data in Memcache for up to 30 days.
+ _memcache_timeout = _MEMCACHE_TIMEOUT
+
url = ndb.StringProperty(indexed=False, required=True)
author = ndb.StringProperty(indexed=False, required=True)
created = ndb.DateTimeProperty(indexed=False, required=True)
diff --git a/dashboard/dashboard/pinpoint/models/change/commit_test.py b/dashboard/dashboard/pinpoint/models/change/commit_test.py
index 191d3bc6b1..45ac118f3d 100644
--- a/dashboard/dashboard/pinpoint/models/change/commit_test.py
+++ b/dashboard/dashboard/pinpoint/models/change/commit_test.py
@@ -216,6 +216,31 @@ def testFromDictResolvesHEAD(self):
expected = commit.Commit('chromium', 'git hash at HEAD')
self.assertEqual(c, expected)
+ @mock.patch.object(commit.commit_cache, 'Put')
+ def testFromDict_SkipsCache(self, mock_put):
+ git_hash = 'ABCDEABCDEABCDEABCDEABCDEABCDEABCDEABCDE'
+ c = commit.Commit.FromDict({
+ 'repository': test.CHROMIUM_URL,
+ 'git_hash': git_hash,
+ })
+ e = commit.Commit(repository='chromium', git_hash=git_hash)
+ self.assertEqual(c, e)
+ self.assertFalse(mock_put.called)
+
+ @mock.patch.object(commit.commit_cache, 'Put')
+ def testFromDict_Caches(self, mock_put):
+ c = commit.Commit.FromDict({
+ 'repository': test.CHROMIUM_URL,
+ 'git_hash': 'HEAD',
+ })
+
+ expected = commit.Commit('chromium', 'git hash at HEAD')
+ self.assertEqual(c, expected)
+
+ mock_put.assert_called_once_with(
+ expected.id_string, mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY,
+ memcache_timeout=1800)
+
def testFromDictFailureFromUnknownRepo(self):
with self.assertRaises(KeyError):
commit.Commit.FromDict({
diff --git a/dashboard/dashboard/pinpoint/models/evaluators.py b/dashboard/dashboard/pinpoint/models/evaluators/__init__.py
similarity index 100%
rename from dashboard/dashboard/pinpoint/models/evaluators.py
rename to dashboard/dashboard/pinpoint/models/evaluators/__init__.py
diff --git a/dashboard/dashboard/pinpoint/models/job_state.py b/dashboard/dashboard/pinpoint/models/job_state.py
index f609e56add..01c04f4741 100644
--- a/dashboard/dashboard/pinpoint/models/job_state.py
+++ b/dashboard/dashboard/pinpoint/models/job_state.py
@@ -283,40 +283,47 @@ def _Compare(self, change_a, change_b):
executions_b = executions_by_quest_b[quest]
# Compare exceptions.
- values_a = tuple(bool(execution.exception) for execution in executions_a)
- values_b = tuple(bool(execution.exception) for execution in executions_b)
- if values_a and values_b:
+ exceptions_a = tuple(
+ bool(execution.exception) for execution in executions_a)
+ exceptions_b = tuple(
+ bool(execution.exception) for execution in executions_b)
+ if exceptions_a and exceptions_b:
if self._comparison_mode == FUNCTIONAL:
- if (hasattr(self, '_comparison_magnitude') and
- self._comparison_magnitude):
+ if getattr(self, '_comparison_magnitude', None):
comparison_magnitude = self._comparison_magnitude
else:
comparison_magnitude = 0.5
else:
comparison_magnitude = 1.0
- comparison = compare.Compare(values_a, values_b, attempt_count,
+ comparison = compare.Compare(exceptions_a, exceptions_b, attempt_count,
FUNCTIONAL, comparison_magnitude)
if comparison == compare.DIFFERENT:
return compare.DIFFERENT
elif comparison == compare.UNKNOWN:
any_unknowns = True
- # Compare result values.
- values_a = tuple(Mean(execution.result_values)
- for execution in executions_a if execution.result_values)
- values_b = tuple(Mean(execution.result_values)
- for execution in executions_b if execution.result_values)
- if values_a and values_b:
- if (hasattr(self, '_comparison_magnitude') and
- self._comparison_magnitude):
- max_iqr = max(math_utils.Iqr(values_a), math_utils.Iqr(values_b))
- if max_iqr:
- comparison_magnitude = abs(self._comparison_magnitude / max_iqr)
- else:
- comparison_magnitude = 1000.0 # Something very large.
+      # Compare result values by consolidating all measurements by change, and
+ # treating those as a single sample set for comparison.
+ def AllValues(execution):
+ for e in execution:
+ if not e.result_values:
+ continue
+ for v in e.result_values:
+ yield v
+
+ all_a_values = tuple(AllValues(executions_a))
+ all_b_values = tuple(AllValues(executions_b))
+ if all_a_values and all_b_values:
+ if getattr(self, '_comparison_magnitude', None):
+ max_iqr = max(
+ max(math_utils.Iqr(all_a_values), math_utils.Iqr(all_b_values)),
+ 0.001)
+ comparison_magnitude = abs(self._comparison_magnitude / max_iqr)
else:
comparison_magnitude = 1.0
- comparison = compare.Compare(values_a, values_b, attempt_count,
+
+ sample_count = (len(all_a_values) + len(all_b_values)) // 2
+ comparison = compare.Compare(all_a_values, all_b_values, sample_count,
PERFORMANCE, comparison_magnitude)
if comparison == compare.DIFFERENT:
return compare.DIFFERENT
diff --git a/dashboard/dashboard/pinpoint/models/quest/read_value.py b/dashboard/dashboard/pinpoint/models/quest/read_value.py
index 197a0bd689..4a89e1f329 100644
--- a/dashboard/dashboard/pinpoint/models/quest/read_value.py
+++ b/dashboard/dashboard/pinpoint/models/quest/read_value.py
@@ -25,10 +25,10 @@
class ReadHistogramsJsonValue(quest.Quest):
def __init__(self, results_filename, hist_name=None,
- tir_label=None, story=None, statistic=None):
+ grouping_label=None, story=None, statistic=None):
self._results_filename = results_filename
self._hist_name = hist_name
- self._tir_label = tir_label
+ self._grouping_label = grouping_label
self._story = story
self._statistic = statistic
@@ -36,7 +36,7 @@ def __eq__(self, other):
return (isinstance(other, type(self)) and
self._results_filename == other._results_filename and
self._hist_name == other._hist_name and
- self._tir_label == other._tir_label and
+ self._grouping_label == other._grouping_label and
self._story == other._story and
self._statistic == other._statistic)
@@ -51,7 +51,7 @@ def Start(self, change, isolate_server, isolate_hash):
del change
return _ReadHistogramsJsonValueExecution(
- self._results_filename, self._hist_name, self._tir_label,
+ self._results_filename, self._hist_name, self._grouping_label,
self._story, self._statistic, isolate_server, isolate_hash)
@classmethod
@@ -65,21 +65,24 @@ def FromDict(cls, arguments):
results_filename = posixpath.join(benchmark, 'perf_results.json')
chart = arguments.get('chart')
- tir_label = arguments.get('tir_label')
+ # TODO(crbug.com/974237): Only read from 'grouping_label' when enough time
+ # has passed and clients no longer write the 'tir_label' only.
+ grouping_label = (arguments.get('grouping_label') or
+ arguments.get('tir_label'))
trace = arguments.get('trace')
statistic = arguments.get('statistic')
- return cls(results_filename, chart, tir_label, trace, statistic)
+ return cls(results_filename, chart, grouping_label, trace, statistic)
class _ReadHistogramsJsonValueExecution(execution.Execution):
- def __init__(self, results_filename, hist_name, tir_label,
+ def __init__(self, results_filename, hist_name, grouping_label,
story, statistic, isolate_server, isolate_hash):
super(_ReadHistogramsJsonValueExecution, self).__init__()
self._results_filename = results_filename
self._hist_name = hist_name
- self._tir_label = tir_label
+ self._grouping_label = grouping_label
self._story = story
self._statistic = statistic
self._isolate_server = isolate_server
@@ -104,21 +107,24 @@ def _Poll(self):
self._trace_urls = FindTraceUrls(histograms)
test_path_to_match = histogram_helpers.ComputeTestPathFromComponents(
- self._hist_name, tir_label=self._tir_label, story_name=self._story)
+ self._hist_name, grouping_label=self._grouping_label,
+ story_name=self._story)
logging.debug('Test path to match: %s', test_path_to_match)
# Have to pull out either the raw sample values, or the statistic
result_values = ExtractValuesFromHistograms(test_path_to_match,
histograms_by_path,
self._hist_name,
- self._tir_label, self._story,
+ self._grouping_label,
+ self._story,
self._statistic)
self._Complete(result_values=tuple(result_values))
def ExtractValuesFromHistograms(test_path_to_match, histograms_by_path,
- histogram_name, tir_label, story, statistic):
+ histogram_name, grouping_label, story,
+ statistic):
result_values = []
matching_histograms = []
if test_path_to_match in histograms_by_path:
@@ -149,8 +155,8 @@ def ExtractValuesFromHistograms(test_path_to_match, histograms_by_path,
raise errors.ReadValueNoValues()
else:
conditions = {'histogram': histogram_name}
- if tir_label:
- conditions['tir_label'] = tir_label
+ if grouping_label:
+ conditions['grouping_label'] = grouping_label
if story:
conditions['story'] = story
reason = ', '.join(list(':'.join(i) for i in conditions.items()))
diff --git a/dashboard/dashboard/pinpoint/models/quest/read_value_test.py b/dashboard/dashboard/pinpoint/models/quest/read_value_test.py
index 9090a75c2f..888e9f12a0 100644
--- a/dashboard/dashboard/pinpoint/models/quest/read_value_test.py
+++ b/dashboard/dashboard/pinpoint/models/quest/read_value_test.py
@@ -39,7 +39,7 @@ def testMinimumArguments(self):
def testAllArguments(self):
arguments = dict(_BASE_ARGUMENTS_HISTOGRAMS)
arguments['chart'] = 'timeToFirst'
- arguments['tir_label'] = 'pcv1-cold'
+ arguments['grouping_label'] = 'pcv1-cold'
arguments['trace'] = 'trace_name'
arguments['statistic'] = 'avg'
quest = read_value.ReadHistogramsJsonValue.FromDict(arguments)
@@ -127,14 +127,14 @@ def testReadHistogramsJsonValue(self):
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist.name, 'tir_label', 'story')
+ 'chartjson-output.json', hist.name, 'label', 'story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -150,14 +150,14 @@ def testReadHistogramsJsonValueStoryNeedsEscape(self):
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name,
generic_set.GenericSet(['http://story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist.name, 'tir_label', 'http://story')
+ 'chartjson-output.json', hist.name, 'label', 'http://story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -173,15 +173,14 @@ def testReadHistogramsJsonValueStatistic(self):
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist.name,
- 'tir_label', 'story', statistic='avg')
+ 'chartjson-output.json', hist.name, 'label', 'story', statistic='avg')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -194,15 +193,14 @@ def testReadHistogramsJsonValueStatisticNoSamples(self):
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist.name,
- 'tir_label', 'story', statistic='avg')
+ 'chartjson-output.json', hist.name, 'label', 'story', statistic='avg')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -224,14 +222,14 @@ def testReadHistogramsJsonValueMultipleHistograms(self):
histograms = histogram_set.HistogramSet([hist, hist2, hist3])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist.name, 'tir_label', 'story')
+ 'chartjson-output.json', hist.name, 'label', 'story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -324,7 +322,7 @@ def testReadHistogramsDiagnosticRefSkipTraceUrls(self):
execution.AsDict())
self.assertRetrievedOutputJson()
- def testReadHistogramsJsonValueWithNoTirLabel(self):
+ def testReadHistogramsJsonValueWithNoGroupingLabel(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
@@ -332,12 +330,12 @@ def testReadHistogramsJsonValueWithNoTirLabel(self):
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist_name=hist.name, tir_label='tir_label')
+ 'chartjson-output.json', hist_name=hist.name, grouping_label='label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -366,7 +364,7 @@ def testReadHistogramsJsonValueWithNoStory(self):
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
- def testReadHistogramsJsonValueSummaryTIRLabel(self):
+ def testReadHistogramsJsonValueSummaryGroupingLabel(self):
samples = []
hists = []
for i in range(10):
@@ -382,12 +380,13 @@ def testReadHistogramsJsonValueSummaryTIRLabel(self):
histograms = histogram_set.HistogramSet(hists)
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist_name=hists[0].name, tir_label='tir_label')
+ 'chartjson-output.json', hist_name=hists[0].name,
+ grouping_label='label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -406,7 +405,7 @@ def testReadHistogramsJsonValueSummary(self):
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['story%d' % i]))
hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
- generic_set.GenericSet(['group:tir_label1']))
+ generic_set.GenericSet(['group:label1']))
hists.append(hist)
samples.extend(hist.sample_values)
@@ -418,14 +417,14 @@ def testReadHistogramsJsonValueSummary(self):
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['another_story%d' % i]))
hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
- generic_set.GenericSet(['group:tir_label2']))
+ generic_set.GenericSet(['group:label2']))
hists.append(hist)
samples.extend(hist.sample_values)
histograms = histogram_set.HistogramSet(hists)
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
self.SetOutputFileContents(histograms.AsDicts())
@@ -449,14 +448,14 @@ def testReadHistogramsJsonValueSummaryNoHistName(self):
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['story%d' % i]))
hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
- generic_set.GenericSet(['group:tir_label1']))
+ generic_set.GenericSet(['group:label1']))
hists.append(hist)
samples.extend(hist.sample_values)
histograms = histogram_set.HistogramSet(hists)
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
self.SetOutputFileContents(histograms.AsDicts())
@@ -472,7 +471,7 @@ def testReadHistogramsJsonValueWithMissingFile(self):
self._retrieve.return_value = '{"files": {}}'
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist_name='metric', tir_label='test')
+ 'chartjson-output.json', hist_name='metric', grouping_label='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -482,7 +481,7 @@ def testReadHistogramsJsonValueEmptyHistogramSet(self):
self.SetOutputFileContents([])
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist_name='metric', tir_label='test')
+ 'chartjson-output.json', hist_name='metric', grouping_label='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
@@ -512,13 +511,13 @@ def testReadHistogramsJsonValueWithNoValues(self):
self.assertReadValueError(execution, 'ReadValueNotFound')
- def testReadHistogramsJsonValueTirLabelWithNoValues(self):
+ def testReadHistogramsJsonValueGroupingLabelWithNoValues(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
- 'chartjson-output.json', hist_name='chart', tir_label='tir_label')
+ 'chartjson-output.json', hist_name='chart', grouping_label='label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
diff --git a/dashboard/dashboard/pinpoint/models/quest/run_telemetry_test.py b/dashboard/dashboard/pinpoint/models/quest/run_telemetry_test.py
index 6a53305d73..3b2240af55 100644
--- a/dashboard/dashboard/pinpoint/models/quest/run_telemetry_test.py
+++ b/dashboard/dashboard/pinpoint/models/quest/run_telemetry_test.py
@@ -20,11 +20,10 @@
def _StoryToRegex(story_name):
- # During import, some chars in story names got replaced by "_" so they
- # could be safely included in the test_path. At this point we don't know
- # what the original characters were. Additionally, some special characters
- # and argument quoting are not interpreted correctly, e.g. by bisect
- # scripts (crbug.com/662472). We thus keep only a small set of "safe chars"
+ # Telemetry's --story-filter argument takes in a regex, not a
+ # plain string. Stories can have all sorts of special characters
+ # in their names (see crbug.com/983993) which would confuse a
+ # regex. We thus keep only a small set of "safe chars"
# and replace all others with match-any-character regex dots.
return '^%s$' % _STORY_REGEX.sub('.', story_name)
@@ -36,8 +35,22 @@ def Start(self, change, isolate_server, isolate_hash):
# Telemetry parameter `--results-label ` to the runs.
extra_args = copy.copy(self._extra_args)
extra_args += ('--results-label', str(change))
- extra_swarming_tags = {'change': str(change)}
+ if '--story-filter' in extra_args:
+ # TODO(crbug.com/982027): The --run-full-story-set flag was added to
+ # Chromium at revision http://crrev.com/675459, on Jul 9th, 2019. If we
+ # use this flag for changes before then, then Telemetry will fail. We can
+ # move the following code to run without checking commit_position as part
+ # of the _ExtraTestArgs method once we no longer need to be able to run
+ # Pinpoint against changes that old.
+ commit_position = change.base_commit.AsDict()['commit_position']
+ if commit_position and commit_position >= 675459:
+ # Since benchmarks are run in abridged form by default, we need to
+ # add the argument --run-full-story-set to make sure that if someone
+ # chooses to run a specific story we will run it even if it is not
+ # in the abridged version of the story set.
+ extra_args.append('--run-full-story-set')
+ extra_swarming_tags = {'change': str(change)}
return self._Start(change, isolate_server, isolate_hash, extra_args,
extra_swarming_tags)
@@ -52,6 +65,16 @@ def _ExtraTestArgs(cls, arguments):
story = arguments.get('story')
if story:
+ # TODO(crbug.com/982027): Note that usage of "--run-full-story-set"
+ # and "--story-filter"
+ # may be replaced with --story= (no regex needed). Support
+ # for --story flag landed in
+ # https://chromium-review.googlesource.com/c/catapult/+/1869800
+ # (Oct 22, 2019)
+ # so we cannot turn this on by default until we no longer need to
+ # be able to run revisions older than that. In the meantime, the
+ # following argument plus the --run-full-story-set argument added in
+ # Start() accomplish the same thing.
extra_test_args += ('--story-filter', _StoryToRegex(story))
story_tags = arguments.get('story_tags')
diff --git a/dashboard/dashboard/pinpoint/models/quest/run_telemetry_test_test.py b/dashboard/dashboard/pinpoint/models/quest/run_telemetry_test_test.py
index d5f198c3da..ff2a4a71bc 100644
--- a/dashboard/dashboard/pinpoint/models/quest/run_telemetry_test_test.py
+++ b/dashboard/dashboard/pinpoint/models/quest/run_telemetry_test_test.py
@@ -7,7 +7,10 @@
from __future__ import absolute_import
import unittest
+import mock
+from dashboard.pinpoint.models.change import change as change_module
+from dashboard.pinpoint.models.change import commit
from dashboard.pinpoint.models.quest import run_performance_test
from dashboard.pinpoint.models.quest import run_telemetry_test
from dashboard.pinpoint.models.quest import run_test_test
@@ -52,15 +55,40 @@ def testSwarmingTags(self):
execution._swarming_tags, {'benchmark': 'speedometer',
'change': 'change', 'hasfilter': '0'})
- def testSwarmingTagsWithStoryFilter(self):
+ def testSwarmingTagsWithStoryFilter_BeforeR675459(self):
arguments = dict(_BASE_ARGUMENTS)
arguments['browser'] = 'android-webview'
arguments['story'] = 'sfilter'
quest = run_telemetry_test.RunTelemetryTest.FromDict(arguments)
- execution = quest.Start('change', 'https://isolate.server', 'isolate hash')
+ change = mock.MagicMock(spec=change_module.Change)
+ change.base_commit = mock.MagicMock(spec=commit.Commit)
+ change.base_commit.AsDict = mock.MagicMock(return_value={
+ 'commit_position': 675458})
+ with mock.patch('dashboard.pinpoint.models.quest.run_test.RunTest._Start',
+ wraps=quest._Start) as internal_start:
+ execution = quest.Start(change, 'https://isolate.server', 'isolate hash')
+ self.assertNotIn('--run-full-story-set', internal_start.call_args[0][3])
self.assertEqual(
execution._swarming_tags, {'benchmark': 'speedometer',
- 'change': 'change', 'hasfilter': '1',
+ 'change': str(change), 'hasfilter': '1',
+ 'storyfilter': 'sfilter'})
+
+ def testSwarmingTagsWithStoryFilter_AfterR675459(self):
+ arguments = dict(_BASE_ARGUMENTS)
+ arguments['browser'] = 'android-webview'
+ arguments['story'] = 'sfilter'
+ quest = run_telemetry_test.RunTelemetryTest.FromDict(arguments)
+ change = mock.MagicMock(spec=change_module.Change)
+ change.base_commit = mock.MagicMock(spec=commit.Commit)
+ change.base_commit.AsDict = mock.MagicMock(return_value={
+ 'commit_position': 675460})
+ with mock.patch('dashboard.pinpoint.models.quest.run_test.RunTest._Start',
+ wraps=quest._Start) as internal_start:
+ execution = quest.Start(change, 'https://isolate.server', 'isolate hash')
+ self.assertIn('--run-full-story-set', internal_start.call_args[0][3])
+ self.assertEqual(
+ execution._swarming_tags, {'benchmark': 'speedometer',
+ 'change': str(change), 'hasfilter': '1',
'storyfilter': 'sfilter'})
def testSwarmingTagsWithStoryTagFilter(self):
diff --git a/dashboard/dashboard/pinpoint/models/results2.py b/dashboard/dashboard/pinpoint/models/results2.py
index be834726af..fef050a61f 100644
--- a/dashboard/dashboard/pinpoint/models/results2.py
+++ b/dashboard/dashboard/pinpoint/models/results2.py
@@ -163,5 +163,5 @@ def _JsonFromExecution(execution):
else:
results_filename = 'chartjson-output.json'
- return read_value._RetrieveOutputJson(
+ return read_value.RetrieveOutputJson(
isolate_server, isolate_hash, results_filename)
diff --git a/dashboard/dashboard/pinpoint/models/tasks/performance_bisection.py b/dashboard/dashboard/pinpoint/models/tasks/performance_bisection.py
new file mode 100644
index 0000000000..3eed5f87a4
--- /dev/null
+++ b/dashboard/dashboard/pinpoint/models/tasks/performance_bisection.py
@@ -0,0 +1,514 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import collections
+import itertools
+import logging
+import math
+
+from dashboard.common import math_utils
+from dashboard.pinpoint.models import change as change_module
+from dashboard.pinpoint.models import compare
+from dashboard.pinpoint.models import evaluators
+from dashboard.pinpoint.models import exploration
+from dashboard.pinpoint.models import task as task_module
+from dashboard.pinpoint.models.tasks import find_isolate
+from dashboard.pinpoint.models.tasks import read_value
+from dashboard.pinpoint.models.tasks import run_test
+from dashboard.services import gitiles_service
+
+_DEFAULT_SPECULATION_LEVELS = 2
+
+AnalysisOptions = collections.namedtuple('AnalysisOptions', (
+ 'comparison_magnitude',
+ 'min_attempts',
+ 'max_attempts',
+))
+
+BuildOptionTemplate = collections.namedtuple('BuildOptionTemplate',
+ ('builder', 'target', 'bucket'))
+
+TestOptionTemplate = collections.namedtuple(
+ 'TestOptionTemplate', ('swarming_server', 'dimensions', 'extra_args'))
+
+ReadOptionTemplate = collections.namedtuple(
+ 'ReadOptionTemplate',
+ ('benchmark', 'histogram_options', 'graph_json_options', 'mode'))
+
+TaskOptions = collections.namedtuple(
+ 'TaskOptions',
+ ('build_option_template', 'test_option_template', 'read_option_template',
+ 'analysis_options', 'start_change', 'end_change', 'pinned_change'))
+
+
+def _CreateReadTaskOptions(build_option_template, test_option_template,
+ read_option_template, analysis_options, change):
+ return read_value.TaskOptions(
+ test_options=run_test.TaskOptions(
+ build_options=find_isolate.TaskOptions(
+ change=change, **build_option_template._asdict()),
+ attempts=analysis_options.min_attempts,
+ **test_option_template._asdict()),
+ **read_option_template._asdict())
+
+
+def CreateGraph(options):
+ if not isinstance(options, TaskOptions):
+ raise ValueError(
+ 'options must be an instance of performance_bisection.TaskOptions')
+
+ start_change = options.start_change
+ end_change = options.end_change
+ if options.pinned_change:
+ start_change.Update(options.pinned_change)
+ end_change.Update(options.pinned_change)
+
+ start_change_read_options = _CreateReadTaskOptions(
+ options.build_option_template, options.test_option_template,
+ options.read_option_template, options.analysis_options, start_change)
+ end_change_read_options = _CreateReadTaskOptions(
+ options.build_option_template, options.test_option_template,
+ options.read_option_template, options.analysis_options, end_change)
+
+ # Given the start_change and end_change, we create two subgraphs that we
+ # depend on from the 'find_culprit' task. This means we'll need to create
+ # independent test options and build options from the template provided by the
+ # caller.
+ start_subgraph = read_value.CreateGraph(start_change_read_options)
+ end_subgraph = read_value.CreateGraph(end_change_read_options)
+
+ # Then we add a dependency from the 'FindCulprit' task with the payload
+ # describing the options set for the performance bisection.
+ find_culprit_task = task_module.TaskVertex(
+ id='performance_bisection',
+ vertex_type='find_culprit',
+ payload={
+ 'start_change':
+ options.start_change.AsDict(),
+ 'end_change':
+ options.end_change.AsDict(),
+ 'pinned_change':
+ options.pinned_change.AsDict() if options.pinned_change else None,
+ # We still persist the templates, because we'll need that data in case
+ # we are going to extend the graph with the same build/test templates
+ # in subgraphs.
+ 'analysis_options':
+ options.analysis_options._asdict(),
+ 'build_option_template':
+ options.build_option_template._asdict(),
+ 'test_option_template':
+ options.test_option_template._asdict(),
+ 'read_option_template': {
+ 'histogram_options':
+ options.read_option_template.histogram_options._asdict(),
+ 'graph_json_options':
+ options.read_option_template.graph_json_options._asdict(),
+ 'benchmark':
+ options.read_option_template.benchmark,
+ 'mode':
+ options.read_option_template.mode,
+ },
+ })
+ return task_module.TaskGraph(
+ vertices=list(
+ itertools.chain(start_subgraph.vertices, end_subgraph.vertices)) +
+ [find_culprit_task],
+ edges=list(itertools.chain(start_subgraph.edges, end_subgraph.edges)) + [
+ task_module.Dependency(from_=find_culprit_task.id, to=v.id)
+ for v in itertools.chain(start_subgraph.vertices,
+ end_subgraph.vertices)
+ if v.vertex_type == 'read_value'
+ ])
+
+
+class PrepareCommits(collections.namedtuple('PrepareCommits', ('job', 'task'))):
+ # Save memory and avoid unnecessarily adding more attributes to objects of
+ # this type.
+ __slots__ = ()
+
+ @task_module.LogStateTransitionFailures
+ def __call__(self, _):
+ start_change = change_module.Change.FromDict(
+ self.task.payload['start_change'])
+ end_change = change_module.Change.FromDict(self.task.payload['end_change'])
+ try:
+ # We're storing this once, so that we don't need to always get this when
+ # working with the individual commits. This reduces our reliance on
+ # datastore operations throughout the course of handling the culprit
+ # finding process.
+ #
+ # TODO(dberris): Expand the commits into the full table of dependencies?
+ # Because every commit in the chromium repository is likely to be building
+ # against different versions of the dependencies (v8, skia, etc.)
+ # we'd need to expand the concept of a changelist (CL, or Change in the
+ # Pinpoint codebase) so that we know which versions of the dependencies to
+ # use in specific CLs. Once we have this, we might be able to operate
+ # cleanly on just Change instances instead of just raw commits.
+ #
+ # TODO(dberris): Model the "merge-commit" like nature of auto-roll CLs by
+ # allowing the preparation action to model the non-linearity of the
+ # history. This means we'll need a concept of levels, where changes in a
+ # single repository history (the main one) operates at a higher level
+ # linearly, and if we're descending into rolls that we're exploring a
+ # lower level in the linear history. This is similar to the following
+ # diagram:
+ #
+ # main -> m0 -> m1 -> m2 -> roll0 -> m3 -> ...
+ # |
+ # dependency .............. +-> d0 -> d1
+ #
+ # Ideally we'll already have this expanded before we go ahead and perform
+ # a bisection, to amortise the cost of making requests to back-end
+ # services for this kind of information in tight loops.
+ commits = change_module.Commit.CommitRange(start_change.base_commit,
+ end_change.base_commit)
+ self.task.payload.update({
+ 'commits': [
+ collections.OrderedDict(
+ [('repository', start_change.base_commit.repository),
+ ('git_hash', start_change.base_commit.git_hash)])
+ ] + [
+ collections.OrderedDict(
+ [('repository', start_change.base_commit.repository),
+ ('git_hash', commit['commit'])])
+ for commit in reversed(commits)
+ ]
+ })
+ task_module.UpdateTask(
+ self.job,
+ self.task.id,
+ new_state='ongoing',
+ payload=self.task.payload)
+ except gitiles_service.NotFoundError as e:
+ # TODO(dberris): We need to be more resilient to intermittent failures
+ # from the Gitiles service here.
+ self.task.payload.update({
+ 'errors':
+ self.task.payload.get('errors', []) + [{
+ 'reason': 'GitilesFetchError',
+ 'message': e.message
+ }]
+ })
+ task_module.UpdateTask(
+ self.job, self.task.id, new_state='failed', payload=self.task.payload)
+
+ def __str__(self):
+    return 'PrepareCommits(job = %s, task = %s)' % (self.job.job_id,
+
+
+
+class RefineExplorationAction(
+ collections.namedtuple('RefineExplorationAction',
+ ('job', 'task', 'change', 'additional_attempts'))):
+ __slots__ = ()
+
+ def __str__(self):
+ return ('RefineExplorationAction(job = %s, task = %s, change = %s, +%s '
+ 'attempts)') % (self.job.job_id, self.task.id,
+ self.change.id_string, self.additional_attempts)
+
+ def __call__(self, accumulator):
+ # Outline:
+ # - Given the job and task, extend the TaskGraph to add new tasks and
+ # dependencies, being careful to filter the IDs from what we already see
+ # in the accumulator to avoid graph amendment errors.
+ # - If we do encounter graph amendment errors, we should log those and not
+ # block progress because that can only happen if there's concurrent
+ # updates being performed with the same actions.
+ build_option_template = BuildOptionTemplate(
+ **self.task.payload.get('build_option_template'))
+ test_option_template = TestOptionTemplate(
+ **self.task.payload.get('test_option_template'))
+
+ # The ReadOptionTemplate is special because it has nested structures, so
+ # we'll have to reconstitute those accordingly.
+ read_option_template_map = self.task.payload.get('read_option_template')
+ read_option_template = ReadOptionTemplate(
+ benchmark=self.task.payload.get('read_option_template').get(
+ 'benchmark'),
+ histogram_options=read_value.HistogramOptions(
+ **read_option_template_map.get('histogram_options')),
+ graph_json_options=read_value.GraphJsonOptions(
+ **read_option_template_map.get('graph_json_options')),
+ mode=read_option_template_map.get('mode'))
+
+ analysis_options_dict = self.task.payload.get('analysis_options')
+ if self.additional_attempts:
+ analysis_options_dict['min_attempts'] = min(
+ analysis_options_dict.get('min_attempts', 0) +
+ self.additional_attempts,
+ analysis_options_dict.get('max_attempts', 100))
+ analysis_options = AnalysisOptions(**analysis_options_dict)
+
+ new_subgraph = read_value.CreateGraph(
+ _CreateReadTaskOptions(build_option_template, test_option_template,
+ read_option_template, analysis_options,
+ self.change))
+ try:
+ task_module.ExtendTaskGraph(
+ self.job,
+ vertices=[
+ # Add all of the new vertices we do not have in the graph yet.
+ v for v in new_subgraph.vertices if v.id not in accumulator
+ ],
+ dependencies=[
+ # Only add dependencies to the new 'read_value' tasks.
+ task_module.Dependency(from_=self.task.id, to=v.id)
+ for v in new_subgraph.vertices
+ if v.id not in accumulator and v.vertex_type == 'read_value'
+ ])
+ except task_module.InvalidAmendment as e:
+ logging.error('Failed to amend graph: %s', e)
+
+
+class CompleteExplorationAction(
+ collections.namedtuple('CompleteExplorationAction',
+ ('job', 'task', 'state'))):
+ __slots__ = ()
+
+ def __str__(self):
+ return 'CompleteExplorationAction(job = %s, task = %s, state = %s)' % (
+ self.job.job_id, self.task.id, self.state)
+
+ @task_module.LogStateTransitionFailures
+ def __call__(self, accumulator):
+ # TODO(dberris): Maybe consider cancelling outstanding actions? Here we'll
+ # need a way of synthesising actions if we want to force the continuation of
+ # a task graph's evaluation.
+ task_module.UpdateTask(
+ self.job, self.task.id, new_state=self.state, payload=self.task.payload)
+
+
+class FindCulprit(collections.namedtuple('FindCulprit', ('job',))):
+ __slots__ = ()
+
+ def __call__(self, task, event, accumulator):
+ # Outline:
+ # - If the task is still pending, this means this is the first time we're
+ # encountering the task in an evaluation. Set up the payload data to
+ # include the full range of commits, so that we load it once and have it
+ # ready, and emit an action to mark the task ongoing.
+ #
+ # - If the task is ongoing, gather all the dependency data (both results
+ # and status) and see whether we have enough data to determine the next
+ # action. We have three main cases:
+ #
+ # 1. We cannot detect a significant difference between the results from
+ # two different CLs. We call this the NoReproduction case.
+ #
+ # 2. We do not have enough confidence that there's a difference. We call
+ # this the Indeterminate case.
+ #
+ # 3. We have enough confidence that there's a difference between any two
+ # ordered changes. We call this the SignificantChange case.
+ #
+ # - Delegate the implementation to handle the independent cases for each
+ # change point we find in the CL continuum.
+ if task.status == 'pending':
+ return [PrepareCommits(self.job, task)]
+
+ if task.status == 'ongoing':
+ # TODO(dberris): Validate and fail gracefully instead of asserting?
+ assert 'commits' in task.payload, ('Programming error, need commits to '
+ 'proceed!')
+
+ # Collect all the dependency task data and analyse the results.
+ # Group them by change.
+ # Order them by appearance in the CL range.
+ # Also count the status per CL (failed, ongoing, etc.)
+ deps = set(task.dependencies)
+ results_by_change = collections.defaultdict(list)
+ status_by_change = collections.defaultdict(dict)
+ changes_with_data = set()
+ changes_by_status = collections.defaultdict(set)
+
+ # TODO(dberris): Determine a better way of creating these Change objects
+ # which doesn't involve these .FromDict(...) calls which might force calls
+ # to back-end services.
+ associated_results = [(change_module.Change.FromDict(t.get('change')),
+ t.get('status'), t.get('result_values'))
+ for dep, t in accumulator.items()
+ if dep in deps]
+ for change, status, result_values in associated_results:
+ if result_values:
+ filtered_results = [r for r in result_values if r is not None]
+ if filtered_results:
+ results_by_change[change].append(filtered_results)
+ status_by_change[change].update({
+ status: status_by_change[change].get(status, 0) + 1,
+ })
+ changes_by_status[status].add(change)
+ if status not in {'ongoing', 'pending'}:
+ changes_with_data.add(change)
+
+ # If the dependencies have converged into a single status, we can make
+ # decisions on the terminal state of the bisection.
+ if len(changes_by_status) == 1 and changes_with_data:
+
+ # Check whether all dependencies are completed and if we do
+ # not have data in any of the dependencies.
+ if changes_by_status['completed'] == changes_with_data:
+ changes_with_empty_results = [
+ change for change in changes_with_data
+ if not results_by_change[change]
+ ]
+ if changes_with_empty_results:
+ task.payload.update({
+ 'errors':
+ task.payload.get('errors', []) + [{
+ 'reason':
+ 'BisectionFailed',
+ 'message': ('We did not find any results from '
+ 'successful test runs.')
+ }]
+ })
+ return [CompleteExplorationAction(self.job, task, 'failed')]
+
+ # Check whether all the dependencies had the tests fail consistently.
+ if changes_by_status['failed'] == changes_with_data:
+ task.payload.update({
+ 'errors':
+ task.payload.get('errors', []) + [{
+ 'reason': 'BisectionFailed',
+ 'message': 'All attempts in all dependencies failed.'
+ }]
+ })
+ return [CompleteExplorationAction(self.job, task, 'failed')]
+
+ # We want to reduce the list of ordered changes to only the ones that have
+ # data available.
+ all_changes = [
+ change_module.Change(
+ commits=[
+ change_module.Commit(
+ repository=commit.get('repository'),
+ git_hash=commit.get('git_hash'))
+ ],
+ patch=task.payload.get('pinned_change'))
+ for commit in task.payload.get('commits', [])
+ ]
+ change_index = {change: index for index, change in enumerate(all_changes)}
+ ordered_changes = [c for c in all_changes if c in changes_with_data]
+
+ if len(ordered_changes) < 2:
+ # We do not have enough data yet to determine whether we should do
+ # anything.
+ return None
+
+ # From here we can then do the analysis on a pairwise basis, as we're
+ # going through the list of Change instances we have data for.
+ # NOTE: A lot of this algorithm is already in pinpoint/models/job_state.py
+ # which we're adapting.
+ def Compare(a, b):
+ # This is the comparison function which determines whether the samples
+ # we have from the two changes (a and b) are statistically significant.
+ if 'pending' in status_by_change[a] or 'pending' in status_by_change[b]:
+ return compare.PENDING
+
+ # NOTE: Here we're attempting to scale the provided comparison magnitude
+ # threshold by using the central tendencies (means) of the resulting
+ # values from individual test attempt results, and scaling those by the
+ # larger inter-quartile range (a measure of dispersion, simply computed
+ # as the 75th percentile minus the 25th percentile). The reason we're
+ # doing this is so that we can scale the tolerance according to the
+ # noise inherent in the measurements -- i.e. more noisy measurements
+ # will require a larger difference for us to consider statistically
+ # significant.
+ #
+ # NOTE: We've changed this computation to consider the consolidated
+ # measurements for a single change, instead of looking at the means,
+ # since we cannot assume that the means can be relied on as a good
+ # measure of central tendency for small sample sizes.
+ values_for_a = tuple(itertools.chain(*results_by_change[a]))
+ values_for_b = tuple(itertools.chain(*results_by_change[b]))
+ max_iqr = max(
+ math_utils.Iqr(values_for_a), math_utils.Iqr(values_for_b), 0.001)
+        comparison_magnitude = task.payload.get('analysis_options', {}).get(
+            'comparison_magnitude', 1.0) / max_iqr
+ attempts = (len(values_for_a) + len(values_for_b)) // 2
+ return compare.Compare(values_for_a, values_for_b, attempts,
+ 'performance', comparison_magnitude)
+
+ def DetectChange(change_a, change_b):
+ # We return None if the comparison determines that the result is
+ # inconclusive. This is required by the exploration.Speculate contract.
+ comparison = Compare(change_a, change_b)
+ if comparison == compare.UNKNOWN:
+ return None
+ return comparison == compare.DIFFERENT
+
+ changes_to_refine = []
+
+ def CollectChangesToRefine(a, b):
+ # Here we're collecting changes that need refinement, which happens when
+ # two changes when compared yield the "unknown" result.
+ attempts_for_a = sum(status_by_change[a].values())
+ attempts_for_b = sum(status_by_change[b].values())
+ if min(attempts_for_a, attempts_for_b) == task.payload.get(
+ 'analysis_options').get('max_attempts'):
+ return None
+
+ # Grow the attempts by 50% every time when increasing attempt counts.
+ # This number is arbitrary, and we should probably use something like a
+ # Fibonacci sequence when scaling attempt counts.
+ additional_attempts = int(
+ math.floor(min(attempts_for_a, attempts_for_b) * 0.5))
+ changes_to_refine.append(
+ (a if attempts_for_a <= attempts_for_b else b, additional_attempts))
+ return None
+
+ def FindMidpoint(a, b):
+ # Here we use the (very simple) midpoint finding algorithm given that we
+ # already have the full range of commits to bisect through.
+ a_index = change_index[a]
+ b_index = change_index[b]
+ subrange = all_changes[a_index:b_index + 1]
+ return None if len(subrange) <= 2 else subrange[len(subrange) // 2]
+
+ additional_changes = exploration.Speculate(
+ ordered_changes,
+ change_detected=DetectChange,
+ on_unknown=CollectChangesToRefine,
+ midpoint=FindMidpoint,
+ levels=_DEFAULT_SPECULATION_LEVELS)
+
+ # At this point we can collect the actions to extend the task graph based
+ # on the results of the speculation.
+ actions = [
+ RefineExplorationAction(self.job, task, change, more_attempts)
+ for change, more_attempts in itertools.chain(
+ [(c, 0) for _, c in additional_changes],
+ [(c, a) for c, a in changes_to_refine],
+ )
+ ]
+
+ # Here we collect the points where we've found the changes.
+ def Pairwise(iterable):
+ """s -> (s0, s1), (s1, s2), (s2, s3), ..."""
+ a, b = itertools.tee(iterable)
+ next(b, None)
+      return zip(a, b)
+
+ task.payload.update({
+ 'culprits': [(a.AsDict(), b.AsDict())
+ for a, b in Pairwise(ordered_changes)
+ if DetectChange(a, b)]
+ })
+ if not actions:
+ # Mark this operation complete, storing the differences we can compute.
+ actions = [CompleteExplorationAction(self.job, task, 'completed')]
+ return actions
+
+
+class Evaluator(evaluators.FilteringEvaluator):
+
+ def __init__(self, job):
+ super(Evaluator, self).__init__(
+ predicate=evaluators.All(
+ evaluators.TaskTypeEq('find_culprit'),
+ evaluators.Not(evaluators.TaskStatusIn({'completed', 'failed'}))),
+ delegate=FindCulprit(job))
diff --git a/dashboard/dashboard/pinpoint/models/tasks/performance_bisection_test.py b/dashboard/dashboard/pinpoint/models/tasks/performance_bisection_test.py
new file mode 100644
index 0000000000..b2a11ac3fc
--- /dev/null
+++ b/dashboard/dashboard/pinpoint/models/tasks/performance_bisection_test.py
@@ -0,0 +1,330 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+
+import collections
+import logging
+
+from dashboard.pinpoint import test
+from dashboard.pinpoint.models import change as change_module
+from dashboard.pinpoint.models import job as job_module
+from dashboard.pinpoint.models import task as task_module
+from dashboard.pinpoint.models import evaluators
+from dashboard.pinpoint.models import event as event_module
+from dashboard.pinpoint.models.tasks import performance_bisection
+from dashboard.pinpoint.models.tasks import read_value
+
+
+def SelectEvent():
+ return event_module.Event(type='select', target_task=None, payload={})
+
+
+class EvaluatorTest(test.TestCase):
+
+ def setUp(self):
+ super(EvaluatorTest, self).setUp()
+ self.maxDiff = None
+ self.job = job_module.Job.New((), ())
+
+ def PopulateSimpleBisectionGraph(self):
+ """Helper function to populate a task graph representing a bisection.
+
+ This function will populate the following graph on the associated job
+ initialised in the setUp function:
+
+ find_culprit
+ | |
+ | +--> read_value(start_cl, [0..min_attempts])
+ | |
+ | +--> run_test(start_cl, [0..min_attempts])
+ | |
+ | +--> find_isolate(start_cl)
+ |
+ +--> read_value(end_cl, [0..min_attempts])
+ |
+ +--> run_test(end_cl, [0..min_attempts])
+ |
+ +--> find_isolate(end_cl)
+
+
+ This is the starting point for all bisections on which we expect the
+ evaluator implementation will be operating with. In this specific case,
+ we're setting min_attempts at 10 and max_attempts at 100, then using the
+ special `commit_0` and `commit_5` git hashes as the range to bisect over.
+ The test base class sets up special meanings for these pseudo-hashes and all
+ infrastructure related to expanding that range.
+ """
+
+ task_module.PopulateTaskGraph(
+ self.job,
+ performance_bisection.CreateGraph(
+ performance_bisection.TaskOptions(
+ build_option_template=performance_bisection.BuildOptionTemplate(
+ builder='Some Builder',
+ target='performance_telemetry_test',
+ bucket='luci.bucket'),
+ test_option_template=performance_bisection.TestOptionTemplate(
+ swarming_server='some_server',
+ dimensions=[],
+ extra_args=[],
+ ),
+ read_option_template=performance_bisection.ReadOptionTemplate(
+ benchmark='some_benchmark',
+ histogram_options=read_value.HistogramOptions(
+ grouping_label='some_label',
+ story='some_story',
+ statistic='avg',
+ ),
+ graph_json_options=read_value.GraphJsonOptions(
+ chart='some_chart',
+ trace='some_trace',
+ ),
+ mode='histogram_sets'),
+ analysis_options=performance_bisection.AnalysisOptions(
+ comparison_magnitude=1.0,
+ min_attempts=10,
+ max_attempts=100,
+ ),
+ start_change=change_module.Change.FromDict({
+ 'commits': [{
+ 'repository': 'chromium',
+ 'git_hash': 'commit_0'
+ }]
+ }),
+ end_change=change_module.Change.FromDict({
+ 'commits': [{
+ 'repository': 'chromium',
+ 'git_hash': 'commit_5'
+ }]
+ }),
+ pinned_change=None,
+ )))
+
+ def CompoundEvaluatorForTesting(self, fake_evaluator):
+ return evaluators.SequenceEvaluator([
+ evaluators.FilteringEvaluator(
+ predicate=evaluators.All(
+ evaluators.TaskTypeEq('read_value'),
+ evaluators.TaskStatusIn({'pending'})),
+ delegate=evaluators.SequenceEvaluator(
+ [fake_evaluator,
+ evaluators.TaskPayloadLiftingEvaluator()])),
+ evaluators.SequenceEvaluator([
+ performance_bisection.Evaluator(self.job),
+ evaluators.TaskPayloadLiftingEvaluator(exclude_keys={'commits'})
+ ]),
+ ])
+
+ def testPopulateWorks(self):
+ self.PopulateSimpleBisectionGraph()
+
+ def testEvaluateSuccess_NoReproduction(self):
+ self.PopulateSimpleBisectionGraph()
+ task_module.Evaluate(
+ self.job,
+ event_module.Event(type='initiate', target_task=None, payload={}),
+ self.CompoundEvaluatorForTesting(
+ FakeReadValueSameResult(self.job, 1.0)))
+ evaluate_result = task_module.Evaluate(
+ self.job,
+ event_module.Event(type='select', target_task=None, payload={}),
+ evaluators.Selector(task_type='find_culprit'))
+ self.assertIn('performance_bisection', evaluate_result)
+ logging.info('Results: %s', evaluate_result['performance_bisection'])
+    self.assertEqual(evaluate_result['performance_bisection']['culprits'], [])
+
+ def testEvaluateSuccess_SpeculateBisection(self):
+ self.PopulateSimpleBisectionGraph()
+ task_module.Evaluate(
+ self.job,
+ event_module.Event(type='initiate', target_task=None, payload={}),
+ self.CompoundEvaluatorForTesting(
+ FakeReadValueMapResult(
+ self.job, {
+ change_module.Change.FromDict({
+ 'commits': [{
+ 'repository': 'chromium',
+ 'git_hash': commit
+ }]
+ }): values for commit, values in (
+ ('commit_0', [1.0] * 10),
+ ('commit_1', [1.0] * 10),
+ ('commit_2', [2.0] * 10),
+ ('commit_3', [2.0] * 10),
+ ('commit_4', [2.0] * 10),
+ ('commit_5', [2.0] * 10),
+ )
+ })))
+ evaluate_result = task_module.Evaluate(
+ self.job, SelectEvent(), evaluators.Selector(task_type='find_culprit'))
+ self.assertIn('performance_bisection', evaluate_result)
+ logging.info('Results: %s', evaluate_result['performance_bisection'])
+
+ # Here we're testing that we can find the change between commit_1 and
+ # commit_2 in the values we seed above.
+    self.assertEqual(evaluate_result['performance_bisection']['culprits'], [[
+ change_module.Change.FromDict({
+ 'commits': [{
+ 'repository': 'chromium',
+ 'git_hash': 'commit_1'
+ }]
+ }).AsDict(),
+ change_module.Change.FromDict({
+ 'commits': [{
+ 'repository': 'chromium',
+ 'git_hash': 'commit_2'
+ }]
+ }).AsDict()
+ ]])
+
+ def testEvaluateSuccess_NeedToRefineAttempts(self):
+ self.PopulateSimpleBisectionGraph()
+ task_module.Evaluate(
+ self.job,
+ event_module.Event(type='initiate', target_task=None, payload={}),
+ self.CompoundEvaluatorForTesting(
+ FakeReadValueMapResult(
+ self.job, {
+ change_module.Change.FromDict({
+ 'commits': [{
+ 'repository': 'chromium',
+ 'git_hash': commit
+ }]
+ }): values for commit, values in (
+ ('commit_0', range(10)),
+ ('commit_1', range(10)),
+ ('commit_2', range(4, 14)),
+ ('commit_3', range(3, 13)),
+ ('commit_4', range(3, 13)),
+ ('commit_5', range(3, 13)),
+ )
+ })))
+
+ # Here we test that we have more than the minimum attempts for the change
+ # between commit_1 and commit_2.
+ evaluate_result = task_module.Evaluate(
+ self.job, SelectEvent(), evaluators.Selector(task_type='read_value'))
+ attempt_counts = {}
+ for payload in evaluate_result.values():
+ change = change_module.Change.FromDict(payload.get('change'))
+ attempt_counts[change] = attempt_counts.get(change, 0) + 1
+ self.assertGreater(
+ attempt_counts[change_module.Change.FromDict(
+ {'commits': [{
+ 'repository': 'chromium',
+ 'git_hash': 'commit_2',
+ }]})], 10)
+ self.assertLess(
+ attempt_counts[change_module.Change.FromDict(
+ {'commits': [{
+ 'repository': 'chromium',
+ 'git_hash': 'commit_2',
+ }]})], 100)
+
+ # We know that we will never get a deterministic answer, so we ensure that
+ # we don't inadvertently blame the wrong changes at the end of the
+ # refinement.
+ evaluate_result = task_module.Evaluate(
+ self.job, SelectEvent(), evaluators.Selector(task_type='find_culprit'))
+ self.assertIn('performance_bisection', evaluate_result)
+ logging.info('Results: %s', evaluate_result['performance_bisection'])
+    self.assertEqual(evaluate_result['performance_bisection']['culprits'], [])
+
+ def testEvaluateFailure_DependenciesFailed(self):
+ self.PopulateSimpleBisectionGraph()
+ task_module.Evaluate(
+ self.job,
+ event_module.Event(type='initiate', target_task=None, payload={}),
+ self.CompoundEvaluatorForTesting(FakeReadValueFails(self.job)))
+ evaluate_result = task_module.Evaluate(
+ self.job, SelectEvent(), evaluators.Selector(task_type='find_culprit'))
+ self.assertIn('performance_bisection', evaluate_result)
+ self.assertEqual(evaluate_result['performance_bisection']['status'],
+ 'failed')
+ self.assertNotEqual([], evaluate_result['performance_bisection']['errors'])
+
+ def testEvaluateFailure_DependenciesNoResults(self):
+ self.PopulateSimpleBisectionGraph()
+ task_module.Evaluate(
+ self.job,
+ event_module.Event(type='initiate', target_task=None, payload={}),
+ self.CompoundEvaluatorForTesting(
+ FakeReadValueSameResult(self.job, None)))
+ evaluate_result = task_module.Evaluate(
+ self.job, SelectEvent(), evaluators.Selector(task_type='find_culprit'))
+ self.assertIn('performance_bisection', evaluate_result)
+ self.assertEqual(evaluate_result['performance_bisection']['status'],
+ 'failed')
+ self.assertNotEqual([], evaluate_result['performance_bisection']['errors'])
+
+ def testEvaluateAmbiguous_IntermediatePartialFailure(self):
+ self.skipTest(
+ 'Implement the case where intermediary builds/tests failed but we can '
+ 'find some non-failing intermediary CLs')
+
+ def testEvaluateAmbiguous_IntermediateCulpritIsAutoRoll(self):
+ self.skipTest(
+ 'Implement the case where the likely culprit is an auto-roll commit, '
+ 'in which case we want to embellish the commit range with commits '
+ 'from the remote repositories')
+
+ def testEvaluateAmbiguous_IntermediateCulpritFound_CancelOngoing(self):
+ self.skipTest(
+ 'Implement the case where we have already found a culprit and we still '
+ 'have ongoing builds/tests running but have the chance to cancel '
+ 'those.')
+
+ def testEvaluateFailure_ExtentClsFailed(self):
+ self.skipTest(
+ 'Implement the case where either the start or end commits are broken.')
+
+
+class FakeReadValueSameResult(
+ collections.namedtuple('FakeReadValueSameResult', (
+ 'job',
+ 'result',
+ ))):
+ __slots__ = ()
+
+ def __call__(self, task, *_):
+ task.payload.update({'result_values': [self.result]})
+ return [
+ lambda _: task_module.UpdateTask(
+ self.job, task.id, new_state='completed', payload=task.payload)
+ ]
+
+
+class FakeReadValueFails(collections.namedtuple('FakeReadValueFails',
+                                                ('job',))):
+ __slots__ = ()
+
+ def __call__(self, task, *_):
+ task.payload.update({
+ 'errors': [{
+ 'reason': 'SomeReason',
+ 'message': 'This is a message explaining things.',
+ }]
+ })
+ return [
+ lambda _: task_module.UpdateTask(
+ self.job, task.id, new_state='failed', payload=task.payload)
+ ]
+
+
+class FakeReadValueMapResult(
+ collections.namedtuple('FakeReadValueMapResult', ('job', 'value_map'))):
+ __slots__ = ()
+
+ def __call__(self, task, *_):
+ task.payload.update({
+ 'result_values':
+ self.value_map[change_module.Change.FromDict(
+ task.payload.get('change'))]
+ })
+ return [
+ lambda _: task_module.UpdateTask(
+ self.job, task.id, new_state='completed', payload=task.payload)
+ ]
diff --git a/dashboard/dashboard/pinpoint/models/tasks/read_value.py b/dashboard/dashboard/pinpoint/models/tasks/read_value.py
index c0d535f66f..13a1b70602 100644
--- a/dashboard/dashboard/pinpoint/models/tasks/read_value.py
+++ b/dashboard/dashboard/pinpoint/models/tasks/read_value.py
@@ -20,13 +20,14 @@
from dashboard.pinpoint.models.tasks import run_test
from tracing.value import histogram_set
-HistogramOptions = collections.namedtuple('HistogramOptions',
- ('tir_label', 'story', 'statistic'))
+HistogramOptions = collections.namedtuple(
+ 'HistogramOptions', ('grouping_label', 'story', 'statistic'))
-GraphJsonOptions = collections.namedtuple('GraphJsonOptions', ('trace'))
+GraphJsonOptions = collections.namedtuple('GraphJsonOptions',
+ ('chart', 'trace'))
TaskOptions = collections.namedtuple(
- 'TaskOptions', ('test_options', 'benchmark', 'chart', 'histogram_options',
+ 'TaskOptions', ('test_options', 'benchmark', 'histogram_options',
'graph_json_options', 'mode'))
@@ -98,19 +99,20 @@ def __call__(self, task, _, accumulator):
def HandleHistogramSets(self, task, histogram_dicts):
histogram_name = task.payload.get('benchmark')
- tir_label = task.payload.get('histogram_options', {}).get('tir_label', '')
- story = task.payload.get('histogram_options', {}).get('story', '')
- statistic = task.payload.get('histogram_options', {}).get('statistic', '')
+ histogram_options = task.payload.get('histogram_options', {})
+ grouping_label = histogram_options.get('grouping_label', '')
+ story = histogram_options.get('story', '')
+ statistic = histogram_options.get('statistic', '')
histograms = histogram_set.HistogramSet()
histograms.ImportDicts(histogram_dicts)
histograms_by_path = read_value_quest.CreateHistogramSetByTestPathDict(
histograms)
trace_urls = read_value_quest.FindTraceUrls(histograms)
test_path_to_match = histogram_helpers.ComputeTestPathFromComponents(
- histogram_name, tir_label=tir_label, story_name=story)
+ histogram_name, grouping_label=grouping_label, story_name=story)
logging.debug('Test path to match: %s', test_path_to_match)
result_values = read_value_quest.ExtractValuesFromHistograms(
- test_path_to_match, histograms_by_path, histogram_name, tir_label,
+ test_path_to_match, histograms_by_path, histogram_name, grouping_label,
story, statistic)
logging.debug('Results: %s', result_values)
task.payload.update({
@@ -175,7 +177,7 @@ def GenerateVertexAndDep(attempts):
change_id = find_isolate.ChangeId(
options.test_options.build_options.change)
read_value_id = 'read_value_%s_%s' % (change_id, attempt)
- run_test_id = 'run_test_%s_%s' % (change_id, attempt)
+ run_test_id = run_test.TaskId(change_id, attempt)
yield (task_module.TaskVertex(
id=read_value_id,
vertex_type='read_value',
@@ -184,14 +186,15 @@ def GenerateVertexAndDep(attempts):
'mode': options.mode,
'results_filename': path,
'histogram_options': {
- 'tir_label': options.histogram_options.tir_label,
+ 'grouping_label': options.histogram_options.grouping_label,
'story': options.histogram_options.story,
'statistic': options.histogram_options.statistic,
},
'graph_json_options': {
- 'chart': options.chart,
+ 'chart': options.graph_json_options.chart,
'trace': options.graph_json_options.trace
- }
+ },
+ 'change': options.test_options.build_options.change.AsDict(),
}), task_module.Dependency(from_=read_value_id, to=run_test_id))
for vertex, edge in GenerateVertexAndDep(options.test_options.attempts):
diff --git a/dashboard/dashboard/pinpoint/models/tasks/read_value_test.py b/dashboard/dashboard/pinpoint/models/tasks/read_value_test.py
index 8224779c0d..7990491189 100644
--- a/dashboard/dashboard/pinpoint/models/tasks/read_value_test.py
+++ b/dashboard/dashboard/pinpoint/models/tasks/read_value_test.py
@@ -54,7 +54,7 @@ def setUp(self):
def PopulateTaskGraph(self,
benchmark=None,
chart=None,
- tir_label=None,
+ grouping_label=None,
story=None,
statistic=None,
trace='some_trace',
@@ -79,13 +79,13 @@ def PopulateTaskGraph(self,
extra_args=[],
attempts=10),
benchmark=benchmark,
- chart=chart,
histogram_options=read_value.HistogramOptions(
- tir_label=tir_label,
+ grouping_label=grouping_label,
story=story,
statistic=statistic,
),
- graph_json_options=read_value.GraphJsonOptions(trace=trace),
+ graph_json_options=read_value.GraphJsonOptions(
+ chart=chart, trace=trace),
mode=mode,
)))
@@ -98,7 +98,7 @@ def testEvaluateSuccess_WithData(self, isolate_retrieve):
histograms = histogram_set.HistogramSet([histogram])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
isolate_retrieve.side_effect = itertools.chain(
@@ -110,7 +110,7 @@ def testEvaluateSuccess_WithData(self, isolate_retrieve):
self.PopulateTaskGraph(
benchmark='some_benchmark',
chart='some_chart',
- tir_label='tir_label',
+ grouping_label='label',
story='story')
self.assertNotEqual({},
task_module.Evaluate(
@@ -125,10 +125,11 @@ def testEvaluateSuccess_WithData(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'histogram_sets',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': 'tir_label',
+ 'grouping_label': 'label',
'story': 'story',
'statistic': None,
},
@@ -154,7 +155,7 @@ def testEvaluateSuccess_HistogramStat(self, isolate_retrieve):
histograms = histogram_set.HistogramSet([histogram])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
isolate_retrieve.side_effect = itertools.chain(
@@ -164,7 +165,7 @@ def testEvaluateSuccess_HistogramStat(self, isolate_retrieve):
self.PopulateTaskGraph(
benchmark='some_benchmark',
chart='some_chart',
- tir_label='tir_label',
+ grouping_label='label',
story='story',
statistic='avg')
self.assertNotEqual({},
@@ -177,10 +178,11 @@ def testEvaluateSuccess_HistogramStat(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'histogram_sets',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': 'tir_label',
+ 'grouping_label': 'label',
'story': 'story',
'statistic': 'avg',
},
@@ -206,7 +208,7 @@ def testEvaluateSuccess_HistogramStoryNeedsEscape(self, isolate_retrieve):
histograms = histogram_set.HistogramSet([histogram])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['https://story']))
isolate_retrieve.side_effect = itertools.chain(
@@ -216,7 +218,7 @@ def testEvaluateSuccess_HistogramStoryNeedsEscape(self, isolate_retrieve):
self.PopulateTaskGraph(
benchmark='some_benchmark',
chart='some_chart',
- tir_label='tir_label',
+ grouping_label='label',
story='https://story')
self.assertNotEqual({},
task_module.Evaluate(
@@ -228,10 +230,11 @@ def testEvaluateSuccess_HistogramStoryNeedsEscape(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'histogram_sets',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': 'tir_label',
+ 'grouping_label': 'label',
'story': 'https://story',
'statistic': None,
},
@@ -264,7 +267,7 @@ def CreateHistogram(name):
])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
isolate_retrieve.side_effect = itertools.chain(
@@ -274,7 +277,7 @@ def CreateHistogram(name):
self.PopulateTaskGraph(
benchmark='some_benchmark',
chart='some_chart',
- tir_label='tir_label',
+ grouping_label='label',
story='story')
self.assertNotEqual({},
task_module.Evaluate(
@@ -286,10 +289,11 @@ def CreateHistogram(name):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'histogram_sets',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': 'tir_label',
+ 'grouping_label': 'label',
'story': 'story',
'statistic': None,
},
@@ -335,12 +339,13 @@ def testEvaluateSuccess_HistogramsTraceUrls(self, isolate_retrieve):
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark':
'some_benchmark',
+ 'change': mock.ANY,
'mode':
'histogram_sets',
'results_filename':
'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': None,
+ 'grouping_label': None,
'story': None,
'statistic': None,
},
@@ -399,12 +404,13 @@ def testEvaluateSuccess_HistogramSkipRefTraceUrls(self, isolate_retrieve):
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark':
'some_benchmark',
+ 'change': mock.ANY,
'mode':
'histogram_sets',
'results_filename':
'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': None,
+ 'grouping_label': None,
'story': None,
'statistic': None,
},
@@ -444,7 +450,7 @@ def testEvaluateSuccess_HistogramSummary(self, isolate_retrieve):
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['story%d' % i]))
hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
- generic_set.GenericSet(['group:tir_label1']))
+ generic_set.GenericSet(['group:label1']))
hists.append(hist)
samples.extend(hist.sample_values)
@@ -456,14 +462,14 @@ def testEvaluateSuccess_HistogramSummary(self, isolate_retrieve):
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['another_story%d' % i]))
hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
- generic_set.GenericSet(['group:tir_label2']))
+ generic_set.GenericSet(['group:label2']))
hists.append(hist)
samples.extend(hist.sample_values)
histograms = histogram_set.HistogramSet(hists)
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
isolate_retrieve.side_effect = itertools.chain(
*itertools.repeat([('{"files": {"some_benchmark/perf_results.json": '
'{"h": "394890891823812873798734a"}}}'),
@@ -479,10 +485,11 @@ def testEvaluateSuccess_HistogramSummary(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'histogram_sets',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': None,
+ 'grouping_label': None,
'story': None,
'statistic': None,
},
@@ -505,7 +512,7 @@ def testEvaluateFailure_HistogramNoSamples(self, isolate_retrieve):
histograms = histogram_set.HistogramSet([histogram])
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORY_TAGS.name,
- generic_set.GenericSet(['group:tir_label']))
+ generic_set.GenericSet(['group:label']))
histograms.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet(['https://story']))
isolate_retrieve.side_effect = itertools.chain(
@@ -515,7 +522,7 @@ def testEvaluateFailure_HistogramNoSamples(self, isolate_retrieve):
self.PopulateTaskGraph(
benchmark='some_benchmark',
chart='some_chart',
- tir_label='tir_label',
+ grouping_label='label',
story='https://story')
self.assertNotEqual({},
task_module.Evaluate(
@@ -527,10 +534,11 @@ def testEvaluateFailure_HistogramNoSamples(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'histogram_sets',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': 'tir_label',
+ 'grouping_label': 'label',
'story': 'https://story',
'statistic': None,
},
@@ -559,7 +567,7 @@ def testEvaluateFailure_EmptyHistogramSet(self, isolate_retrieve):
self.PopulateTaskGraph(
benchmark='some_benchmark',
chart='some_chart',
- tir_label='tir_label',
+ grouping_label='label',
story='https://story')
self.assertNotEqual({},
task_module.Evaluate(
@@ -571,10 +579,11 @@ def testEvaluateFailure_EmptyHistogramSet(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'histogram_sets',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': 'tir_label',
+ 'grouping_label': 'label',
'story': 'https://story',
'statistic': None,
},
@@ -606,7 +615,7 @@ def testEvaluateFailure_HistogramNoValues(self, isolate_retrieve):
self.PopulateTaskGraph(
benchmark='some_benchmark',
chart='some_chart',
- tir_label='tir_label',
+ grouping_label='label',
story='https://story')
self.assertNotEqual({},
task_module.Evaluate(
@@ -618,10 +627,11 @@ def testEvaluateFailure_HistogramNoValues(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'histogram_sets',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': 'tir_label',
+ 'grouping_label': 'label',
'story': 'https://story',
'statistic': None,
},
@@ -666,10 +676,11 @@ def testEvaluateSuccess_GraphJson(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'graph_json',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': None,
+ 'grouping_label': None,
'story': None,
'statistic': None,
},
@@ -704,10 +715,11 @@ def testEvaluateFailure_GraphJsonMissingFile(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'graph_json',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': None,
+ 'grouping_label': None,
'story': None,
'statistic': None,
},
@@ -748,10 +760,11 @@ def testEvaluateFail_GraphJsonMissingChart(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'graph_json',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': None,
+ 'grouping_label': None,
'story': None,
'statistic': None,
},
@@ -796,10 +809,11 @@ def testEvaluateFail_GraphJsonMissingTrace(self, isolate_retrieve):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'graph_json',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': None,
+ 'grouping_label': None,
'story': None,
'statistic': None,
},
@@ -851,10 +865,11 @@ def testEvaluateFailedDependency(self, *_):
{
'read_value_chromium@aaaaaaa_%s' % (attempt,): {
'benchmark': 'some_benchmark',
+ 'change': mock.ANY,
'mode': 'graph_json',
'results_filename': 'some_benchmark/perf_results.json',
'histogram_options': {
- 'tir_label': None,
+ 'grouping_label': None,
'story': None,
'statistic': None,
},
diff --git a/dashboard/dashboard/pinpoint/models/tasks/run_test.py b/dashboard/dashboard/pinpoint/models/tasks/run_test.py
index 283a2bf423..bc5468b8ba 100644
--- a/dashboard/dashboard/pinpoint/models/tasks/run_test.py
+++ b/dashboard/dashboard/pinpoint/models/tasks/run_test.py
@@ -300,6 +300,10 @@ def __init__(self):
predicate=evaluators.TaskTypeEq('run_test'), delegate=ReportError)
+def TaskId(change, attempt):
+ return 'run_test_%s_%s' % (change, attempt)
+
+
TaskOptions = collections.namedtuple('TaskOptions',
('build_options', 'swarming_server',
'dimensions', 'extra_args', 'attempts'))
@@ -316,8 +320,8 @@ def CreateGraph(options):
find_isolate_task = find_isolate_tasks[0]
subgraph.vertices.extend([
task_module.TaskVertex(
- id='run_test_%s_%s' %
- (find_isolate.ChangeId(options.build_options.change), attempt),
+ id=TaskId(
+ find_isolate.ChangeId(options.build_options.change), attempt),
vertex_type='run_test',
payload={
'swarming_server': options.swarming_server,
diff --git a/dashboard/dashboard/pinpoint_request.py b/dashboard/dashboard/pinpoint_request.py
index e9f0fc07d1..f5529a5400 100644
--- a/dashboard/dashboard/pinpoint_request.py
+++ b/dashboard/dashboard/pinpoint_request.py
@@ -177,19 +177,19 @@ def _GetIsolateTarget(bot_name, suite, start_commit,
return 'performance_test_suite'
-def ParseTIRLabelChartNameAndTraceName(test_path):
- """Returns tir_label, chart_name, trace_name from a test path."""
+def ParseGroupingLabelChartNameAndTraceName(test_path):
+ """Returns grouping_label, chart_name, trace_name from a test path."""
test_path_parts = test_path.split('/')
suite = test_path_parts[2]
if suite in _NON_CHROME_TARGETS:
return '', '', ''
test = ndb.Key('TestMetadata', '/'.join(test_path_parts)).get()
- tir_label, chart_name, trace_name = utils.ParseTelemetryMetricParts(
+ grouping_label, chart_name, trace_name = utils.ParseTelemetryMetricParts(
test_path)
if trace_name and test.unescaped_story_name:
trace_name = test.unescaped_story_name
- return tir_label, chart_name, trace_name
+ return grouping_label, chart_name, trace_name
def ParseStatisticNameFromChart(chart_name):
@@ -297,12 +297,12 @@ def PinpointParamsFromBisectParams(params):
if bisect_mode != 'performance' and bisect_mode != 'functional':
raise InvalidParamsError('Invalid bisect mode %s specified.' % bisect_mode)
- tir_label = ''
+ grouping_label = ''
chart_name = ''
trace_name = ''
if bisect_mode == 'performance':
- tir_label, chart_name, trace_name = ParseTIRLabelChartNameAndTraceName(
- test_path)
+ grouping_label, chart_name, trace_name = (
+ ParseGroupingLabelChartNameAndTraceName(test_path))
start_commit = params['start_commit']
end_commit = params['end_commit']
@@ -360,8 +360,11 @@ def PinpointParamsFromBisectParams(params):
pinpoint_params['statistic'] = statistic_name
if story_filter:
pinpoint_params['story'] = story_filter
- if tir_label:
- pinpoint_params['tir_label'] = tir_label
+ if grouping_label:
+ # TODO(crbug.com/974237): Stop filling in the 'tir_label' when pinpoint
+ # jobs have switched to rely on the 'grouping_label' instead.
+ pinpoint_params['tir_label'] = grouping_label
+ pinpoint_params['grouping_label'] = grouping_label
if trace_name:
pinpoint_params['trace'] = trace_name
diff --git a/dashboard/dashboard/pinpoint_request_test.py b/dashboard/dashboard/pinpoint_request_test.py
index d77ff90108..b6f58500dc 100644
--- a/dashboard/dashboard/pinpoint_request_test.py
+++ b/dashboard/dashboard/pinpoint_request_test.py
@@ -546,7 +546,7 @@ def testPinpointParams_Metric_ChartAndTrace(self):
@mock.patch.object(
utils, 'IsValidSheriffUser', mock.MagicMock(return_value=True))
- def testPinpointParams_Metric_TIRLabelChartAndTrace(self):
+ def testPinpointParams_Metric_GroupingLabelChartAndTrace(self):
params = {
'test_path': 'ChromiumPerf/mac/blink_perf/foo/label/bar.html',
'start_commit': 'abcd1234',
@@ -561,7 +561,10 @@ def testPinpointParams_Metric_TIRLabelChartAndTrace(self):
t.put()
results = pinpoint_request.PinpointParamsFromBisectParams(params)
+    # TODO(crbug.com/974237): Stop expecting 'tir_label' when we start relying
+ # on 'grouping_label' only.
self.assertEqual('label', results['tir_label'])
+ self.assertEqual('label', results['grouping_label'])
self.assertEqual('foo', results['chart'])
self.assertEqual('bar.html', results['trace'])
@@ -695,7 +698,10 @@ def testPinpointParams_V8(self, mock_crrev):
}
results = pinpoint_request.PinpointParamsFromBisectParams(params)
+ # TODO(crbug.com/974237): Stop checking for 'tir_label' when we start
+ # relying on 'grouping_label' only.
self.assertNotIn('tir_label', results)
+ self.assertNotIn('grouping_label', results)
self.assertNotIn('trace', results)
self.assertEqual('', results['chart'])
self.assertEqual('', results['target'])
diff --git a/dashboard/dashboard/post_data_handler.py b/dashboard/dashboard/post_data_handler.py
index e60363d19d..ee56473132 100644
--- a/dashboard/dashboard/post_data_handler.py
+++ b/dashboard/dashboard/post_data_handler.py
@@ -17,12 +17,10 @@ class PostDataHandler(request_handler.RequestHandler):
"""Helper class to handle common functionality for dealing with slaves."""
def post(self):
- """Checks the IP of the request against the white list.
-
- Real sub-class handlers should override this and use
- _CheckIpAgainstWhitelist; this is provided here for convenience in tests.
- """
- self._CheckIpAgainstWhitelist()
+ # This used to just call the _CheckIpAgainstWhitelist() function, and
+    # subclasses are now expected to call that function directly if they need
+ # it.
+ raise NotImplementedError('Handlers must implement this method!')
def _CheckIpAgainstWhitelist(self):
"""Checks the remote address of the request against the IP whitelist.
@@ -44,6 +42,4 @@ def _CheckIpAgainstWhitelist(self):
logging.warn('Received data: %s...', data_param[:200])
except Exception: # pylint: disable=broad-except
pass
- self.ReportError(
- 'IP address %s not in IP whitelist!' % self.request.remote_addr, 403)
return False
diff --git a/dashboard/dashboard/post_data_handler_test.py b/dashboard/dashboard/post_data_handler_test.py
index 80ad3d1fda..c0b6fedc76 100644
--- a/dashboard/dashboard/post_data_handler_test.py
+++ b/dashboard/dashboard/post_data_handler_test.py
@@ -24,12 +24,20 @@
}
+class TestingPostDataHandler(post_data_handler.PostDataHandler):
+
+ def post(self):
+ if not self._CheckIpAgainstWhitelist():
+ self.ReportError(
+ 'IP address %s not in IP whitelist!' % (self.request.remote_addr,),
+ 403)
+
+
class PostDataHandlerTest(testing_common.TestCase):
def setUp(self):
super(PostDataHandlerTest, self).setUp()
- app = webapp2.WSGIApplication(
- [('/whitelist_test', post_data_handler.PostDataHandler)])
+ app = webapp2.WSGIApplication([('/whitelist_test', TestingPostDataHandler)])
self.testapp = webtest.TestApp(app)
def testPost_NoIPWhitelist_Authorized(self):
@@ -38,7 +46,8 @@ def testPost_NoIPWhitelist_Authorized(self):
def testPost_IPNotInWhitelist_NotAuthorized(self):
testing_common.SetIpWhitelist(['123.45.67.89', '98.76.54.32'])
self.testapp.post(
- '/whitelist_test', {'data': json.dumps([_SAMPLE_POINT])}, status=403,
+ '/whitelist_test', {'data': json.dumps([_SAMPLE_POINT])},
+ status=403,
extra_environ={'REMOTE_ADDR': '22.45.67.89'})
def testPost_IPInWhiteList_Authorized(self):
diff --git a/dashboard/dashboard/services/gitiles_service.py b/dashboard/dashboard/services/gitiles_service.py
index 458b29d2a4..835efe11df 100644
--- a/dashboard/dashboard/services/gitiles_service.py
+++ b/dashboard/dashboard/services/gitiles_service.py
@@ -33,7 +33,7 @@ def CommitInfo(repository_url, git_hash):
"""
# TODO: Update the docstrings in this file.
url = '%s/+/%s?format=JSON' % (repository_url, git_hash)
- return request.RequestJson(url, use_cache=_IsHash(git_hash), use_auth=True,
+ return request.RequestJson(url, use_cache=IsHash(git_hash), use_auth=True,
scope=gerrit_service.GERRIT_SCOPE)
@@ -60,7 +60,7 @@ def CommitRange(repository_url, first_git_hash, last_git_hash):
while last_git_hash:
url = '%s/+log/%s..%s?format=JSON' % (
repository_url, first_git_hash, last_git_hash)
- use_cache = _IsHash(first_git_hash) and _IsHash(last_git_hash)
+ use_cache = IsHash(first_git_hash) and IsHash(last_git_hash)
response = request.RequestJson(url, use_cache=use_cache, use_auth=True,
scope=gerrit_service.GERRIT_SCOPE)
commits += response['log']
@@ -84,12 +84,12 @@ def FileContents(repository_url, git_hash, path):
httplib.HTTPException: A network or HTTP error occurred.
"""
url = '%s/+/%s/%s?format=TEXT' % (repository_url, git_hash, path)
- response = request.Request(url, use_cache=_IsHash(git_hash), use_auth=True,
+ response = request.Request(url, use_cache=IsHash(git_hash), use_auth=True,
scope=gerrit_service.GERRIT_SCOPE)
return base64.b64decode(response)
-def _IsHash(git_hash):
+def IsHash(git_hash):
"""Returns True iff git_hash is a full SHA-1 hash.
Commits keyed by full git hashes are guaranteed to not change. It's unsafe
diff --git a/dashboard/dashboard/sheriff_config/README.md b/dashboard/dashboard/sheriff_config/README.md
index 102de40a33..eee16579e9 100644
--- a/dashboard/dashboard/sheriff_config/README.md
+++ b/dashboard/dashboard/sheriff_config/README.md
@@ -12,21 +12,10 @@ in isolation from the rest of the dashboard to let us determine a small scope.
## Testing
We recommend using Docker to build an isolated environment for testing the
-sheriff-config service in isolation. The associated `Dockerfile` contains the
-steps required to develop an isolated version of the service, which can be
-tested locally.
-
-1. Run `make` first to generate the required protobuf images.
-1. Build a docker image:
-
- `docker build -t sheriff-config .`
-
-1. Run the docker image locally:
-
- ```
- docker run -e GAE_APPLICATION='chromeperf' -e GAE_SERVICE='sheriff-config' \
- -P sheriff-config:latest
- ```
+sheriff-config service in isolation. Follow [steps](/tests/README.md) in tests/
+to run the integration test for the sheriff-config service. The associated
+`Dockerfile` and `docker-compose.yml` in tests/ contain the steps required to
+develop an isolated version of the service, which can be tested locally.
To run the unit tests, we'll need the same requirements as the dashboard
installed and available in the environment.
diff --git a/dashboard/dashboard/sheriff_config/tests/datastore-emulator/Dockerfile b/dashboard/dashboard/sheriff_config/tests/datastore-emulator/Dockerfile
index fe74755aa4..895ad183ad 100644
--- a/dashboard/dashboard/sheriff_config/tests/datastore-emulator/Dockerfile
+++ b/dashboard/dashboard/sheriff_config/tests/datastore-emulator/Dockerfile
@@ -8,7 +8,7 @@ ENV EMULATOR_PORT $emulator_port
ENV EMULATOR_PROJECT $emulator_project
# TODO(dberris): Figure out how to signal this container to shutdown externally.
-CMD gcloud beta emulators datastore start --no-legacy \
+CMD gcloud beta emulators datastore start \
--project "$EMULATOR_PROJECT" \
--host-port="$EMULATOR_HOST:$EMULATOR_PORT" \
--no-store-on-disk \
diff --git a/devil/devil/android/device_errors.py b/devil/devil/android/device_errors.py
index e6893a4f69..27f285ba29 100644
--- a/devil/devil/android/device_errors.py
+++ b/devil/devil/android/device_errors.py
@@ -11,6 +11,7 @@
+-- CommandFailedError
| +-- AdbCommandFailedError
| | +-- AdbShellCommandFailedError
+ | +-- AdbVersionError
| +-- FastbootCommandFailedError
| +-- DeviceVersionError
| +-- DeviceChargingError
@@ -114,6 +115,20 @@ def __init__(self, message, device_serial=None):
super(DeviceVersionError, self).__init__(message, device_serial)
+class AdbVersionError(CommandFailedError):
+ """Exception for running a command on an incompatible version of adb."""
+
+ def __init__(self, args, desc=None, actual_version=None, min_version=None):
+ adb_cmd = ' '.join(cmd_helper.SingleQuote(arg) for arg in args)
+ desc = desc or 'not supported'
+ if min_version:
+ desc += ' prior to %s' % min_version
+ if actual_version:
+ desc += ' (actual: %s)' % actual_version
+ super(AdbVersionError, self).__init__(
+ message='adb %s: %s' % (adb_cmd, desc))
+
+
class AdbShellCommandFailedError(AdbCommandFailedError):
"""Exception for shell command failures run via adb."""
diff --git a/devil/devil/android/device_utils.py b/devil/devil/android/device_utils.py
index e2ae1d08b9..abbfde389d 100644
--- a/devil/devil/android/device_utils.py
+++ b/devil/devil/android/device_utils.py
@@ -355,6 +355,58 @@ def _FormatPartialOutputError(output):
return '\n'.join(message)
+_PushableComponents = collections.namedtuple(
+ '_PushableComponents', ('host', 'device', 'collapse'))
+
+
+def _IterPushableComponents(host_path, device_path):
+ """Yields a sequence of paths that can be pushed directly via adb push.
+
+ `adb push` doesn't currently handle pushing directories that contain
+ symlinks: https://bit.ly/2pMBlW5
+
+ To circumvent this issue, we get the smallest set of files and/or
+ directories that can be pushed without attempting to push a directory
+ that contains a symlink.
+
+ This function does so by recursing through |host_path|. Each call
+ yields 3-tuples that include the smallest set of (host, device) path pairs
+ that can be passed to adb push and a bool indicating whether the parent
+ directory can be pushed -- i.e., if True, the host path is neither a
+ symlink nor a directory that contains a symlink.
+
+ Args:
+ host_path: an absolute path of a file or directory on the host
+ device_path: an absolute path of a file or directory on the device
+ Yields:
+ 3-tuples containing
+ host (str): the host path, with symlinks dereferenced
+ device (str): the device path
+ collapse (bool): whether this entity permits its parent to be pushed
+ in its entirety. (Parents need permission from all child entities
+ in order to be pushed in their entirety.)
+ """
+ if os.path.isfile(host_path):
+ yield _PushableComponents(
+ os.path.realpath(host_path), device_path,
+ not os.path.islink(host_path))
+ else:
+ components = []
+ for child in os.listdir(host_path):
+ components.extend(
+ _IterPushableComponents(
+ os.path.join(host_path, child),
+ posixpath.join(device_path, child)))
+
+ if all(c.collapse for c in components):
+ yield _PushableComponents(
+ os.path.realpath(host_path), device_path,
+ not os.path.islink(host_path))
+ else:
+ for c in components:
+ yield c
+
+
class DeviceUtils(object):
_MAX_ADB_COMMAND_LENGTH = 512
@@ -1577,8 +1629,8 @@ def SendKeyEvent(self, keycode, timeout=None, retries=None):
@decorators.WithTimeoutAndRetriesFromInstance(
min_default_timeout=PUSH_CHANGED_FILES_DEFAULT_TIMEOUT)
- def PushChangedFiles(self, host_device_tuples, timeout=None,
- retries=None, delete_device_stale=False):
+ def PushChangedFiles(self, host_device_tuples, delete_device_stale=False,
+ timeout=None, retries=None):
"""Push files to the device, skipping files that don't need updating.
When a directory is pushed, it is traversed recursively on the host and
@@ -1591,15 +1643,28 @@ def PushChangedFiles(self, host_device_tuples, timeout=None,
|host_path| is an absolute path of a file or directory on the host
that should be minimially pushed to the device, and |device_path| is
an absolute path of the destination on the device.
+ delete_device_stale: option to delete stale files on device
timeout: timeout in seconds
retries: number of retries
- delete_device_stale: option to delete stale files on device
Raises:
CommandFailedError on failure.
CommandTimeoutError on timeout.
DeviceUnreachableError on missing device.
"""
+ # TODO(crbug.com/1005504): Experiment with this on physical devices after
+ # upgrading devil's default adb beyond 1.0.39.
+ # TODO(crbug.com/1020716): disabled as can result in extra directory.
+ enable_push_sync = False
+
+ if enable_push_sync:
+ try:
+ self._PushChangedFilesSync(host_device_tuples)
+ return
+ except device_errors.AdbVersionError as e:
+ # If we don't meet the adb requirements, fall back to the previous
+ # sync-unaware implementation.
+ logging.warning(str(e))
all_changed_files = []
all_stale_files = []
@@ -1648,8 +1713,18 @@ def PushChangedFiles(self, host_device_tuples, timeout=None,
for func in cache_commit_funcs:
func()
+ def _PushChangedFilesSync(self, host_device_tuples):
+ """Push changed files via `adb sync`.
+
+ Args:
+ host_device_tuples: Same as PushChangedFiles.
+ """
+ for h, d in host_device_tuples:
+ for ph, pd, _ in _IterPushableComponents(h, d):
+ self.adb.Push(ph, pd, sync=True)
+
def _GetChangedAndStaleFiles(self, host_path, device_path, track_stale=False):
- """Get files to push and delete
+ """Get files to push and delete.
Args:
host_path: an absolute path of a file or directory on the host
diff --git a/devil/devil/android/device_utils_test.py b/devil/devil/android/device_utils_test.py
index bb9478af1b..207bf5641f 100755
--- a/devil/devil/android/device_utils_test.py
+++ b/devil/devil/android/device_utils_test.py
@@ -10,14 +10,17 @@
# pylint: disable=protected-access
# pylint: disable=unused-argument
+import collections
import contextlib
import json
import logging
import os
+import posixpath
import stat
import sys
import unittest
+from py_utils import tempfile_ext
from devil import devil_env
from devil.android import device_errors
from devil.android import device_signal
@@ -3593,6 +3596,129 @@ def testLanguageAndCountry(self):
self.assertEquals(self.device.GetCountry(), 'US')
+class IterPushableComponentsTest(unittest.TestCase):
+
+ @classmethod
+ @contextlib.contextmanager
+ def sampleLayout(cls):
+ Layout = collections.namedtuple(
+ 'Layout',
+ ['root', 'basic_file', 'symlink_file', 'symlink_dir',
+ 'dir_with_symlinks', 'dir_without_symlinks'])
+
+ with tempfile_ext.NamedTemporaryDirectory() as layout_root:
+ dir1 = os.path.join(layout_root, 'dir1')
+ os.makedirs(dir1)
+
+ basic_file = os.path.join(dir1, 'file1.txt')
+ with open(basic_file, 'w') as f:
+ f.write('hello world')
+
+ symlink = os.path.join(dir1, 'symlink.txt')
+ os.symlink(basic_file, symlink)
+
+ dir2 = os.path.join(layout_root, 'dir2')
+ os.makedirs(dir2)
+
+ with open(os.path.join(dir2, 'file2.txt'), 'w') as f:
+ f.write('goodnight moon')
+
+ symlink_dir = os.path.join(layout_root, 'dir3')
+ os.symlink(dir2, symlink_dir)
+
+ yield Layout(layout_root, basic_file, symlink, symlink_dir, dir1, dir2)
+
+ def testFile(self):
+ with self.sampleLayout() as layout:
+ device_path = '/sdcard/basic_file'
+
+ expected = [(layout.basic_file, device_path, True)]
+ actual = list(
+ device_utils._IterPushableComponents(
+ layout.basic_file, device_path))
+ self.assertItemsEqual(expected, actual)
+
+ def testSymlinkFile(self):
+ with self.sampleLayout() as layout:
+ device_path = '/sdcard/basic_symlink'
+
+ expected = [(os.path.realpath(layout.symlink_file), device_path, False)]
+ actual = list(
+ device_utils._IterPushableComponents(
+ layout.symlink_file, device_path))
+ self.assertItemsEqual(expected, actual)
+
+ def testDirectoryWithNoSymlink(self):
+ with self.sampleLayout() as layout:
+ device_path = '/sdcard/basic_directory'
+
+ expected = [(layout.dir_without_symlinks, device_path, True)]
+ actual = list(
+ device_utils._IterPushableComponents(
+ layout.dir_without_symlinks, device_path))
+ self.assertItemsEqual(expected, actual)
+
+ def testDirectoryWithSymlink(self):
+ with self.sampleLayout() as layout:
+ device_path = '/sdcard/directory'
+
+ expected = [
+ (layout.basic_file,
+ posixpath.join(device_path, os.path.basename(layout.basic_file)),
+ True),
+ (os.path.realpath(layout.symlink_file),
+ posixpath.join(device_path, os.path.basename(layout.symlink_file)),
+ False),
+ ]
+ actual = list(
+ device_utils._IterPushableComponents(
+ layout.dir_with_symlinks, device_path))
+ self.assertItemsEqual(expected, actual)
+
+ def testSymlinkDirectory(self):
+ with self.sampleLayout() as layout:
+ device_path = '/sdcard/directory'
+
+ expected = [(os.path.realpath(layout.symlink_dir), device_path, False)]
+ actual = list(
+ device_utils._IterPushableComponents(
+ layout.symlink_dir, device_path))
+ self.assertItemsEqual(expected, actual)
+
+ def testDirectoryWithNestedSymlink(self):
+ with self.sampleLayout() as layout:
+ device_path = '/sdcard/directory'
+
+ expected = [
+ (layout.dir_without_symlinks,
+ posixpath.join(
+ device_path,
+ os.path.basename(layout.dir_without_symlinks)),
+ True),
+ (layout.basic_file,
+ posixpath.join(
+ device_path,
+ *os.path.split(os.path.relpath(layout.basic_file, layout.root))),
+ True),
+ (os.path.realpath(layout.symlink_file),
+ posixpath.join(
+ device_path,
+ *os.path.split(
+ os.path.relpath(layout.symlink_file, layout.root))),
+ False),
+ (os.path.realpath(layout.symlink_dir),
+ posixpath.join(
+ device_path,
+ *os.path.split(
+ os.path.relpath(layout.symlink_dir, layout.root))),
+ False),
+ ]
+ actual = list(
+ device_utils._IterPushableComponents(
+ layout.root, device_path))
+ self.assertItemsEqual(expected, actual)
+
+
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
diff --git a/devil/devil/android/sdk/adb_wrapper.py b/devil/devil/android/sdk/adb_wrapper.py
index 13c0f5207c..66db900b0c 100644
--- a/devil/devil/android/sdk/adb_wrapper.py
+++ b/devil/devil/android/sdk/adb_wrapper.py
@@ -447,14 +447,20 @@ def GetDeviceSerial(self):
"""
return self._device_serial
- def Push(self, local, remote, timeout=60 * 5, retries=DEFAULT_RETRIES):
+ def Push(self, local, remote, sync=False,
+ timeout=60 * 5, retries=DEFAULT_RETRIES):
"""Pushes a file from the host to the device.
Args:
local: Path on the host filesystem.
remote: Path on the device filesystem.
+ sync: (optional) Whether to only push files that are newer on the host.
+ Not supported when using adb prior to 1.0.39.
timeout: (optional) Timeout per try in seconds.
retries: (optional) Number of retries to attempt.
+
+ Raises:
+ AdbVersionError if sync=True with versions of adb prior to 1.0.39.
"""
VerifyLocalFileExists(local)
@@ -495,7 +501,24 @@ def Push(self, local, remote, timeout=60 * 5, retries=DEFAULT_RETRIES):
# without modification.
pass
- self._RunDeviceAdbCmd(['push', local, remote], timeout, retries)
+ push_cmd = ['push']
+
+ if sync:
+ push_cmd += ['--sync']
+ if (du_version.LooseVersion(self.Version()) <
+ du_version.LooseVersion('1.0.39')):
+ # The --sync flag for `adb push` is a relatively recent addition.
+ # We're not sure exactly which release first contained it, but it
+ # exists at least as far back as 1.0.39.
+ raise device_errors.AdbVersionError(
+ push_cmd,
+ desc='--sync not supported',
+ actual_version=self.Version(),
+ min_version='1.0.39')
+
+ push_cmd += [local, remote]
+
+ self._RunDeviceAdbCmd(push_cmd, timeout, retries)
def Pull(self, remote, local, timeout=60 * 5, retries=DEFAULT_RETRIES):
"""Pulls a file from the device to the host.
diff --git a/devil/devil/android/tools/system_app.py b/devil/devil/android/tools/system_app.py
index 61a623544f..9a67f4615f 100755
--- a/devil/devil/android/tools/system_app.py
+++ b/devil/devil/android/tools/system_app.py
@@ -39,7 +39,12 @@
# will remove either version. This doesn't appear to cause any issues, but
# will cause a few unnecessary reboots if this is the only package getting
# removed and it's already not a system app.
- 'com.google.ar.core': '/data/app/',
+ 'com.google.ar.core': ['/data/app/'],
+ # On older versions of VrCore, the system app version is installed in /system/
+ # like normal. However, at some point, this moved to /data/. So, we have to
+ # handle both cases. Like ArCore, this means we'll end up removing even
+ # non-system versions due to this, but it doesn't cause any issues.
+ 'com.google.vr.core': ['/data/app/', '/system/'],
}
# Gets app path and package name pm list packages -f output.
@@ -108,8 +113,10 @@ def _GetApplicationPaths(device, package):
def _GetSystemPath(package, paths):
for p in paths:
- if p.startswith(SPECIAL_SYSTEM_APP_LOCATIONS.get(package, '/system/')):
- return p
+ app_locations = SPECIAL_SYSTEM_APP_LOCATIONS.get(package, ['/system/'])
+ for location in app_locations:
+ if p.startswith(location):
+ return p
return None
diff --git a/devil/devil/devil_dependencies.json b/devil/devil/devil_dependencies.json
index 9893413fba..577ab1405d 100644
--- a/devil/devil/devil_dependencies.json
+++ b/devil/devil/devil_dependencies.json
@@ -16,7 +16,7 @@
"cloud_storage_bucket": "chromium-telemetry",
"file_info": {
"linux2_x86_64": {
- "cloud_storage_hash": "8bd43e3930f6eec643d5dc64cab9e5bb4ddf4909",
+ "cloud_storage_hash": "210c2c38f91f753a05341cba8d7290b125a9bc99",
"download_path": "../bin/deps/linux2/x86_64/bin/adb"
}
}
diff --git a/telemetry/OWNERS b/telemetry/OWNERS
index 8390c5755e..d06f28b0b8 100644
--- a/telemetry/OWNERS
+++ b/telemetry/OWNERS
@@ -8,7 +8,7 @@ rmhasan@google.com
achuith@chromium.org
# TEAM: telemetry@chromium.org
-# COMPONENT: Speed>Telemetry
+# COMPONENT: Test>Telemetry
# emeritus:
# aiolos@chromium.org
# chrishenry@google.com
diff --git a/telemetry/telemetry/benchmark.py b/telemetry/telemetry/benchmark.py
index 1d9638e6bb..01d762a169 100644
--- a/telemetry/telemetry/benchmark.py
+++ b/telemetry/telemetry/benchmark.py
@@ -3,14 +3,12 @@
# found in the LICENSE file.
import optparse
-import sys
from telemetry import decorators
from telemetry.internal import story_runner
from telemetry.internal.util import command_line
from telemetry.page import legacy_page_test
from telemetry.story import expectations as expectations_module
-from telemetry.story import typ_expectations
from telemetry.web_perf import story_test
from telemetry.web_perf import timeline_based_measurement
@@ -39,7 +37,6 @@ class Benchmark(command_line.Command):
page_set = None
test = timeline_based_measurement.TimelineBasedMeasurement
SUPPORTED_PLATFORMS = [expectations_module.ALL]
- MAX_NUM_VALUES = sys.maxint
def __init__(self, max_failures=None):
"""Creates a new Benchmark.
@@ -48,7 +45,6 @@ def __init__(self, max_failures=None):
max_failures: The number of story run's failures before bailing
from executing subsequent page runs. If None, we never bail.
"""
- self._expectations = typ_expectations.StoryExpectations(self.Name())
self._max_failures = max_failures
# TODO: There should be an assertion here that checks that only one of
# the following is true:
@@ -58,7 +54,20 @@ def __init__(self, max_failures=None):
# Benchmark.test set.
# See https://github.com/catapult-project/catapult/issues/3708
- def _CanRunOnPlatform(self, platform, finder_options):
+ def CanRunOnPlatform(self, platform, finder_options):
+ """Figures out if the benchmark is meant to support this platform.
+
+ This is based on the SUPPORTED_PLATFORMS class member of the benchmark.
+
+ This method should not be overriden or called outside of the Telemetry
+ framework.
+
+ Note that finder_options object in practice sometimes is actually not
+ a BrowserFinderOptions object but a PossibleBrowser object.
+ The key is that it can be passed to ShouldDisable, which only uses
+ finder_options.browser_type, which is available on both PossibleBrowser
+ and BrowserFinderOptions.
+ """
for p in self.SUPPORTED_PLATFORMS:
# This is reusing StoryExpectation code, so it is a bit unintuitive. We
# are trying to detect the opposite of the usual case in StoryExpectations
@@ -232,10 +241,3 @@ def CreateStorySet(self, options):
if not self.page_set:
raise NotImplementedError('This test has no "page_set" attribute.')
return self.page_set() # pylint: disable=not-callable
-
- def AugmentExpectationsWithFile(self, raw_data):
- self._expectations.GetBenchmarkExpectationsFromParser(raw_data)
-
- @property
- def expectations(self):
- return self._expectations
diff --git a/telemetry/telemetry/benchmark_unittest.py b/telemetry/telemetry/benchmark_unittest.py
index c1742d5991..81a4e80bff 100644
--- a/telemetry/telemetry/benchmark_unittest.py
+++ b/telemetry/telemetry/benchmark_unittest.py
@@ -17,7 +17,6 @@
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.web_perf import timeline_based_measurement
-from telemetry.story import typ_expectations
class DummyPageTest(legacy_page_test.LegacyPageTest):
@@ -46,15 +45,6 @@ def setUp(self):
def tearDown(self):
shutil.rmtree(self.options.output_dir)
- def testNewTestExpectationsFormatIsUsed(self):
- b = TestBenchmark(
- story_module.Story(
- name='test name',
- shared_state_class=shared_page_state.SharedPageState))
- b.AugmentExpectationsWithFile('# results: [ Skip ]\nb1 [ Skip ]\n')
- self.assertIsInstance(
- b.expectations, typ_expectations.StoryExpectations)
-
def testPageTestWithIncompatibleStory(self):
b = TestBenchmark(story_module.Story(
name='test story',
@@ -219,7 +209,7 @@ def testCanRunOnPlatformReturnTrue(self):
shared_state_class=shared_page_state.SharedPageState))
# We can pass None for both arguments because it defaults to ALL for
# supported platforms, which always returns true.
- self.assertTrue(b._CanRunOnPlatform(None, None))
+ self.assertTrue(b.CanRunOnPlatform(None, None))
def testCanRunOnPlatformReturnFalse(self):
b = TestBenchmark(story_module.Story(
@@ -228,15 +218,4 @@ def testCanRunOnPlatformReturnFalse(self):
b.SUPPORTED_PLATFORMS = [] # pylint: disable=invalid-name
# We can pass None for both arguments because we select no platforms as
# supported, which always returns false.
- self.assertFalse(b._CanRunOnPlatform(None, None))
-
- def testAugmentExpectationsWithFileData(self):
- b = TestBenchmark(story_module.Story(
- name='test_name',
- shared_state_class=shared_page_state.SharedPageState))
- data = ('# results: [ skip ]\n'
- 'crbug.com/123 benchmark_unittest.TestBenchmark/test_name [ Skip ]')
- b.AugmentExpectationsWithFile(data)
- story = mock.MagicMock()
- story.name = 'test_name'
- self.assertTrue(b.expectations.IsStoryDisabled(story))
+ self.assertFalse(b.CanRunOnPlatform(None, None))
diff --git a/telemetry/telemetry/command_line/commands.py b/telemetry/telemetry/command_line/commands.py
index 41c9e6be74..4266f979d1 100644
--- a/telemetry/telemetry/command_line/commands.py
+++ b/telemetry/telemetry/command_line/commands.py
@@ -6,7 +6,6 @@
import json
import logging
import optparse
-import os
import sys
from telemetry import benchmark
@@ -16,23 +15,9 @@
from telemetry.util import matching
-def _SetExpectations(bench, path):
- if path and os.path.exists(path):
- with open(path) as fp:
- bench.AugmentExpectationsWithFile(fp.read())
- return bench.expectations
-
-
-def _IsBenchmarkEnabled(bench, possible_browser, expectations_file):
- b = bench()
- expectations = _SetExpectations(b, expectations_file)
- expectations.SetTags(possible_browser.GetTypExpectationsTags())
- return (
- # Test that the current platform is supported.
- any(t.ShouldDisable(possible_browser.platform, possible_browser)
- for t in b.SUPPORTED_PLATFORMS) and
- # Test that expectations say it is enabled.
- not expectations.IsBenchmarkDisabled())
+def _IsBenchmarkSupported(benchmark_, possible_browser):
+ return benchmark_().CanRunOnPlatform(
+ possible_browser.platform, possible_browser)
def _GetStoriesWithTags(b):
@@ -72,9 +57,9 @@ def _GetStoriesWithTags(b):
def PrintBenchmarkList(
- benchmarks, possible_browser, expectations_file, output_pipe=None,
+ benchmarks, possible_browser, output_pipe=None,
json_pipe=None):
- """ Print benchmarks that are not filtered in the same order of benchmarks in
+ """Print benchmarks that are not filtered in the same order of benchmarks in
the |benchmarks| list.
If json_pipe is available, a json file with the following contents will be
@@ -84,6 +69,7 @@ def PrintBenchmarkList(
"name": ,
"description": ,
"enabled": ,
+ "supported": ,
"story_tags": [
,
...
@@ -93,11 +79,16 @@ def PrintBenchmarkList(
...
]
+ Note that "enabled" and "supported" carry the same value. "enabled" is
+ deprecated because it is misleading: a benchmark could be theoretically
+ supported yet have all of its stories disabled via an expectations.config file.
+ "supported" simply checks the benchmark's SUPPORTED_PLATFORMS member.
+
Args:
benchmarks: the list of benchmarks to be printed (in the same order of the
list).
possible_browser: the possible_browser instance that's used for checking
- which benchmarks are enabled.
+ which benchmarks are supported.
output_pipe: the stream in which benchmarks are printed on.
json_pipe: if available, also serialize the list into json_pipe.
"""
@@ -118,9 +109,10 @@ def PrintBenchmarkList(
all_benchmark_info = []
for b in benchmarks:
benchmark_info = {'name': b.Name(), 'description': b.Description()}
- benchmark_info['enabled'] = (
+ benchmark_info['supported'] = (
not possible_browser or
- _IsBenchmarkEnabled(b, possible_browser, expectations_file))
+ _IsBenchmarkSupported(b, possible_browser))
+ benchmark_info['enabled'] = benchmark_info['supported']
benchmark_info['stories'] = _GetStoriesWithTags(b)
all_benchmark_info.append(benchmark_info)
@@ -131,19 +123,19 @@ def PrintBenchmarkList(
# Sort the benchmarks by benchmark name.
all_benchmark_info.sort(key=lambda b: b['name'])
- enabled = [b for b in all_benchmark_info if b['enabled']]
- if enabled:
+ supported = [b for b in all_benchmark_info if b['supported']]
+ if supported:
print >> output_pipe, 'Available benchmarks %sare:' % (
'for %s ' % possible_browser.browser_type if possible_browser else '')
- for b in enabled:
+ for b in supported:
print >> output_pipe, format_string % (b['name'], b['description'])
- disabled = [b for b in all_benchmark_info if not b['enabled']]
- if disabled:
+ not_supported = [b for b in all_benchmark_info if not b['supported']]
+ if not_supported:
print >> output_pipe, (
- '\nDisabled benchmarks for %s are (force run with -d):' %
+ '\nNot supported benchmarks for %s are (force run with -d):' %
possible_browser.browser_type)
- for b in disabled:
+ for b in not_supported:
print >> output_pipe, format_string % (b['name'], b['description'])
print >> output_pipe, (
@@ -171,11 +163,6 @@ def CreateParser(cls):
@classmethod
def ProcessCommandLineArgs(cls, parser, options, environment):
- if environment.expectations_files:
- assert len(environment.expectations_files) == 1
- expectations_file = environment.expectations_files[0]
- else:
- expectations_file = None
if not options.positional_args:
options.benchmarks = environment.GetBenchmarks()
elif len(options.positional_args) == 1:
@@ -183,7 +170,6 @@ def ProcessCommandLineArgs(cls, parser, options, environment):
options.positional_args[0], environment.GetBenchmarks())
else:
parser.error('Must provide at most one benchmark name.')
- cls._expectations_file = expectations_file
def Run(self, options):
# Set at least log info level for List command.
@@ -194,11 +180,9 @@ def Run(self, options):
if options.json_filename:
with open(options.json_filename, 'w') as json_out:
PrintBenchmarkList(options.benchmarks, possible_browser,
- self._expectations_file,
json_pipe=json_out)
else:
- PrintBenchmarkList(options.benchmarks, possible_browser,
- self._expectations_file)
+ PrintBenchmarkList(options.benchmarks, possible_browser)
return 0
@@ -234,16 +218,10 @@ def AddCommandLineArgs(cls, parser, args, environment):
@classmethod
def ProcessCommandLineArgs(cls, parser, options, environment):
all_benchmarks = environment.GetBenchmarks()
- if environment.expectations_files:
- assert len(environment.expectations_files) == 1
- expectations_file = environment.expectations_files[0]
- else:
- expectations_file = None
if not options.positional_args:
possible_browser = (browser_finder.FindBrowser(options)
if options.browser_type else None)
- PrintBenchmarkList(
- all_benchmarks, possible_browser, expectations_file)
+ PrintBenchmarkList(all_benchmarks, possible_browser)
parser.error('missing required argument: benchmark_name')
benchmark_name = options.positional_args[0]
@@ -253,8 +231,7 @@ def ProcessCommandLineArgs(cls, parser, options, environment):
all_benchmarks, benchmark_name, lambda x: x.Name())
if most_likely_matched_benchmarks:
print >> sys.stderr, 'Do you mean any of those benchmarks below?'
- PrintBenchmarkList(most_likely_matched_benchmarks, None,
- expectations_file, sys.stderr)
+ PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
parser.error('no such benchmark: %s' % benchmark_name)
if len(options.positional_args) > 1:
@@ -264,15 +241,13 @@ def ProcessCommandLineArgs(cls, parser, options, environment):
assert issubclass(benchmark_class,
benchmark.Benchmark), ('Trying to run a non-Benchmark?!')
- story_runner.ProcessCommandLineArgs(parser, options)
+ story_runner.ProcessCommandLineArgs(parser, options, environment)
benchmark_class.ProcessCommandLineArgs(parser, options)
cls._benchmark = benchmark_class
- cls._expectations_path = expectations_file
def Run(self, options):
b = self._benchmark()
- _SetExpectations(b, self._expectations_path)
return min(255, b.Run(options))
diff --git a/telemetry/telemetry/command_line/commands_unittest.py b/telemetry/telemetry/command_line/commands_unittest.py
index c881d65654..70d9f42965 100644
--- a/telemetry/telemetry/command_line/commands_unittest.py
+++ b/telemetry/telemetry/command_line/commands_unittest.py
@@ -3,8 +3,6 @@
# found in the LICENSE file.
import json
-import os
-import tempfile
import StringIO
import unittest
@@ -30,15 +28,18 @@ def Name(cls):
return 'BenchmarkFoo'
-class BenchmarkBar(benchmark.Benchmark):
- """Benchmark bar for testing."""
+class BenchmarkDisabled(benchmark.Benchmark):
+ """Benchmark disabled for testing."""
+
+ # An empty list means that this benchmark cannot run anywhere.
+ SUPPORTED_PLATFORMS = []
def page_set(self):
return story_module.StorySet()
@classmethod
def Name(cls):
- return 'BenchmarkBar'
+ return 'BenchmarkDisabled'
class PrintBenchmarkListTests(unittest.TestCase):
@@ -52,11 +53,10 @@ def setUp(self):
def testPrintBenchmarkListWithNoDisabledBenchmark(self):
expected_printed_stream = (
'Available benchmarks for TestBrowser are:\n'
- ' BenchmarkBar Benchmark bar for testing.\n'
' BenchmarkFoo Benchmark foo for testing.\n'
'Pass --browser to list benchmarks for another browser.\n\n')
- commands.PrintBenchmarkList([BenchmarkBar, BenchmarkFoo],
- self._mock_possible_browser, None,
+ commands.PrintBenchmarkList([BenchmarkFoo],
+ self._mock_possible_browser,
self._stream)
self.assertEquals(expected_printed_stream, self._stream.getvalue())
@@ -64,39 +64,27 @@ def testPrintBenchmarkListWithNoDisabledBenchmark(self):
def testPrintBenchmarkListWithOneDisabledBenchmark(self):
expected_printed_stream = (
'Available benchmarks for TestBrowser are:\n'
- ' BenchmarkFoo Benchmark foo for testing.\n'
+ ' BenchmarkFoo Benchmark foo for testing.\n'
'\n'
- 'Disabled benchmarks for TestBrowser are (force run with -d):\n'
- ' BenchmarkBar Benchmark bar for testing.\n'
+ 'Not supported benchmarks for TestBrowser are (force run with -d):\n'
+ ' BenchmarkDisabled Benchmark disabled for testing.\n'
'Pass --browser to list benchmarks for another browser.\n\n')
- expectations_file_contents = (
- '# tags: [ All ]\n'
- '# results: [ Skip ]\n'
- 'crbug.com/123 [ All ] BenchmarkBar* [ Skip ]\n'
- )
-
- expectations_file = tempfile.NamedTemporaryFile(bufsize=0, delete=False)
with mock.patch.object(
self._mock_possible_browser, 'GetTypExpectationsTags',
return_value=['All']):
- try:
- expectations_file.write(expectations_file_contents)
- expectations_file.close()
- commands.PrintBenchmarkList([BenchmarkFoo, BenchmarkBar],
- self._mock_possible_browser,
- expectations_file.name,
- self._stream)
- self.assertEquals(expected_printed_stream, self._stream.getvalue())
- finally:
- os.remove(expectations_file.name)
+ commands.PrintBenchmarkList([BenchmarkFoo, BenchmarkDisabled],
+ self._mock_possible_browser,
+ self._stream)
+ self.assertEquals(expected_printed_stream, self._stream.getvalue())
def testPrintBenchmarkListInJSON(self):
expected_json_stream = json.dumps(
- sorted([
+ [
{'name': BenchmarkFoo.Name(),
'description': BenchmarkFoo.Description(),
'enabled': True,
+ 'supported': True,
'stories': [
{
'name': 'dummy_page',
@@ -106,27 +94,9 @@ def testPrintBenchmarkListInJSON(self):
]
}
]
- },
- {'name': BenchmarkBar.Name(),
- 'description': BenchmarkBar.Description(),
- 'enabled': False,
- 'stories': []}], key=lambda b: b['name']),
- indent=4, sort_keys=True, separators=(',', ': '))
-
- expectations_file_contents = (
- '# results: [ Skip ]\n'
- 'crbug.com/123 BenchmarkBar/* [ Skip ]\n'
- )
-
- expectations_file = tempfile.NamedTemporaryFile(bufsize=0, delete=False)
- try:
- expectations_file.write(expectations_file_contents)
- expectations_file.close()
- commands.PrintBenchmarkList([BenchmarkFoo, BenchmarkBar],
- self._mock_possible_browser,
- expectations_file.name,
- self._stream, self._json_stream)
- self.assertEquals(expected_json_stream, self._json_stream.getvalue())
+ }], indent=4, sort_keys=True, separators=(',', ': '))
- finally:
- os.remove(expectations_file.name)
+ commands.PrintBenchmarkList([BenchmarkFoo],
+ self._mock_possible_browser,
+ json_pipe=self._json_stream)
+ self.assertEquals(expected_json_stream, self._json_stream.getvalue())
diff --git a/telemetry/telemetry/command_line/parser_unittest.py b/telemetry/telemetry/command_line/parser_unittest.py
index 952b0a7149..2588271956 100644
--- a/telemetry/telemetry/command_line/parser_unittest.py
+++ b/telemetry/telemetry/command_line/parser_unittest.py
@@ -4,15 +4,13 @@
import argparse
import optparse
-import os
import sys
import unittest
import mock
from telemetry.command_line import parser
-from telemetry.core import util
-from telemetry import decorators
+from telemetry import benchmark
from telemetry import project_config
@@ -24,13 +22,20 @@ class ParserError(Exception):
pass
+class ExampleBenchmark(benchmark.Benchmark):
+ @classmethod
+ def Name(cls):
+ return 'example_benchmark'
+
+
class ParseArgsTests(unittest.TestCase):
def setUp(self):
# TODO(crbug.com/981349): Ideally parsing args should not have any side
- # effects; for now we need to mock out calls to set up logging and binary
- # manager.
+ # effects; for now we need to mock out calls to set up logging, binary
+ # manager, and browser finding logic.
mock.patch('telemetry.command_line.parser.logging').start()
mock.patch('telemetry.command_line.parser.binary_manager').start()
+ mock.patch('telemetry.command_line.commands.browser_finder').start()
mock.patch.object(
argparse.ArgumentParser, 'exit', side_effect=ParserExit).start()
@@ -41,104 +46,116 @@ def setUp(self):
self._optparse_error = mock.patch.object(
optparse.OptionParser, 'error', side_effect=ParserError).start()
- examples_dir = os.path.join(util.GetTelemetryDir(), 'examples')
- self.config = project_config.ProjectConfig(
- top_level_dir=examples_dir,
- benchmark_dirs=[os.path.join(examples_dir, 'benchmarks')])
+ self.benchmarks = [ExampleBenchmark]
+ def find_by_name(name):
+ return next((b for b in self.benchmarks if b.Name() == name), None)
+
+ self.mock_config = mock.Mock(spec=project_config.ProjectConfig)
+ self.mock_config.GetBenchmarks.return_value = self.benchmarks
+ self.mock_config.GetBenchmarkByName.side_effect = find_by_name
+ self.mock_config.expectations_files = []
def tearDown(self):
mock.patch.stopall()
def testHelpFlag(self):
with self.assertRaises(ParserExit):
- parser.ParseArgs(self.config, ['--help'])
+ parser.ParseArgs(self.mock_config, ['--help'])
self.assertIn('Command line tool to run performance benchmarks.',
sys.stdout.getvalue())
def testHelpCommand(self):
with self.assertRaises(ParserExit):
- parser.ParseArgs(self.config, ['help', 'run'])
+ parser.ParseArgs(self.mock_config, ['help', 'run'])
self.assertIn('To get help about a command use', sys.stdout.getvalue())
def testRunHelp(self):
with self.assertRaises(ParserExit):
- parser.ParseArgs(self.config, ['run', '--help'])
+ parser.ParseArgs(self.mock_config, ['run', '--help'])
self.assertIn('--browser=BROWSER_TYPE', sys.stdout.getvalue())
def testRunBenchmarkHelp(self):
with self.assertRaises(ParserExit):
- parser.ParseArgs(self.config, ['tbm_sample.tbm_sample', '--help'])
+ parser.ParseArgs(self.mock_config, ['example_benchmark', '--help'])
self.assertIn('--browser=BROWSER_TYPE', sys.stdout.getvalue())
def testListBenchmarks(self):
- args = parser.ParseArgs(self.config, ['list', '--json', 'output.json'])
+ args = parser.ParseArgs(self.mock_config, ['list', '--json', 'output.json'])
self.assertEqual(args.command, 'list')
self.assertEqual(args.json_filename, 'output.json')
def testRunBenchmark(self):
- args = parser.ParseArgs(self.config, [
- 'run', 'tbm_sample.tbm_sample', '--browser=stable'])
+ args = parser.ParseArgs(self.mock_config, [
+ 'run', 'example_benchmark', '--browser=stable'])
self.assertEqual(args.command, 'run')
- self.assertEqual(args.positional_args, ['tbm_sample.tbm_sample'])
+ self.assertEqual(args.positional_args, ['example_benchmark'])
self.assertEqual(args.browser_type, 'stable')
def testRunCommandIsDefault(self):
- args = parser.ParseArgs(self.config, [
- 'tbm_sample.tbm_sample', '--browser', 'stable'])
+ args = parser.ParseArgs(self.mock_config, [
+ 'example_benchmark', '--browser', 'stable'])
self.assertEqual(args.command, 'run')
- self.assertEqual(args.positional_args, ['tbm_sample.tbm_sample'])
+ self.assertEqual(args.positional_args, ['example_benchmark'])
self.assertEqual(args.browser_type, 'stable')
def testRunCommandBenchmarkNameAtEnd(self):
- args = parser.ParseArgs(self.config, [
- '--browser', 'stable', 'tbm_sample.tbm_sample'])
+ args = parser.ParseArgs(self.mock_config, [
+ '--browser', 'stable', 'example_benchmark'])
self.assertEqual(args.command, 'run')
- self.assertEqual(args.positional_args, ['tbm_sample.tbm_sample'])
+ self.assertEqual(args.positional_args, ['example_benchmark'])
self.assertEqual(args.browser_type, 'stable')
def testRunBenchmark_UnknownBenchmark(self):
with self.assertRaises(ParserError):
- parser.ParseArgs(self.config, [
+ parser.ParseArgs(self.mock_config, [
'run', 'foo.benchmark', '--browser=stable'])
self._optparse_error.assert_called_with(
'no such benchmark: foo.benchmark')
- # TODO(crbug.com/799950): This command attempts to find benchmarks available
- # for the given --browser; which in turn causes an attempt to download
- # browser binaries from cloud storage. But this is not allowed in ChromeOs.
- # Re-enable when listing benchmarks and parsing args does not have any such
- # side effects.
- @decorators.Disabled('chromeos')
def testRunBenchmark_MissingBenchmark(self):
with self.assertRaises(ParserError):
- parser.ParseArgs(self.config, ['run', '--browser=stable'])
+ parser.ParseArgs(self.mock_config, ['run', '--browser=stable'])
self._optparse_error.assert_called_with(
'missing required argument: benchmark_name')
def testRunBenchmark_TooManyArgs(self):
with self.assertRaises(ParserError):
- parser.ParseArgs(self.config, [
- 'run', 'tbm_sample.tbm_sample', 'other', '--browser=beta', 'args'])
+ parser.ParseArgs(self.mock_config, [
+ 'run', 'example_benchmark', 'other', '--browser=beta', 'args'])
self._optparse_error.assert_called_with(
'unrecognized arguments: other args')
def testRunBenchmark_UnknownArg(self):
with self.assertRaises(ParserError):
- parser.ParseArgs(self.config, [
- 'run', 'tbm_sample.tbm_sample', '--non-existent-option'])
+ parser.ParseArgs(self.mock_config, [
+ 'run', 'example_benchmark', '--non-existent-option'])
self._optparse_error.assert_called_with(
'no such option: --non-existent-option')
+ def testRunBenchmark_WithCustomOptionDefaults(self):
+ class BenchmarkWithCustomDefaults(benchmark.Benchmark):
+ options = {'upload_results': True}
+
+ @classmethod
+ def Name(cls):
+ return 'custom_benchmark'
+
+ self.benchmarks.append(BenchmarkWithCustomDefaults)
+ args = parser.ParseArgs(self.mock_config, [
+ 'custom_benchmark', '--browser', 'stable'])
+ self.assertTrue(args.upload_results)
+ self.assertEqual(args.positional_args, ['custom_benchmark'])
+
def testRunBenchmark_ExternalOption(self):
my_parser = argparse.ArgumentParser(add_help=False)
my_parser.add_argument('--extra-special-option', action='store_true')
args = parser.ParseArgs(
- self.config,
- ['run', 'tbm_sample.tbm_sample', '--extra-special-option'],
+ self.mock_config,
+ ['run', 'example_benchmark', '--extra-special-option'],
results_arg_parser=my_parser)
self.assertEqual(args.command, 'run')
- self.assertEqual(args.positional_args, ['tbm_sample.tbm_sample'])
+ self.assertEqual(args.positional_args, ['example_benchmark'])
self.assertTrue(args.extra_special_option)
def testListBenchmarks_NoExternalOptions(self):
@@ -148,7 +165,7 @@ def testListBenchmarks_NoExternalOptions(self):
with self.assertRaises(ParserError):
# Listing benchmarks does not require the external results processor.
parser.ParseArgs(
- self.config, ['list', '--extra-special-option'],
+ self.mock_config, ['list', '--extra-special-option'],
results_arg_parser=my_parser)
self._optparse_error.assert_called_with(
'no such option: --extra-special-option')
@@ -160,7 +177,7 @@ def testRunBenchmark_WithExternalHelp(self):
with self.assertRaises(ParserExit):
parser.ParseArgs(
- self.config, ['run', '--help'], results_arg_parser=my_parser)
+ self.mock_config, ['run', '--help'], results_arg_parser=my_parser)
self.assertIn('--browser=BROWSER_TYPE', sys.stdout.getvalue())
self.assertIn('--extra-special-option', sys.stdout.getvalue())
@@ -171,6 +188,6 @@ def testListBenchmarks_WithExternalHelp(self):
with self.assertRaises(ParserExit):
parser.ParseArgs(
- self.config, ['list', '--help'], results_arg_parser=my_parser)
+ self.mock_config, ['list', '--help'], results_arg_parser=my_parser)
self.assertIn('--browser=BROWSER_TYPE', sys.stdout.getvalue())
self.assertNotIn('--extra-special-option', sys.stdout.getvalue())
diff --git a/telemetry/telemetry/decorators.py b/telemetry/telemetry/decorators.py
index 8b51e85f56..56063037b5 100644
--- a/telemetry/telemetry/decorators.py
+++ b/telemetry/telemetry/decorators.py
@@ -106,12 +106,14 @@ def Disabled(*args):
"""Decorator for disabling tests/benchmarks.
If args are given, the test will be disabled if ANY of the args match the
- browser type, OS name or OS version:
- @Disabled('canary') # Disabled for canary browsers
- @Disabled('win') # Disabled on Windows.
- @Disabled('win', 'linux') # Disabled on both Windows and Linux.
- @Disabled('mavericks') # Disabled on Mac Mavericks (10.9) only.
- @Disabled('all') # Unconditionally disabled.
+ browser type, OS name, OS version, or any tags returned by a PossibleBrowser's
+ GetTypExpectationsTags():
+ @Disabled('canary') # Disabled for canary browsers
+ @Disabled('win') # Disabled on Windows.
+ @Disabled('win', 'linux') # Disabled on both Windows and Linux.
+ @Disabled('mavericks') # Disabled on Mac Mavericks (10.9) only.
+ @Disabled('all') # Unconditionally disabled.
+ @Disabled('chromeos-local') # Disabled in ChromeOS local mode.
"""
def _Disabled(func):
@@ -140,12 +142,14 @@ def _Disabled(func):
def Enabled(*args):
"""Decorator for enabling tests/benchmarks.
- The test will be enabled if ANY of the args match the browser type, OS name
- or OS version:
- @Enabled('canary') # Enabled only for canary browsers
- @Enabled('win') # Enabled only on Windows.
- @Enabled('win', 'linux') # Enabled only on Windows or Linux.
- @Enabled('mavericks') # Enabled only on Mac Mavericks (10.9).
+ The test will be enabled if ANY of the args match the browser type, OS name,
+ OS version, or any tags returned by a PossibleBrowser's
+ GetTypExpectationsTags():
+ @Enabled('canary') # Enabled only for canary browsers
+ @Enabled('win') # Enabled only on Windows.
+ @Enabled('win', 'linux') # Enabled only on Windows or Linux.
+ @Enabled('mavericks') # Enabled only on Mac Mavericks (10.9).
+ @Enabled('chromeos-local') # Enabled only in ChromeOS local mode.
"""
def _Enabled(func):
@@ -378,4 +382,5 @@ def _PlatformAttributes(possible_browser):
if attribute != 'reference':
ref_attributes.append('%s-reference' % attribute)
attributes.extend(ref_attributes)
+ attributes.extend(possible_browser.GetTypExpectationsTags())
return attributes
diff --git a/telemetry/telemetry/decorators_unittest.py b/telemetry/telemetry/decorators_unittest.py
index adfa9a8b0c..3f202784b9 100644
--- a/telemetry/telemetry/decorators_unittest.py
+++ b/telemetry/telemetry/decorators_unittest.py
@@ -187,6 +187,7 @@ def setUp(self):
self.possible_browser.browser_type = 'browser_type'
self.possible_browser.platform = fake_platform
self.possible_browser.supports_tab_control = False
+ self.possible_browser.GetTypExpectationsTags.return_value = []
def testEnabledStrings(self):
test = FakeTest()
@@ -277,6 +278,10 @@ def testDisabledStrings(self):
'another_os_version_name-reference'])
self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+ self.possible_browser.GetTypExpectationsTags.return_value = ['typ_value']
+ test.SetDisabledStrings(['typ_value'])
+ self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+
def testReferenceEnabledStrings(self):
self.possible_browser.browser_type = 'reference'
test = FakeTest()
@@ -303,6 +308,11 @@ def testReferenceEnabledStrings(self):
'another_os_version_name-reference'])
self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+ test.SetEnabledStrings(['typ_value'])
+ self.assertTrue(decorators.ShouldSkip(test, self.possible_browser)[0])
+ self.possible_browser.GetTypExpectationsTags.return_value = ['typ_value']
+ self.assertFalse(decorators.ShouldSkip(test, self.possible_browser)[0])
+
def testReferenceDisabledStrings(self):
self.possible_browser.browser_type = 'reference'
test = FakeTest()
diff --git a/telemetry/telemetry/internal/backends/chrome/chrome_startup_args.py b/telemetry/telemetry/internal/backends/chrome/chrome_startup_args.py
index 5d8f8f2a6a..595eb7d82f 100644
--- a/telemetry/telemetry/internal/backends/chrome/chrome_startup_args.py
+++ b/telemetry/telemetry/internal/backends/chrome/chrome_startup_args.py
@@ -100,8 +100,13 @@ def GetReplayArgs(network_backend, supports_spki_list=True):
# Send all browser traffic (including requests to 127.0.0.1 and localhost) to
# ts_proxy_server.
+ # The proxy should NOT be set to "localhost", otherwise Chrome will first
+ # attempt to use the IPv6 version (::1) before falling back to IPv4. This
+ # causes issues if the IPv4 port we got randomly assigned on the device is
+ # also being used in IPv6 by some other process. See
+ # https://crbug.com/1005971 for more information.
proxy_port = network_backend.forwarder.remote_port
- args.append('--proxy-server=socks://localhost:%s' % proxy_port)
+ args.append('--proxy-server=socks://127.0.0.1:%s' % proxy_port)
args.append('--proxy-bypass-list=<-loopback>')
if not network_backend.use_live_traffic:
diff --git a/telemetry/telemetry/internal/backends/chrome/chrome_startup_args_unittest.py b/telemetry/telemetry/internal/backends/chrome/chrome_startup_args_unittest.py
index 411e2b4c95..02a63c463c 100644
--- a/telemetry/telemetry/internal/backends/chrome/chrome_startup_args_unittest.py
+++ b/telemetry/telemetry/internal/backends/chrome/chrome_startup_args_unittest.py
@@ -69,7 +69,7 @@ def testReplayArgsBasic(self):
network_backend.forwarder.remote_port = 789
expected_args = [
- '--proxy-server=socks://localhost:789',
+ '--proxy-server=socks://127.0.0.1:789',
'--proxy-bypass-list=<-loopback>',
'--ignore-certificate-errors-spki-list='
'PhrPvGIaAMmd29hj8BCZOq096yj7uMpRNHpn5PDxI6I=']
@@ -84,7 +84,7 @@ def testReplayArgsNoSpkiSupport(self):
network_backend.forwarder.remote_port = 789
expected_args = [
- '--proxy-server=socks://localhost:789',
+ '--proxy-server=socks://127.0.0.1:789',
'--proxy-bypass-list=<-loopback>',
'--ignore-certificate-errors']
self.assertItemsEqual(
@@ -98,7 +98,7 @@ def testReplayArgsUseLiveTrafficWithSpkiSupport(self):
network_backend.forwarder.remote_port = 789
expected_args = [
- '--proxy-server=socks://localhost:789',
+ '--proxy-server=socks://127.0.0.1:789',
'--proxy-bypass-list=<-loopback>']
self.assertItemsEqual(
expected_args,
@@ -112,7 +112,7 @@ def testReplayArgsUseLiveTrafficWithNoSpkiSupport(self):
network_backend.forwarder.remote_port = 123
expected_args = [
- '--proxy-server=socks://localhost:123',
+ '--proxy-server=socks://127.0.0.1:123',
'--proxy-bypass-list=<-loopback>']
self.assertItemsEqual(
expected_args,
diff --git a/telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py b/telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
index 87105aab10..bf1e26394e 100644
--- a/telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
+++ b/telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
@@ -4,10 +4,7 @@
import logging
import os
-import platform
import shutil
-import subprocess
-import sys
import tempfile
import time
@@ -17,14 +14,12 @@
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.internal.backends.chrome import chrome_browser_backend
-from telemetry.internal.backends.chrome import desktop_minidump_finder
+from telemetry.internal.backends.chrome import cros_minidump_symbolizer
+from telemetry.internal.backends.chrome import minidump_finder
from telemetry.internal.backends.chrome import misc_web_contents_backend
-from telemetry.internal.util import binary_manager
from telemetry.internal.util import format_for_logging
-# TODO(https://crbug.com/994274): Move the minidump symbolization code in this
-# class into a separate class.
class CrOSBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
def __init__(self, cros_platform_backend, browser_options,
browser_directory, profile_directory, is_guest, build_dir):
@@ -73,12 +68,7 @@ def Start(self, startup_args):
# prior to restarting chrome.
self._cri.RmRF(self._GetDevToolsActivePortPath())
- # DesktopMinidumpFinder is meant for Linux/Mac/Windows, but since dumps are
- # pulled off the emulator/device onto the host, and we only support Linux
- # hosts, we can use it as-is.
- # TODO(https://crbug.com/994274): Rename this class when minidump
- # symbolization code is consolidated.
- self._dump_finder = desktop_minidump_finder.DesktopMinidumpFinder(
+ self._dump_finder = minidump_finder.MinidumpFinder(
self.browser.platform.GetOSName(), self.browser.platform.GetArchName())
# Escape all commas in the startup arguments we pass to Chrome
@@ -330,69 +320,6 @@ def _GetStackFromMinidump(self, minidump):
None if the stack could not be retrieved for some reason, otherwise a
string containing the stack trace.
"""
- if platform.system() != 'Linux' and platform.system() != 'Darwin':
- logging.warning('Cannot get stack traces unless running on a Posix host.')
- return None
- if not self._build_dir:
- logging.warning('Cannot get stack traces unless '
- '--chromium-output-directory is specified.')
- return None
-
- os_name = 'linux'
- # TODO(https://crbug.com/994265): Figure out if this works on all host
- # architectures or if we need to create a mapping from Python architectures
- # to Telemetry architectures.
- arch_name = platform.machine()
- stackwalk = binary_manager.FetchPath(
- 'minidump_stackwalk', arch_name, os_name)
- if not stackwalk:
- logging.warning('minidump_stackwalk binary not found.')
- return None
- if not self._dump_finder.MinidumpObtainedFromCrashpad(minidump):
- with open(minidump, 'rb') as infile:
- minidump += '.stripped'
- with open(minidump, 'wb') as outfile:
- outfile.write(''.join(infile.read().partition('MDMP')[1:]))
-
- symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
- GenerateBreakpadSymbols(arch_name, os_name,
- symbols_path, self._build_dir)
-
- return subprocess.check_output([stackwalk, minidump, symbols_path],
- stderr=open(os.devnull, 'w'))
-
-
-def GenerateBreakpadSymbols(arch, os_name, symbols_dir, build_dir):
- """Generates Breakpad symbols for the given build directory.
-
- Args:
- arch: the architecture of the host, used to find dependencies
- os_name: the OS of the host, used to find dependencies
- symbols_dir: the directory where Breakpad symbols will be dumped to
- build_dir: the directory containing Chromium build artifacts to generate
- symbols from.
- """
- logging.info('Dumping breakpad symbols.')
- generate_breakpad_symbols_command = binary_manager.FetchPath(
- 'generate_breakpad_symbols', arch, os_name)
- if not generate_breakpad_symbols_command:
- logging.warning('generate_breakpad_symbols binary not found')
- return
-
- cmd = [
- sys.executable,
- generate_breakpad_symbols_command,
- '--binary=%s' % os.path.join(build_dir, 'chrome'),
- '--symbols-dir=%s' % symbols_dir,
- '--build-dir=%s' % build_dir,
- '--platform=chromeos',
- ]
-
- try:
- subprocess.check_output(cmd)
- except subprocess.CalledProcessError as e:
- logging.error(e.output)
- logging.warning('Failed to execute "%s"', ' '.join(cmd))
- return
-
-
+ dump_symbolizer = cros_minidump_symbolizer.CrOSMinidumpSymbolizer(
+ self._dump_finder, self._build_dir)
+ return dump_symbolizer.SymbolizeMinidump(minidump)
diff --git a/telemetry/telemetry/internal/backends/chrome/cros_minidump_symbolizer.py b/telemetry/telemetry/internal/backends/chrome/cros_minidump_symbolizer.py
new file mode 100644
index 0000000000..506b2ace01
--- /dev/null
+++ b/telemetry/telemetry/internal/backends/chrome/cros_minidump_symbolizer.py
@@ -0,0 +1,46 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import platform
+
+from telemetry.internal.backends.chrome import minidump_symbolizer
+
+
+class CrOSMinidumpSymbolizer(minidump_symbolizer.MinidumpSymbolizer):
+ def __init__(self, dump_finder, build_dir):
+ """Class for handling all minidump symbolizing code on ChromeOS.
+
+ Args:
+ dump_finder: The minidump_finder.MinidumpFinder instance that is being
+ used to find minidumps for the test.
+ build_dir: The directory containing Chromium build artifacts to generate
+ symbols from.
+ """
+ super(CrOSMinidumpSymbolizer, self).__init__(
+ 'linux', platform.machine(), dump_finder, build_dir)
+
+ def SymbolizeMinidump(self, minidump):
+ if platform.system() != 'Linux' and platform.system() != 'Darwin':
+ logging.warning('Cannot get stack traces unless running on a Posix host.')
+ return None
+ if not self._build_dir:
+ logging.warning('Cannot get stack traces unless '
+ '--chromium-output-directory is specified.')
+ return None
+ return super(CrOSMinidumpSymbolizer, self).SymbolizeMinidump(minidump)
+
+ def GetSymbolBinaries(self, minidump):
+ """Returns a list of paths to binaries where symbols may be located.
+
+ Args:
+ minidump: The path to the minidump being symbolized.
+ """
+ del minidump # unused.
+ return [os.path.join(self._build_dir, 'chrome')]
+
+ def GetBreakpadPlatformOverride(self):
+ """Returns the platform to be passed to generate_breakpad_symbols."""
+ return 'chromeos'
diff --git a/telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py b/telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py
index 9c9a1d22e8..d364c11909 100644
--- a/telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py
+++ b/telemetry/telemetry/internal/backends/chrome/desktop_browser_backend.py
@@ -19,80 +19,16 @@
from py_utils import cloud_storage
from py_utils import exc_util
-from telemetry.internal.util import binary_manager
from telemetry.core import exceptions
from telemetry.internal.backends.chrome import chrome_browser_backend
-from telemetry.internal.backends.chrome import desktop_minidump_finder
+from telemetry.internal.backends.chrome import minidump_finder
+from telemetry.internal.backends.chrome import desktop_minidump_symbolizer
from telemetry.internal.util import format_for_logging
-from telemetry.internal.util import path
DEVTOOLS_ACTIVE_PORT_FILE = 'DevToolsActivePort'
-def GetSymbolBinaries(minidump, arch_name, os_name):
- # Returns binary file where symbols are located.
- minidump_dump = binary_manager.FetchPath('minidump_dump', arch_name, os_name)
- assert minidump_dump
-
- symbol_binaries = []
-
- minidump_cmd = [minidump_dump, minidump]
- try:
- with open(os.devnull, 'wb') as dev_null:
- minidump_output = subprocess.check_output(minidump_cmd, stderr=dev_null)
- except subprocess.CalledProcessError as e:
- # For some reason minidump_dump always fails despite successful dumping.
- minidump_output = e.output
-
- minidump_binary_re = re.compile(r'\W+\(code_file\)\W+=\W\"(.*)\"')
- for minidump_line in minidump_output.splitlines():
- line_match = minidump_binary_re.match(minidump_line)
- if line_match:
- binary_path = line_match.group(1)
- if not os.path.isfile(binary_path):
- continue
-
- # Filter out system binaries.
- if (binary_path.startswith('/usr/lib/') or
- binary_path.startswith('/System/Library/') or
- binary_path.startswith('/lib/')):
- continue
-
- # Filter out other binary file types which have no symbols.
- if (binary_path.endswith('.pak') or
- binary_path.endswith('.bin') or
- binary_path.endswith('.dat') or
- binary_path.endswith('.ttf')):
- continue
-
- symbol_binaries.append(binary_path)
- return symbol_binaries
-
-
-def GenerateBreakpadSymbols(minidump, arch, os_name, symbols_dir, browser_dir):
- logging.info('Dumping breakpad symbols.')
- generate_breakpad_symbols_command = binary_manager.FetchPath(
- 'generate_breakpad_symbols', arch, os_name)
- if not generate_breakpad_symbols_command:
- return
-
- for binary_path in GetSymbolBinaries(minidump, arch, os_name):
- cmd = [
- sys.executable,
- generate_breakpad_symbols_command,
- '--binary=%s' % binary_path,
- '--symbols-dir=%s' % symbols_dir,
- '--build-dir=%s' % browser_dir,
- ]
-
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- logging.warning('Failed to execute "%s"', ' '.join(cmd))
- return
-
-
class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
"""The backend for controlling a locally-executed browser instance, on Linux,
Mac or Windows.
@@ -151,35 +87,6 @@ def supports_uploading_logs(self):
def _GetDevToolsActivePortPath(self):
return os.path.join(self.profile_directory, DEVTOOLS_ACTIVE_PORT_FILE)
- def _GetCdbPath(self):
- # cdb.exe might have been co-located with the browser's executable
- # during the build, but that's not a certainty. (This is only done
- # in Chromium builds on the bots, which is why it's not a hard
- # requirement.) See if it's available.
- colocated_cdb = os.path.join(self.browser_directory, 'cdb', 'cdb.exe')
- if path.IsExecutable(colocated_cdb):
- return colocated_cdb
- possible_paths = (
- # Installed copies of the Windows SDK.
- os.path.join('Windows Kits', '*', 'Debuggers', 'x86'),
- os.path.join('Windows Kits', '*', 'Debuggers', 'x64'),
- # Old copies of the Debugging Tools for Windows.
- 'Debugging Tools For Windows',
- 'Debugging Tools For Windows (x86)',
- 'Debugging Tools For Windows (x64)',
- # The hermetic copy of the Windows toolchain in depot_tools.
- os.path.join('win_toolchain', 'vs_files', '*', 'win_sdk',
- 'Debuggers', 'x86'),
- os.path.join('win_toolchain', 'vs_files', '*', 'win_sdk',
- 'Debuggers', 'x64'),
- )
- for possible_path in possible_paths:
- app_path = os.path.join(possible_path, 'cdb.exe')
- app_path = path.FindInstalledWindowsApplication(app_path)
- if app_path:
- return app_path
- return None
-
def _FindDevToolsPortAndTarget(self):
devtools_file_path = self._GetDevToolsActivePortPath()
if not os.path.isfile(devtools_file_path):
@@ -200,7 +107,7 @@ def _FindDevToolsPortAndTarget(self):
def Start(self, startup_args):
assert not self._proc, 'Must call Close() before Start()'
- self._dump_finder = desktop_minidump_finder.DesktopMinidumpFinder(
+ self._dump_finder = minidump_finder.MinidumpFinder(
self.browser.platform.GetOSName(), self.browser.platform.GetArchName())
# macOS displays a blocking crash resume dialog that we need to suppress.
@@ -325,53 +232,11 @@ def _IsExecutableStripped(self):
return False
def _GetStackFromMinidump(self, minidump):
- os_name = self.browser.platform.GetOSName()
- if os_name == 'win':
- cdb = self._GetCdbPath()
- if not cdb:
- logging.warning('cdb.exe not found.')
- return None
- # Move to the thread which triggered the exception (".ecxr"). Then include
- # a description of the exception (".lastevent"). Also include all the
- # threads' stacks ("~*kb30") as well as the ostensibly crashed stack
- # associated with the exception context record ("kb30"). Note that stack
- # dumps, including that for the crashed thread, may not be as precise as
- # the one starting from the exception context record.
- # Specify kb instead of k in order to get four arguments listed, for
- # easier diagnosis from stacks.
- output = subprocess.check_output([cdb, '-y', self.browser_directory,
- '-c', '.ecxr;.lastevent;kb30;~*kb30;q',
- '-z', minidump])
- # The output we care about starts with "Last event:" or possibly
- # other things we haven't seen yet. If we can't find the start of the
- # last event entry, include output from the beginning.
- info_start = 0
- info_start_match = re.search("Last event:", output, re.MULTILINE)
- if info_start_match:
- info_start = info_start_match.start()
- info_end = output.find('quit:')
- return output[info_start:info_end]
-
- arch_name = self.browser.platform.GetArchName()
- stackwalk = binary_manager.FetchPath(
- 'minidump_stackwalk', arch_name, os_name)
- if not stackwalk:
- logging.warning('minidump_stackwalk binary not found.')
- return None
- # We only want this logic on linux platforms that are still using breakpad.
- # See crbug.com/667475
- if not self._dump_finder.MinidumpObtainedFromCrashpad(minidump):
- with open(minidump, 'rb') as infile:
- minidump += '.stripped'
- with open(minidump, 'wb') as outfile:
- outfile.write(''.join(infile.read().partition('MDMP')[1:]))
-
- symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
- GenerateBreakpadSymbols(minidump, arch_name, os_name,
- symbols_path, self.browser_directory)
-
- return subprocess.check_output([stackwalk, minidump, symbols_path],
- stderr=open(os.devnull, 'w'))
+ dump_symbolizer = desktop_minidump_symbolizer.DesktopMinidumpSymbolizer(
+ self.browser.platform.GetOSName(),
+ self.browser.platform.GetArchName(),
+ self._dump_finder, self.browser_directory)
+ return dump_symbolizer.SymbolizeMinidump(minidump)
def _UploadMinidumpToCloudStorage(self, minidump_path):
""" Upload minidump_path to cloud storage and return the cloud storage url.
diff --git a/telemetry/telemetry/internal/backends/chrome/desktop_minidump_symbolizer.py b/telemetry/telemetry/internal/backends/chrome/desktop_minidump_symbolizer.py
new file mode 100644
index 0000000000..7924236ee8
--- /dev/null
+++ b/telemetry/telemetry/internal/backends/chrome/desktop_minidump_symbolizer.py
@@ -0,0 +1,137 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import subprocess
+
+from telemetry.internal.backends.chrome import minidump_symbolizer
+from telemetry.internal.util import binary_manager
+from telemetry.internal.util import path
+
+class DesktopMinidumpSymbolizer(minidump_symbolizer.MinidumpSymbolizer):
+ def __init__(self, os_name, arch_name, dump_finder, build_dir):
+ """Class for handling all minidump symbolizing code on Desktop platforms.
+
+ Args:
+ os_name: The OS of the test machine.
+ arch_name: The arch name of the test machine.
+ dump_finder: The minidump_finder.MinidumpFinder instance that is being
+ used to find minidumps for the test.
+ build_dir: The directory containing Chromium build artifacts to generate
+ symbols from.
+ """
+ super(DesktopMinidumpSymbolizer, self).__init__(
+ os_name, arch_name, dump_finder, build_dir)
+
+ def SymbolizeMinidump(self, minidump):
+ """Gets the stack trace from the given minidump.
+
+ Args:
+ minidump: the path to the minidump on disk
+
+ Returns:
+ None if the stack could not be retrieved for some reason, otherwise a
+ string containing the stack trace.
+ """
+ if self._os_name == 'win':
+ cdb = self._GetCdbPath()
+ if not cdb:
+ logging.warning('cdb.exe not found.')
+ return None
+ # Move to the thread which triggered the exception (".ecxr"). Then include
+ # a description of the exception (".lastevent"). Also include all the
+ # threads' stacks ("~*kb30") as well as the ostensibly crashed stack
+ # associated with the exception context record ("kb30"). Note that stack
+ # dumps, including that for the crashed thread, may not be as precise as
+ # the one starting from the exception context record.
+ # Specify kb instead of k in order to get four arguments listed, for
+ # easier diagnosis from stacks.
+ output = subprocess.check_output([cdb, '-y', self._build_dir,
+ '-c', '.ecxr;.lastevent;kb30;~*kb30;q',
+ '-z', minidump])
+ # The output we care about starts with "Last event:" or possibly
+ # other things we haven't seen yet. If we can't find the start of the
+ # last event entry, include output from the beginning.
+ info_start = 0
+ info_start_match = re.search("Last event:", output, re.MULTILINE)
+ if info_start_match:
+ info_start = info_start_match.start()
+ info_end = output.find('quit:')
+ return output[info_start:info_end]
+ return super(DesktopMinidumpSymbolizer, self).SymbolizeMinidump(minidump)
+
+ def GetSymbolBinaries(self, minidump):
+ """Returns a list of paths to binaries where symbols may be located.
+
+ Args:
+ minidump: The path to the minidump being symbolized.
+ """
+ minidump_dump = binary_manager.FetchPath(
+ 'minidump_dump', self._arch_name, self._os_name)
+ assert minidump_dump
+
+ symbol_binaries = []
+
+ minidump_cmd = [minidump_dump, minidump]
+ try:
+ with open(os.devnull, 'wb') as dev_null:
+ minidump_output = subprocess.check_output(minidump_cmd, stderr=dev_null)
+ except subprocess.CalledProcessError as e:
+ # For some reason minidump_dump always fails despite successful dumping.
+ minidump_output = e.output
+
+ minidump_binary_re = re.compile(r'\W+\(code_file\)\W+=\W\"(.*)\"')
+ for minidump_line in minidump_output.splitlines():
+ line_match = minidump_binary_re.match(minidump_line)
+ if line_match:
+ binary_path = line_match.group(1)
+ if not os.path.isfile(binary_path):
+ continue
+
+ # Filter out system binaries.
+ if (binary_path.startswith('/usr/lib/') or
+ binary_path.startswith('/System/Library/') or
+ binary_path.startswith('/lib/')):
+ continue
+
+ # Filter out other binary file types which have no symbols.
+ if (binary_path.endswith('.pak') or
+ binary_path.endswith('.bin') or
+ binary_path.endswith('.dat') or
+ binary_path.endswith('.ttf')):
+ continue
+
+ symbol_binaries.append(binary_path)
+ return symbol_binaries
+
+ def _GetCdbPath(self):
+ # cdb.exe might have been co-located with the browser's executable
+ # during the build, but that's not a certainty. (This is only done
+ # in Chromium builds on the bots, which is why it's not a hard
+ # requirement.) See if it's available.
+ colocated_cdb = os.path.join(self._build_dir, 'cdb', 'cdb.exe')
+ if path.IsExecutable(colocated_cdb):
+ return colocated_cdb
+ possible_paths = (
+ # Installed copies of the Windows SDK.
+ os.path.join('Windows Kits', '*', 'Debuggers', 'x86'),
+ os.path.join('Windows Kits', '*', 'Debuggers', 'x64'),
+ # Old copies of the Debugging Tools for Windows.
+ 'Debugging Tools For Windows',
+ 'Debugging Tools For Windows (x86)',
+ 'Debugging Tools For Windows (x64)',
+ # The hermetic copy of the Windows toolchain in depot_tools.
+ os.path.join('win_toolchain', 'vs_files', '*', 'win_sdk',
+ 'Debuggers', 'x86'),
+ os.path.join('win_toolchain', 'vs_files', '*', 'win_sdk',
+ 'Debuggers', 'x64'),
+ )
+ for possible_path in possible_paths:
+ app_path = os.path.join(possible_path, 'cdb.exe')
+ app_path = path.FindInstalledWindowsApplication(app_path)
+ if app_path:
+ return app_path
+ return None
diff --git a/telemetry/telemetry/internal/backends/chrome/desktop_minidump_finder.py b/telemetry/telemetry/internal/backends/chrome/minidump_finder.py
similarity index 97%
rename from telemetry/telemetry/internal/backends/chrome/desktop_minidump_finder.py
rename to telemetry/telemetry/internal/backends/chrome/minidump_finder.py
index 505302e687..bec18674d3 100644
--- a/telemetry/telemetry/internal/backends/chrome/desktop_minidump_finder.py
+++ b/telemetry/telemetry/internal/backends/chrome/minidump_finder.py
@@ -1,4 +1,4 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
+# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -22,8 +22,8 @@ def _ParseCrashpadDateTime(date_time_str):
return datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
-class DesktopMinidumpFinder(object):
- """Handles finding Crashpad/Breakpad minidumps on desktop platforms.
+class MinidumpFinder(object):
+ """Handles finding Crashpad/Breakpad minidumps.
In addition to whatever data is expected to be returned, most public methods
also return a list of strings. These strings are what would normally be
@@ -32,7 +32,7 @@ class DesktopMinidumpFinder(object):
BrowserBackend.GetRecentMinidumpPathWithTimeout().
"""
def __init__(self, os_name, arch_name):
- super(DesktopMinidumpFinder, self).__init__()
+ super(MinidumpFinder, self).__init__()
self._os = os_name
self._arch = arch_name
self._minidump_path_crashpad_retrieval = {}
diff --git a/telemetry/telemetry/internal/backends/chrome/minidump_symbolizer.py b/telemetry/telemetry/internal/backends/chrome/minidump_symbolizer.py
new file mode 100644
index 0000000000..c28483cc6b
--- /dev/null
+++ b/telemetry/telemetry/internal/backends/chrome/minidump_symbolizer.py
@@ -0,0 +1,107 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+from telemetry.internal.util import binary_manager
+
+
+class MinidumpSymbolizer(object):
+ def __init__(self, os_name, arch_name, dump_finder, build_dir):
+ """Abstract class for handling all minidump symbolizing code.
+
+ Args:
+ os_name: The OS of the host (if running the test on a device), or the OS
+ of the test machine (if running the test locally).
+ arch_name: The arch name of the host (if running the test on a device), or
+ the arch of the test machine (if running the test locally).
+ dump_finder: The minidump_finder.MinidumpFinder instance that is being
+ used to find minidumps for the test.
+ build_dir: The directory containing Chromium build artifacts to generate
+ symbols from.
+ """
+ self._os_name = os_name
+ self._arch_name = arch_name
+ self._dump_finder = dump_finder
+ self._build_dir = build_dir
+
+ def SymbolizeMinidump(self, minidump):
+ """Gets the stack trace from the given minidump.
+
+ Args:
+ minidump: the path to the minidump on disk
+
+ Returns:
+ None if the stack could not be retrieved for some reason, otherwise a
+ string containing the stack trace.
+ """
+ stackwalk = binary_manager.FetchPath(
+ 'minidump_stackwalk', self._arch_name, self._os_name)
+ if not stackwalk:
+ logging.warning('minidump_stackwalk binary not found.')
+ return None
+ # We only want this logic on linux platforms that are still using breakpad.
+ # See crbug.com/667475
+ if not self._dump_finder.MinidumpObtainedFromCrashpad(minidump):
+ with open(minidump, 'rb') as infile:
+ minidump += '.stripped'
+ with open(minidump, 'wb') as outfile:
+ outfile.write(''.join(infile.read().partition('MDMP')[1:]))
+
+ symbols_dir = tempfile.mkdtemp()
+ try:
+ self._GenerateBreakpadSymbols(symbols_dir, minidump)
+ return subprocess.check_output([stackwalk, minidump, symbols_dir],
+ stderr=open(os.devnull, 'w'))
+ finally:
+ shutil.rmtree(symbols_dir)
+
+ def GetSymbolBinaries(self, minidump):
+ """Returns a list of paths to binaries where symbols may be located.
+
+ Args:
+ minidump: The path to the minidump being symbolized.
+ """
+ raise NotImplementedError()
+
+ def GetBreakpadPlatformOverride(self):
+ """Returns the platform to be passed to generate_breakpad_symbols."""
+ return None
+
+ def _GenerateBreakpadSymbols(self, symbols_dir, minidump):
+ """Generates Breakpad symbols for use with stackwalking tools.
+
+ Args:
+ symbols_dir: The directory where symbols will be written to.
+ minidump: The path to the minidump being symbolized.
+ """
+ logging.info('Dumping Breakpad symbols.')
+ generate_breakpad_symbols_command = binary_manager.FetchPath(
+ 'generate_breakpad_symbols', self._arch_name, self._os_name)
+ if not generate_breakpad_symbols_command:
+ logging.warning('generate_breakpad_symbols binary not found')
+ return
+
+ for binary_path in self.GetSymbolBinaries(minidump):
+ cmd = [
+ sys.executable,
+ generate_breakpad_symbols_command,
+ '--binary=%s' % binary_path,
+ '--symbols-dir=%s' % symbols_dir,
+ '--build-dir=%s' % self._build_dir,
+ ]
+ if self.GetBreakpadPlatformOverride():
+ cmd.append('--platform=%s' % self.GetBreakpadPlatformOverride())
+
+ try:
+ subprocess.check_output(cmd)
+ except subprocess.CalledProcessError as e:
+ logging.error(e.output)
+ logging.warning('Failed to execute "%s"', ' '.join(cmd))
+ return
diff --git a/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend.py b/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend.py
index fd7156968d..45ce3e5e4a 100644
--- a/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend.py
+++ b/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend.py
@@ -5,6 +5,7 @@
import base64
import json
import logging
+import re
import socket
import time
import traceback
@@ -104,26 +105,43 @@ class TracingBackend(object):
_TRACING_DOMAIN = 'Tracing'
- def __init__(self, inspector_socket, config=None):
+ def __init__(self, inspector_socket, startup_tracing_config=None):
self._inspector_websocket = inspector_socket
self._inspector_websocket.RegisterDomain(
self._TRACING_DOMAIN, self._NotificationHandler)
- # If we have a config at this point it means that startup tracing has
- # already started.
- self._is_tracing_running = config is not None
- self._trace_format = None
- if self._is_tracing_running:
- self._trace_format = config.chrome_trace_config.trace_format
- self._start_issued = False
+ self._is_tracing_running = False
self._can_collect_data = False
self._has_received_all_tracing_data = False
self._trace_data_builder = None
self._data_loss_occurred = False
+ if startup_tracing_config is not None:
+ self._TakeOwnershipOfTracingSession(startup_tracing_config)
@property
def is_tracing_running(self):
return self._is_tracing_running
+ def _TakeOwnershipOfTracingSession(self, config):
+ # Startup tracing should already be running, but we still need to send a
+ # Tracing.start command for DevTools to become owner of the tracing session
+ # and to update the transfer settings.
+ # This also ensures that tracing data from early startup is flushed to the
+ # tracing service before the thread-local buffers for startup tracing are
+ # exhausted (crbug.com/914092).
+ response = self._SendTracingStartRequest(
+ trace_format=config.chrome_trace_config.trace_format)
+ # Note: we do in fact expect an "error" response as the call, in addition
+ # to updating the transfer settings for trace collection, also serves to
+ # confirm the fact that startup tracing is in place. In fact, it would be
+ # an error if this request succeeds.
+ error_message = response.get('error', {}).get('message', '')
+ if not re.match(r'Tracing.*already.*started', error_message):
+ raise TracingUnexpectedResponseException(
+ 'Tracing.start failed to confirm startup tracing:\n' +
+ json.dumps(response, indent=2))
+ logging.info('Successfully confirmed startup tracing is in place.')
+ self._is_tracing_running = True
+
def StartTracing(self, chrome_trace_config, timeout=20):
"""When first called, starts tracing, and returns True.
@@ -133,29 +151,51 @@ def StartTracing(self, chrome_trace_config, timeout=20):
return False
assert not self._can_collect_data, 'Data not collected from last trace.'
# Reset collected tracing data from previous tracing calls.
-
self._has_received_all_tracing_data = False
self._data_loss_occurred = False
-
if not self.IsTracingSupported():
raise TracingUnsupportedException(
'Chrome tracing not supported for this app.')
-
- req = _MakeTracingStartRequest(
+ response = self._SendTracingStartRequest(
trace_config=chrome_trace_config.GetChromeTraceConfigForDevTools(),
- trace_format=chrome_trace_config.trace_format)
- logging.info('Start Tracing Request: %r', req)
- response = self._inspector_websocket.SyncRequest(req, timeout)
-
+ trace_format=chrome_trace_config.trace_format, timeout=timeout)
if 'error' in response:
raise TracingUnexpectedResponseException(
- 'Inspector returned unexpected response for '
- 'Tracing.start:\n' + json.dumps(response, indent=2))
-
+ 'Inspector returned unexpected response for Tracing.start:\n' +
+ json.dumps(response, indent=2))
+ logging.info('Successfully started tracing.')
self._is_tracing_running = True
- self._start_issued = True
return True
+ def _SendTracingStartRequest(self, trace_config=None, trace_format=None,
+ timeout=20):
+ """Send a Tracing.start request and wait for a response.
+
+ Args:
+ trace_config: A dictionary speficying to Chrome what should be traced.
+ For example: {'recordMode': 'recordUntilFull', 'includedCategories':
+ ['x', 'y'], ...}. It is required to start tracing via DevTools, and
+ should be omitted if startup tracing was already started.
+ trace_format: An optional string identifying the requested format in which
+ to stream the recorded trace back to the client. Chrome currently
+ defaults to JSON if omitted.
+
+ Returns:
+ A dictionary suitable to pass as a DevTools request.
+ """
+ # Using 'gzip' compression reduces the amount of data transferred over
+ # websocket. This reduces the time waiting for all data to be received,
+ # especially when the test is running on an android device. Using
+ # compression can save upto 10 seconds (or more) for each story.
+ params = {
+ 'transferMode': 'ReturnAsStream',
+ 'streamCompression': 'gzip',
+ 'traceConfig': trace_config or {}}
+ if trace_format is not None:
+ params['streamFormat'] = trace_format
+ request = {'method': 'Tracing.start', 'params': params}
+ return self._inspector_websocket.SyncRequest(request, timeout)
+
def RecordClockSyncMarker(self, sync_id):
assert self.is_tracing_running, 'Tracing must be running to clock sync.'
req = {
@@ -177,12 +217,6 @@ def StopTracing(self):
if not self.is_tracing_running:
raise TracingHasNotRunException()
else:
- if not self._start_issued:
- # Tracing is running but start was not issued so, startup tracing must
- # be in effect. Issue another Tracing.start to update the transfer mode.
- req = _MakeTracingStartRequest(trace_format=self._trace_format)
- self._inspector_websocket.SendAndIgnoreResponse(req)
-
req = {'method': 'Tracing.end'}
response = self._inspector_websocket.SyncRequest(req, timeout=2)
if 'error' in response:
@@ -190,8 +224,8 @@ def StopTracing(self):
'Inspector returned unexpected response for '
'Tracing.end:\n' + json.dumps(response, indent=2))
+ logging.info('Successfully stopped tracing.')
self._is_tracing_running = False
- self._start_issued = False
self._can_collect_data = True
def DumpMemory(self, timeout=None):
@@ -230,18 +264,17 @@ def DumpMemory(self, timeout=None):
raise TracingUnrecoverableException(
'Exception raised while sending a Tracing.requestMemoryDump '
'request:\n' + traceback.format_exc())
-
-
- if ('error' in response or
- 'result' not in response or
- 'success' not in response['result'] or
- 'dumpGuid' not in response['result']):
+ dump_id = None
+ try:
+ if response['result']['success'] and 'error' not in response:
+ dump_id = response['result']['dumpGuid']
+ except KeyError:
+ pass # If any of the keys are missing, there is an error and no dump_id.
+ if not dump_id:
raise TracingUnexpectedResponseException(
'Inspector returned unexpected response for '
'Tracing.requestMemoryDump:\n' + json.dumps(response, indent=2))
-
- result = response['result']
- return result['dumpGuid'] if result['success'] else None
+ return dump_id
def CollectTraceData(self, trace_data_builder, timeout=60):
if not self._can_collect_data:
@@ -306,6 +339,7 @@ def _CollectTracingData(self, trace_data_builder, timeout):
'the timeout amount.' % elapsed_time)
finally:
self._trace_data_builder = None
+ logging.info('Successfully collected all trace data.')
def _NotificationHandler(self, res):
if res.get('method') == 'Tracing.dataCollected':
@@ -343,34 +377,6 @@ def IsTracingSupported(self):
return not res.get('response')
-def _MakeTracingStartRequest(trace_config=None, trace_format=None):
- """Build a Tracing.start request with suitable parameters.
-
- Args:
- trace_config: A dictionary speficying to Chrome what should be traced.
- For example: {'recordMode': 'recordUntilFull', 'includedCategories':
- ['x', 'y'], ...}. It is required to start tracing via DevTools, and
- should be omitted if startup tracing was already started.
- trace_format: An optional string identifying the requested format in which
- to stream the recorded trace back to the client. Chrome currently
- defaults to JSON if omitted.
-
- Returns:
- A dictionary suitable to pass as a DevTools request.
- """
- # Using 'gzip' compression reduces the amount of data transferred over
- # websocket. This reduces the time waiting for all data to be received,
- # especially when the test is running on an android device. Using
- # compression can save upto 10 seconds (or more) for each story.
- params = {
- 'transferMode': 'ReturnAsStream',
- 'streamCompression': 'gzip',
- 'traceConfig': trace_config or {}}
- if trace_format is not None:
- params['streamFormat'] = trace_format
- return {'method': 'Tracing.start', 'params': params}
-
-
def _GetTraceFileSuffix(params):
suffix = '.' + params.get('traceFormat', 'json')
if suffix == '.proto':
diff --git a/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend_unittest.py b/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend_unittest.py
index 00ac47859c..5af153c251 100644
--- a/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend_unittest.py
+++ b/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend_unittest.py
@@ -106,7 +106,8 @@ def testDumpMemoryFailure(self):
lambda req: {'result': {'success': False, 'dumpGuid': '42abc'}})
backend = tracing_backend.TracingBackend(self._inspector_socket)
- self.assertIsNone(backend.DumpMemory())
+ with self.assertRaises(tracing_backend.TracingUnexpectedResponseException):
+ backend.DumpMemory()
def testStartTracingFailure(self):
self._inspector_socket.AddResponseHandler(
diff --git a/telemetry/telemetry/internal/browser/browser_unittest.py b/telemetry/telemetry/internal/browser/browser_unittest.py
index d62e074147..3b2c2cf8e8 100644
--- a/telemetry/telemetry/internal/browser/browser_unittest.py
+++ b/telemetry/telemetry/internal/browser/browser_unittest.py
@@ -234,6 +234,7 @@ def testCreateWithBadOptionsRaises(self):
with self.browser_to_create.BrowserSession(self.finder_options):
pass # Do nothing.
+ @decorators.Disabled('chromeos') # crbug.com/1014115
def testCreateBrowserTwice(self):
try:
self.browser_to_create.SetUpEnvironment(self.browser_options)
diff --git a/telemetry/telemetry/internal/platform/cros_platform_backend.py b/telemetry/telemetry/internal/platform/cros_platform_backend.py
index 713a265ade..1813a92a44 100644
--- a/telemetry/telemetry/internal/platform/cros_platform_backend.py
+++ b/telemetry/telemetry/internal/platform/cros_platform_backend.py
@@ -132,4 +132,8 @@ def TakeScreenshot(self, file_path):
def GetTypExpectationsTags(self):
tags = super(CrosPlatformBackend, self).GetTypExpectationsTags()
tags.append('desktop')
+ if self.cri.local:
+ tags.append('chromeos-local')
+ else:
+ tags.append('chromeos-remote')
return tags
diff --git a/telemetry/telemetry/internal/results/chart_json_output_formatter.py b/telemetry/telemetry/internal/results/chart_json_output_formatter.py
deleted file mode 100644
index f83d576386..0000000000
--- a/telemetry/telemetry/internal/results/chart_json_output_formatter.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import collections
-import itertools
-import json
-import os
-
-from telemetry.internal.results import output_formatter
-from telemetry.internal.results import results_processor
-
-
-def _GetChartAndTraceName(value):
- # Telemetry names values using a chart_name.trace_name convention, wheras
- # chartjson uses a (measurement_name, trace_name) convention. This maps from
- # the Telemetry to the chartjson convention.
- if '.' in value.name:
- chart_name, trace_name = value.name.split('.')
- else:
- chart_name, trace_name = value.name, value.name
- if value.page:
- trace_name = value.page.name # Summary values for a single page.
- elif chart_name == trace_name:
- trace_name = 'summary' # Summary values for a metric on all pages.
-
- # Dashboard handles the chart_name of trace values specially: it
- # strips out the field with chart_name 'trace'. Hence in case trace
- # value has grouping_label, we preserve the chart_name.
- # For relevant section code of dashboard code that handles this, see:
- # https://github.com/catapult-project/catapult/blob/25e660b/dashboard/dashboard/add_point.py#L199#L216
- if value.grouping_label:
- chart_name = value.grouping_label + '@@' + chart_name
-
- return chart_name, trace_name
-
-
-def ResultsAsChartDict(results):
- """Produces a dict for serialization to Chart JSON format from raw values.
-
- Chart JSON is a transformation of the basic Telemetry JSON format that
- removes the page map, summarizes the raw values, and organizes the results
- by chart and trace name. This function takes the key pieces of data needed to
- perform this transformation and processes them into a dict which can be
- serialized using the json module.
-
- Design doc for schema: http://goo.gl/kOtf1Y
-
- Args:
- results: an instance of PageTestResults
-
- Returns:
- A Chart JSON dict corresponding to the given data.
- """
- values = itertools.chain(
- output_formatter.SummarizePageSpecificValues(results),
- results.all_summary_values)
- charts = collections.defaultdict(dict)
-
- for value in values:
- chart_name, trace_name = _GetChartAndTraceName(value)
-
- # This intentionally overwrites the trace if it already exists because this
- # is expected of output from the buildbots currently.
- # See: crbug.com/413393
- charts[chart_name][trace_name] = value.AsDict()
- if value.page:
- charts[chart_name][trace_name]['story_tags'] = list(value.page.tags)
-
- for run in results.IterStoryRuns():
- artifact = run.GetArtifact(results_processor.HTML_TRACE_NAME)
- if artifact is not None:
- # This intentionally overwrites the trace if it already exists because
- # this is expected of output from the buildbots currently.
- # See: crbug.com/413393
- charts['trace'][run.story.name] = {
- 'name': 'trace',
- 'type': 'trace',
- 'units': '',
- 'important': False,
- 'page_id': run.story.id,
- 'file_path': os.path.relpath(artifact.local_path, results.output_dir),
- }
- if artifact.url is not None:
- charts['trace'][run.story.name]['cloud_url'] = artifact.url
-
- result_dict = {
- 'format_version': '0.1',
- 'next_version': '0.2',
- # TODO(sullivan): benchmark_name and benchmark_description should be
- # removed when incrementing format_version to 0.1.
- 'benchmark_name': results.benchmark_name,
- 'benchmark_description': results.benchmark_description,
- 'benchmark_metadata': {
- 'type': 'telemetry_benchmark',
- 'name': results.benchmark_name,
- 'description': results.benchmark_description,
- },
- 'charts': charts,
- # Conveys whether the whole benchmark was disabled.
- 'enabled': not results.empty,
- }
-
- return result_dict
-
-
-class ChartJsonOutputFormatter(output_formatter.OutputFormatter):
- def __init__(self, output_stream):
- super(ChartJsonOutputFormatter, self).__init__(output_stream)
-
- def Format(self, results):
- self._Dump(ResultsAsChartDict(results))
-
- def _Dump(self, results):
- json.dump(results, self.output_stream, indent=2, separators=(',', ': '))
- self.output_stream.write('\n')
diff --git a/telemetry/telemetry/internal/results/chart_json_output_formatter_unittest.py b/telemetry/telemetry/internal/results/chart_json_output_formatter_unittest.py
deleted file mode 100644
index d4ef1277a8..0000000000
--- a/telemetry/telemetry/internal/results/chart_json_output_formatter_unittest.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-import os
-import StringIO
-import unittest
-
-from py_utils import tempfile_ext
-
-from telemetry import story
-from telemetry.internal.results import chart_json_output_formatter
-from telemetry.internal.results import page_test_results
-from telemetry.internal.results import results_processor
-from telemetry import page as page_module
-from telemetry.value import list_of_scalar_values
-
-
-def _MakeStorySet():
- ps = story.StorySet(base_dir=os.path.dirname(__file__))
- ps.AddStory(page_module.Page(
- 'http://www.foo.com/', ps, ps.base_dir, name='http://www.foo.com/'))
- ps.AddStory(page_module.Page(
- 'http://www.bar.com/', ps, ps.base_dir, name='http://www.bar.com/'))
- return ps
-
-
-def _MakePageTestResults(
- description='benchmark_description', output_dir=None):
- return page_test_results.PageTestResults(
- benchmark_name='benchmark_name',
- benchmark_description=description,
- output_dir=output_dir)
-
-
-class ChartJsonTest(unittest.TestCase):
- def setUp(self):
- self._output = StringIO.StringIO()
- self._story_set = _MakeStorySet()
- self._formatter = chart_json_output_formatter.ChartJsonOutputFormatter(
- self._output)
-
- def testOutputAndParse(self):
- with _MakePageTestResults() as results:
- self._output.truncate(0)
-
- results.WillRunPage(self._story_set[0])
- results.AddMeasurement('foo', 'seconds', 3)
- results.DidRunPage(self._story_set[0])
-
- self._formatter.Format(results)
-
- d = json.loads(self._output.getvalue())
- self.assertIn('foo', d['charts'])
-
- def testOutputAndParseNoResults(self):
- with _MakePageTestResults() as results:
- self._formatter.Format(results)
-
- d = json.loads(self._output.getvalue())
- self.assertEquals(d['benchmark_name'], 'benchmark_name')
- self.assertFalse(d['enabled'])
-
- def testAsChartDictSerializable(self):
- with _MakePageTestResults() as results:
- results.WillRunPage(self._story_set[0])
- results.AddMeasurement('foo', 'seconds', 3)
- results.DidRunPage(self._story_set[0])
-
- d = chart_json_output_formatter.ResultsAsChartDict(results)
- json.dumps(d)
-
- def testAsChartDictBaseKeys(self):
- with _MakePageTestResults() as results:
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertEquals(d['format_version'], '0.1')
- self.assertEquals(d['next_version'], '0.2')
- self.assertEquals(d['benchmark_metadata']['name'], 'benchmark_name')
- self.assertEquals(d['benchmark_metadata']['description'],
- 'benchmark_description')
- self.assertEquals(d['benchmark_metadata']['type'], 'telemetry_benchmark')
- self.assertFalse(d['enabled'])
-
- def testAsChartDictNoDescription(self):
- with _MakePageTestResults(description=None) as results:
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertEquals('', d['benchmark_metadata']['description'])
-
- def testAsChartDictPageSpecificValuesSamePageWithGroupingLabel(self):
- self._story_set[0].grouping_keys['temperature'] = 'cold'
- with _MakePageTestResults() as results:
- results.WillRunPage(self._story_set[0])
- results.AddMeasurement('foo', 'seconds', 3)
- results.AddMeasurement('foo', 'seconds', 4)
- results.DidRunPage(self._story_set[0])
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertIn('cold@@foo', d['charts'])
- self.assertIn('http://www.foo.com/', d['charts']['cold@@foo'])
- self.assertTrue(d['enabled'])
-
- def testAsChartDictPageSpecificValuesSamePageWithoutGroupingLabel(self):
- with _MakePageTestResults() as results:
- results.WillRunPage(self._story_set[0])
- results.AddMeasurement('foo', 'seconds', 3)
- results.AddMeasurement('foo', 'seconds', 4)
- results.DidRunPage(self._story_set[0])
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertIn('foo', d['charts'])
- self.assertIn('http://www.foo.com/', d['charts']['foo'])
- self.assertTrue(d['enabled'])
-
- def testAsChartDictPageSpecificValuesAndComputedSummaryWithTraceName(self):
- with _MakePageTestResults() as results:
- results.WillRunPage(self._story_set[0])
- results.AddMeasurement('foo.bar', 'seconds', 3)
- results.DidRunPage(self._story_set[0])
- results.WillRunPage(self._story_set[1])
- results.AddMeasurement('foo.bar', 'seconds', 4)
- results.DidRunPage(self._story_set[1])
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertIn('foo', d['charts'])
- self.assertIn('http://www.foo.com/', d['charts']['foo'])
- self.assertIn('http://www.bar.com/', d['charts']['foo'])
- self.assertIn('bar', d['charts']['foo'])
- self.assertTrue(d['enabled'])
-
- def testAsChartDictPageSpecificValuesAndComputedSummaryWithoutTraceName(self):
- with _MakePageTestResults() as results:
- results.WillRunPage(self._story_set[0])
- results.AddMeasurement('foo', 'seconds', 3)
- results.DidRunPage(self._story_set[0])
- results.WillRunPage(self._story_set[1])
- results.AddMeasurement('foo', 'seconds', 4)
- results.DidRunPage(self._story_set[1])
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertIn('foo', d['charts'])
- self.assertIn('http://www.foo.com/', d['charts']['foo'])
- self.assertIn('http://www.bar.com/', d['charts']['foo'])
- self.assertIn('summary', d['charts']['foo'])
- self.assertTrue(d['enabled'])
-
- def testAsChartDictSummaryValueWithTraceName(self):
- v0 = list_of_scalar_values.ListOfScalarValues(
- None, 'foo.bar', 'seconds', [3, 4])
- with _MakePageTestResults() as results:
- results.AddSummaryValue(v0)
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertIn('bar', d['charts']['foo'])
- self.assertTrue(d['enabled'])
-
- def testAsChartDictSummaryValueWithoutTraceName(self):
- v0 = list_of_scalar_values.ListOfScalarValues(
- None, 'foo', 'seconds', [3, 4])
- with _MakePageTestResults() as results:
- results.AddSummaryValue(v0)
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertIn('summary', d['charts']['foo'])
- self.assertTrue(d['enabled'])
-
- def testAsChartDictWithTracesInArtifacts(self):
- with tempfile_ext.NamedTemporaryDirectory() as tempdir:
- with _MakePageTestResults(output_dir=tempdir) as results:
- results.WillRunPage(self._story_set[0])
- with results.CreateArtifact(results_processor.HTML_TRACE_NAME):
- pass
- results.DidRunPage(self._story_set[0])
-
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertIn('trace', d['charts'])
- self.assertIn('http://www.foo.com/', d['charts']['trace'])
- self.assertTrue(d['enabled'])
-
- def testAsChartDictValueSmokeTest(self):
- v0 = list_of_scalar_values.ListOfScalarValues(
- None, 'foo.bar', 'seconds', [3, 4])
- with _MakePageTestResults() as results:
- results.AddSummaryValue(v0)
- d = chart_json_output_formatter.ResultsAsChartDict(results)
-
- self.assertEquals(d['charts']['foo']['bar']['values'], [3, 4])
diff --git a/telemetry/telemetry/internal/results/csv_output_formatter.py b/telemetry/telemetry/internal/results/csv_output_formatter.py
deleted file mode 100644
index d9dc885e86..0000000000
--- a/telemetry/telemetry/internal/results/csv_output_formatter.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import collections
-import csv
-import json
-import os
-import tempfile
-
-from telemetry.internal.results import output_formatter
-from tracing.value import histograms_to_csv
-
-
-def _ReadCsv(text):
- dicts = []
- header = None
- for row in csv.reader(text.split('\n')):
- if header is None:
- header = row
- elif row:
- dicts.append(collections.OrderedDict(zip(header, row)))
- return dicts
-
-
-def _WriteCsv(dicts, fileobj):
- header = []
- for d in dicts:
- for k in d.iterkeys():
- if k not in header:
- header.append(k)
- rows = [header]
- for d in dicts:
- rows.append([d.get(k, '') for k in header])
- csv.writer(fileobj).writerows(rows)
-
-
-class CsvOutputFormatter(output_formatter.OutputFormatter):
- def __init__(self, output_stream, reset_results=False):
- super(CsvOutputFormatter, self).__init__(output_stream)
- self._reset_results = reset_results
-
- def Format(self, page_test_results):
- histograms = page_test_results.AsHistogramDicts()
- file_descriptor, json_path = tempfile.mkstemp()
- os.close(file_descriptor)
- json.dump(histograms, file(json_path, 'w'))
- vinn_result = histograms_to_csv.HistogramsToCsv(json_path)
- dicts = _ReadCsv(vinn_result.stdout)
-
- self._output_stream.seek(0)
- if not self._reset_results:
- dicts += _ReadCsv(self._output_stream.read())
- self._output_stream.seek(0)
- _WriteCsv(dicts, self._output_stream)
- self._output_stream.truncate()
diff --git a/telemetry/telemetry/internal/results/csv_output_formatter_unittest.py b/telemetry/telemetry/internal/results/csv_output_formatter_unittest.py
deleted file mode 100644
index 4b2f2dbce0..0000000000
--- a/telemetry/telemetry/internal/results/csv_output_formatter_unittest.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import csv
-import os
-import shutil
-import StringIO
-import tempfile
-import unittest
-
-import mock
-
-from telemetry import story
-from telemetry.internal.results import csv_output_formatter
-from telemetry.internal.results import page_test_results
-from telemetry import page as page_module
-from tracing.trace_data import trace_data
-
-
-def _MakeStorySet():
- story_set = story.StorySet(base_dir=os.path.dirname(__file__))
- story_set.AddStory(page_module.Page(
- 'http://www.foo.com/', story_set, story_set.base_dir,
- name='http://www.foo.com/'))
- story_set.AddStory(page_module.Page(
- 'http://www.bar.com/', story_set, story_set.base_dir,
- name='http://www.bar.com/'))
- return story_set
-
-
-class CsvOutputFormatterTest(unittest.TestCase):
-
- def setUp(self):
- self._output = StringIO.StringIO()
- self._story_set = _MakeStorySet()
- self._temp_dir = tempfile.mkdtemp()
- with mock.patch('time.time', return_value=15e8):
- self._results = page_test_results.PageTestResults(
- benchmark_name='benchmark',
- benchmark_description='foo',
- output_dir=self._temp_dir,
- upload_bucket='fake_bucket')
- self._formatter = csv_output_formatter.CsvOutputFormatter(self._output)
-
- def tearDown(self):
- self._results.Finalize()
- shutil.rmtree(self._temp_dir)
-
- def Format(self):
- self._results.PopulateHistogramSet()
- self._formatter.Format(self._results)
- return self._output.getvalue()
-
- def testSimple(self):
- # Test a simple benchmark with only one value:
- self._results.WillRunPage(self._story_set[0])
- self._results.AddMeasurement('foo', 'seconds', 3)
- self._results.DidRunPage(self._story_set[0])
-
- actual = list(zip(*csv.reader(self.Format().splitlines())))
- expected = [
- ('name', 'foo'), ('unit', 'ms'), ('avg', '3000'), ('count', '1'),
- ('max', '3000'), ('min', '3000'), ('std', '0'), ('sum', '3000'),
- ('architectures', ''), ('benchmarks', 'benchmark'),
- ('benchmarkStart', '2017-07-14 02:40:00'), ('bots', ''), ('builds', ''),
- ('deviceIds', ''), ('displayLabel', 'benchmark 2017-07-14 02:40:00'),
- ('masters', ''), ('memoryAmounts', ''), ('osNames', ''),
- ('osVersions', ''), ('productVersions', ''),
- ('stories', 'http://www.foo.com/'), ('storysetRepeats', ''),
- ('traceStart', ''), ('traceUrls', '')
- ]
- self.assertEqual(actual, expected)
-
- @mock.patch('py_utils.cloud_storage.Insert')
- def testMultiplePagesAndValues(self, cloud_storage_insert_patch):
- cloud_storage_insert_patch.return_value = 'fake_url'
-
- self._results.WillRunPage(self._story_set[0])
- self._results.AddMeasurement('foo', 'seconds', 4)
- self._results.DidRunPage(self._story_set[0])
-
- self._results.WillRunPage(self._story_set[1])
- self._results.AddMeasurement('foo', 'seconds', 3.4)
- self._results.AddTraces(trace_data.CreateTestTrace())
- self._results.AddMeasurement('bar', 'km', 10)
- self._results.AddMeasurement('baz', 'count', 5)
- self._results.DidRunPage(self._story_set[1])
-
- # Parse CSV output into list of lists.
- values = list(csv.reader(self.Format().splitlines()))[1:]
- values.sort()
-
- self.assertEquals(len(values), 4)
- self.assertEquals(len(set((v[1] for v in values))), 2) # 2 pages.
- self.assertEquals(len(set((v[2] for v in values))), 4) # 4 value names.
- sample_row = values[2]
- self.assertEquals(sample_row, [
- 'foo', 'ms', '3400', '1', '3400', '3400', '0', '3400', '', 'benchmark',
- '2017-07-14 02:40:00', '', '', '', 'benchmark 2017-07-14 02:40:00', '',
- '', '', '', '', 'http://www.bar.com/', '', '', 'fake_url'])
diff --git a/telemetry/telemetry/internal/results/histogram_set_json_output_formatter.py b/telemetry/telemetry/internal/results/histogram_set_json_output_formatter.py
deleted file mode 100644
index 7f32683e03..0000000000
--- a/telemetry/telemetry/internal/results/histogram_set_json_output_formatter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2016 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-import logging
-
-from telemetry.internal.results import output_formatter
-
-
-class HistogramSetJsonOutputFormatter(output_formatter.OutputFormatter):
- def __init__(self, output_stream, reset_results=True):
- super(HistogramSetJsonOutputFormatter, self).__init__(output_stream)
- self._reset_results = reset_results
-
- def Format(self, page_test_results):
- histograms = page_test_results.AsHistogramDicts()
- self._output_stream.seek(0)
- if not self._reset_results:
- existing = self._output_stream.read()
- self._output_stream.seek(0)
- if existing:
- try:
- histograms += json.loads(existing)
- except ValueError:
- logging.warn('Found existing histograms json but failed to parse it.')
- json.dump(histograms, self._output_stream)
diff --git a/telemetry/telemetry/internal/results/html_output_formatter.py b/telemetry/telemetry/internal/results/html_output_formatter.py
deleted file mode 100644
index c7669b9c5c..0000000000
--- a/telemetry/telemetry/internal/results/html_output_formatter.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2016 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import datetime
-import logging
-import os
-
-from py_utils import cloud_storage
-
-from telemetry.internal.results import output_formatter
-
-from tracing_build import vulcanize_histograms_viewer
-
-
-class HtmlOutputFormatter(output_formatter.OutputFormatter):
- def __init__(self, output_stream, reset_results=True, upload_bucket=None):
- super(HtmlOutputFormatter, self).__init__(output_stream)
- self._upload_bucket = upload_bucket
- self._reset_results = reset_results
-
- def Format(self, page_test_results):
- histograms = page_test_results.AsHistogramDicts()
-
- vulcanize_histograms_viewer.VulcanizeAndRenderHistogramsViewer(
- histograms, self._output_stream, self._reset_results)
- if self._upload_bucket:
- file_path = os.path.abspath(self._output_stream.name)
- remote_path = ('html-results/results-%s' %
- datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
- try:
- url = cloud_storage.Insert(self._upload_bucket, remote_path, file_path)
- print 'View HTML results online at %s' % url
- except cloud_storage.PermissionError as e:
- logging.error('Cannot upload files to cloud storage due to '
- ' permission error: %s' % e.message)
diff --git a/telemetry/telemetry/internal/results/html_output_formatter_unittest.py b/telemetry/telemetry/internal/results/html_output_formatter_unittest.py
deleted file mode 100644
index b8f59957c6..0000000000
--- a/telemetry/telemetry/internal/results/html_output_formatter_unittest.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2018 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import StringIO
-import unittest
-
-from telemetry import story
-from telemetry.internal.results import html_output_formatter
-from telemetry.internal.results import page_test_results
-from telemetry import page as page_module
-from tracing.value import histogram_set
-from tracing_build import render_histograms_viewer
-
-
-def _MakeStorySet():
- story_set = story.StorySet(base_dir=os.path.dirname(__file__))
- story_set.AddStory(
- page_module.Page('http://www.foo.com/', story_set, story_set.base_dir,
- name='Foo'))
- story_set.AddStory(
- page_module.Page('http://www.bar.com/', story_set, story_set.base_dir,
- name='Bar'))
- story_set.AddStory(
- page_module.Page('http://www.baz.com/', story_set, story_set.base_dir,
- name='Baz',
- grouping_keys={'case': 'test', 'type': 'key'}))
- return story_set
-
-
-class HtmlOutputFormatterTest(unittest.TestCase):
-
- def setUp(self):
- self._output = StringIO.StringIO()
- self._story_set = _MakeStorySet()
-
- def testBasic(self):
- formatter = html_output_formatter.HtmlOutputFormatter(
- self._output, reset_results=False)
- results = page_test_results.PageTestResults()
-
- results.WillRunPage(self._story_set[0])
- results.AddMeasurement('foo', 'seconds', 3)
- results.DidRunPage(self._story_set[0])
- results.PopulateHistogramSet()
-
- formatter.Format(results)
- html = self._output.getvalue()
- dicts = render_histograms_viewer.ReadExistingResults(html)
- histograms = histogram_set.HistogramSet()
- histograms.ImportDicts(dicts)
-
- self.assertEqual(len(histograms), 1)
- self.assertEqual(histograms.GetFirstHistogram().name, 'foo')
diff --git a/telemetry/telemetry/internal/results/json_3_output_formatter.py b/telemetry/telemetry/internal/results/json_3_output_formatter.py
deleted file mode 100644
index d3a1cd6b3d..0000000000
--- a/telemetry/telemetry/internal/results/json_3_output_formatter.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Output formatter for JSON Test Results Format.
-
-See
-https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_results_format.md
-for details.
-"""
-
-import collections
-import json
-import os
-
-from telemetry.internal.results import output_formatter
-
-
-def _mk_dict(d, *args):
- for key in args:
- if key not in d:
- d[key] = {}
- d = d[key]
- return d
-
-
-def ResultsAsDict(results):
- """Takes PageTestResults to a dict in the JSON test results format.
-
- To serialize results as JSON we first convert them to a dict that can be
- serialized by the json module.
-
- Args:
- results: a PageTestResults object
- """
- result_dict = {
- 'interrupted': results.benchmark_interrupted,
- 'path_delimiter': '/',
- 'version': 3,
- 'seconds_since_epoch': results.benchmark_start_us / 1e6,
- 'tests': {},
- }
- status_counter = collections.Counter()
- for run in results.IterStoryRuns():
- status = run.status
- expected = status if run.is_expected else 'PASS'
- status_counter[status] += 1
-
- test = _mk_dict(
- result_dict, 'tests', results.benchmark_name, run.story.name)
- if 'actual' not in test:
- test['actual'] = status
- else:
- test['actual'] += (' ' + status)
-
- if 'expected' not in test:
- test['expected'] = expected
- else:
- if expected not in test['expected']:
- test['expected'] += (' ' + expected)
-
- if 'is_unexpected' not in test:
- test['is_unexpected'] = status != expected
- else:
- test['is_unexpected'] = test['is_unexpected'] or status != expected
-
- if 'time' not in test:
- test['time'] = run.duration
- test['times'] = [run.duration]
- else:
- test['times'].append(run.duration)
-
- for artifact in run.IterArtifacts():
- if artifact.url is not None:
- artifact_path = artifact.url
- else:
- # Paths in json format should be relative to the output directory and
- # '/'-delimited on all platforms according to the spec.
- relative_path = os.path.relpath(os.path.realpath(artifact.local_path),
- os.path.realpath(results.output_dir))
- artifact_path = relative_path.replace(os.sep, '/')
-
- test.setdefault('artifacts', {}).setdefault(artifact.name, []).append(
- artifact_path)
-
- # Shard index is really only useful for failed tests. See crbug.com/960951
- # for details.
- if run.failed and 'GTEST_SHARD_INDEX' in os.environ:
- test['shard'] = int(os.environ['GTEST_SHARD_INDEX'])
-
- # The following logic can interfere with calculating flakiness percentages.
- # The logic does allow us to re-run tests without them automatically
- # being marked as flaky by the flakiness dashboard and milo.
- # Note that it does not change the total number of passes in
- # num_failures_by_type
- # crbug.com/754825
- for _, stories in result_dict['tests'].iteritems():
- for _, story_results in stories.iteritems():
- deduped_results = set(story_results['actual'].split(' '))
- if deduped_results == {'PASS'}:
- story_results['actual'] = 'PASS'
- elif deduped_results == {'SKIP'}:
- story_results['actual'] = 'SKIP'
-
- result_dict['num_failures_by_type'] = dict(status_counter)
- return result_dict
-
-
-class JsonOutputFormatter(output_formatter.OutputFormatter):
- def Format(self, results):
- """Serialize page test results in JSON Test Results format."""
- json.dump(
- ResultsAsDict(results),
- self.output_stream, indent=2, sort_keys=True, separators=(',', ': '))
- self.output_stream.write('\n')
diff --git a/telemetry/telemetry/internal/results/json_3_output_formatter_unittest.py b/telemetry/telemetry/internal/results/json_3_output_formatter_unittest.py
deleted file mode 100644
index 9edcf1e66b..0000000000
--- a/telemetry/telemetry/internal/results/json_3_output_formatter_unittest.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-import json
-import os
-import shutil
-import tempfile
-import time
-import unittest
-
-import mock
-
-from telemetry import page as page_module
-from telemetry import story
-from telemetry.internal.results import json_3_output_formatter
-from telemetry.internal.results import page_test_results
-from telemetry.internal.results import results_options
-from telemetry.testing import options_for_unittests
-
-
-def _MakeStorySet():
- story_set = story.StorySet()
- story_set.AddStory(
- page_module.Page('http://www.foo.com/', story_set, name='Foo'))
- story_set.AddStory(
- page_module.Page('http://www.bar.com/', story_set, name='Bar'))
- story_set.AddStory(
- page_module.Page('http://www.baz.com/', story_set, name='Baz',
- grouping_keys={'case': 'test', 'type': 'key'}))
- return story_set
-
-
-def _HasBenchmark(tests_dict, benchmark_name):
- return tests_dict.get(benchmark_name, None) != None
-
-
-def _HasStory(benchmark_dict, story_name):
- return benchmark_dict.get(story_name) != None
-
-
-class Json3OutputFormatterTest(unittest.TestCase):
- def setUp(self):
- self._story_set = _MakeStorySet()
- self._output_dir = tempfile.mkdtemp()
-
- def tearDown(self):
- shutil.rmtree(self._output_dir)
-
- def _MakeResults(self, **kwargs):
- kwargs.setdefault('benchmark_name', 'benchmark_name')
- kwargs.setdefault('output_dir', self._output_dir)
- with mock.patch('time.time', return_value=1501773200):
- return page_test_results.PageTestResults(**kwargs)
-
- def testAsDictBaseKeys(self):
- with self._MakeResults() as results:
- pass
- d = json_3_output_formatter.ResultsAsDict(results)
-
- self.assertEquals(d['interrupted'], False)
- self.assertEquals(d['num_failures_by_type'], {})
- self.assertEquals(d['path_delimiter'], '/')
- self.assertEquals(d['seconds_since_epoch'], 1501773200)
- self.assertEquals(d['tests'], {})
- self.assertEquals(d['version'], 3)
-
- def testAsDictWithOnePage(self):
- with self._MakeResults() as results:
- results.WillRunPage(self._story_set[0])
- results.DidRunPage(self._story_set[0])
-
- d = json_3_output_formatter.ResultsAsDict(results)
-
- self.assertTrue(_HasBenchmark(d['tests'], 'benchmark_name'))
- self.assertTrue(_HasStory(d['tests']['benchmark_name'], 'Foo'))
- story_result = d['tests']['benchmark_name']['Foo']
- self.assertEquals(story_result['actual'], 'PASS')
- self.assertEquals(story_result['expected'], 'PASS')
- self.assertEquals(d['num_failures_by_type'], {'PASS': 1})
-
- def testAsDictWithTwoPages(self):
- with self._MakeResults() as results:
- results.WillRunPage(self._story_set[0])
- results.DidRunPage(self._story_set[0])
-
- results.WillRunPage(self._story_set[1])
- results.DidRunPage(self._story_set[1])
-
- d = json_3_output_formatter.ResultsAsDict(results)
-
- self.assertTrue(_HasBenchmark(d['tests'], 'benchmark_name'))
- self.assertTrue(_HasStory(d['tests']['benchmark_name'], 'Foo'))
- story_result = d['tests']['benchmark_name']['Foo']
- self.assertEquals(story_result['actual'], 'PASS')
- self.assertEquals(story_result['expected'], 'PASS')
-
- self.assertTrue(_HasBenchmark(d['tests'], 'benchmark_name'))
- self.assertTrue(_HasStory(d['tests']['benchmark_name'], 'Bar'))
- story_result = d['tests']['benchmark_name']['Bar']
- self.assertEquals(story_result['actual'], 'PASS')
- self.assertEquals(story_result['expected'], 'PASS')
-
- self.assertEquals(d['num_failures_by_type'], {'PASS': 2})
-
- def testAsDictWithRepeatedTests(self):
- with self._MakeResults() as results:
- results.WillRunPage(self._story_set[0])
- results.DidRunPage(self._story_set[0])
-
- results.WillRunPage(self._story_set[1])
- results.Skip('fake_skip')
- results.DidRunPage(self._story_set[1])
-
- results.WillRunPage(self._story_set[0])
- results.DidRunPage(self._story_set[0])
-
- results.WillRunPage(self._story_set[1])
- results.Skip('fake_skip')
- results.DidRunPage(self._story_set[1])
-
- d = json_3_output_formatter.ResultsAsDict(results)
- foo_story_result = d['tests']['benchmark_name']['Foo']
- self.assertEquals(foo_story_result['actual'], 'PASS')
- self.assertEquals(foo_story_result['expected'], 'PASS')
-
- bar_story_result = d['tests']['benchmark_name']['Bar']
- self.assertEquals(bar_story_result['actual'], 'SKIP')
- self.assertEquals(bar_story_result['expected'], 'SKIP')
-
- self.assertEquals(d['num_failures_by_type'], {'SKIP': 2, 'PASS': 2})
-
- def testArtifactsWithRepeatedRuns(self):
- with self._MakeResults() as results:
- results.WillRunPage(self._story_set[0])
- with results.CreateArtifact('log.txt'):
- pass
- results.DidRunPage(self._story_set[0])
-
- results.WillRunPage(self._story_set[0])
- with results.CreateArtifact('log.txt'):
- pass
- with results.CreateArtifact('trace.json'):
- pass
- results.DidRunPage(self._story_set[0])
-
- d = json_3_output_formatter.ResultsAsDict(results)
- foo_story_artifacts = d['tests']['benchmark_name']['Foo']['artifacts']
- self.assertEquals(len(foo_story_artifacts['log.txt']), 2)
- self.assertEquals(len(foo_story_artifacts['trace.json']), 1)
-
- def testAsDictWithSkippedAndFailedTests_AlsoShardIndex(self):
- shard_index = 42
- with mock.patch.dict(os.environ, {'GTEST_SHARD_INDEX': str(shard_index)}):
- with self._MakeResults() as results:
- results.WillRunPage(self._story_set[0])
- results.DidRunPage(self._story_set[0])
-
- results.WillRunPage(self._story_set[1])
- results.DidRunPage(self._story_set[1])
-
- results.WillRunPage(self._story_set[0])
- results.Skip('fake_skip')
- results.DidRunPage(self._story_set[0])
-
- results.WillRunPage(self._story_set[0])
- results.Skip('unexpected_skip', False)
- results.DidRunPage(self._story_set[0])
-
- results.WillRunPage(self._story_set[1])
- results.Fail('fake_failure')
- results.DidRunPage(self._story_set[1])
-
- d = json_3_output_formatter.ResultsAsDict(results)
-
- foo_story_result = d['tests']['benchmark_name']['Foo']
- self.assertEquals(foo_story_result['actual'], 'PASS SKIP SKIP')
- self.assertEquals(foo_story_result['expected'], 'PASS SKIP')
- self.assertTrue(foo_story_result['is_unexpected'])
-
- bar_story_result = d['tests']['benchmark_name']['Bar']
- self.assertEquals(bar_story_result['actual'], 'PASS FAIL')
- self.assertEquals(bar_story_result['expected'], 'PASS')
- self.assertEquals(bar_story_result['shard'], shard_index)
- self.assertTrue(bar_story_result['is_unexpected'])
-
- self.assertEquals(
- d['num_failures_by_type'], {'PASS': 2, 'FAIL': 1, 'SKIP': 2})
-
- def testIntegrationCreateJsonTestResultsWithNoResults(self):
- options = options_for_unittests.GetRunOptions(output_dir=self._output_dir)
- options.output_formats = ['json-test-results']
- with results_options.CreateResults(options):
- pass
-
- output_file = os.path.join(self._output_dir, 'test-results.json')
- with open(output_file) as f:
- json_test_results = json.load(f)
-
- self.assertEquals(json_test_results['interrupted'], False)
- self.assertEquals(json_test_results['num_failures_by_type'], {})
- self.assertEquals(json_test_results['path_delimiter'], '/')
- self.assertAlmostEqual(json_test_results['seconds_since_epoch'],
- time.time(), 1)
- self.assertEquals(json_test_results['tests'], {})
- self.assertEquals(json_test_results['version'], 3)
-
- @mock.patch('telemetry.internal.results.story_run.time')
- def testIntegrationCreateJsonTestResults(self, time_module):
- time_module.time.side_effect = [1.0, 6.0123]
-
- options = options_for_unittests.GetRunOptions(output_dir=self._output_dir)
- options.output_formats = ['json-test-results']
- with results_options.CreateResults(
- options, benchmark_name='test_benchmark') as results:
- results.WillRunPage(self._story_set[0])
- results.DidRunPage(self._story_set[0])
-
- output_file = os.path.join(self._output_dir, 'test-results.json')
- with open(output_file) as f:
- json_test_results = json.load(f)
-
- self.assertEquals(json_test_results['interrupted'], False)
- self.assertEquals(json_test_results['num_failures_by_type'], {'PASS': 1})
- self.assertEquals(json_test_results['path_delimiter'], '/')
- self.assertAlmostEqual(json_test_results['seconds_since_epoch'],
- time.time(), delta=1)
- testBenchmarkFoo = json_test_results['tests']['test_benchmark']['Foo']
- self.assertEquals(testBenchmarkFoo['actual'], 'PASS')
- self.assertEquals(testBenchmarkFoo['expected'], 'PASS')
- self.assertFalse(testBenchmarkFoo['is_unexpected'])
- self.assertEquals(testBenchmarkFoo['time'], 5.0123)
- self.assertEquals(testBenchmarkFoo['times'][0], 5.0123)
- self.assertEquals(json_test_results['version'], 3)
diff --git a/telemetry/telemetry/internal/results/output_formatter.py b/telemetry/telemetry/internal/results/output_formatter.py
deleted file mode 100644
index fa207a0ac3..0000000000
--- a/telemetry/telemetry/internal/results/output_formatter.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-
-from telemetry.value import summary as summary_module
-
-class OutputFormatter(object):
- """A formatter for PageTestResults.
-
- An OutputFormatter takes PageTestResults, formats the results
- (telemetry.value.Value instances), and output the formatted results
- in the given output stream.
-
- Examples of output formatter: CsvOutputFormatter produces results in
- CSV format."""
-
- def __init__(self, output_stream):
- """Constructs a new formatter that writes to the output_stream.
-
- Args:
- output_stream: The stream to write the formatted output to.
- """
- self._output_stream = output_stream
-
- def Format(self, page_test_results):
- """Formats the given PageTestResults into the output stream.
-
- This will be called once at the end of a benchmark.
-
- Args:
- page_test_results: A PageTestResults object containing all results
- from the current benchmark run.
- """
- raise NotImplementedError()
-
- def PrintViewResults(self):
- print 'View result at file://' + os.path.abspath(self.output_stream.name)
-
- @property
- def output_stream(self):
- return self._output_stream
-
-
-def SummarizePageSpecificValues(results):
- """Summarize results appropriately for TBM and legacy benchmarks.
-
- For benchmarks that are timeline-based, we need to summarize not once, but
- twice, once by name and grouping_label (default) and again by name only. But
- for benchmarks that are not timeline-based, we only summarize once by name.
- """
- # Default summary uses merge_values.DefaultKeyFunc to summarize both by name
- # and grouping_label.
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- if any(v.grouping_label for v in results.IterAllLegacyValues()):
- summary_by_name_only = summary_module.Summary(
- results, key_func=lambda v: v.name)
- values.extend(
- summary_by_name_only.interleaved_computed_per_page_values_and_summaries
- )
- return values
diff --git a/telemetry/telemetry/internal/results/page_test_results.py b/telemetry/telemetry/internal/results/page_test_results.py
index 41ed007512..354b677785 100644
--- a/telemetry/telemetry/internal/results/page_test_results.py
+++ b/telemetry/telemetry/internal/results/page_test_results.py
@@ -2,45 +2,32 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import datetime
import json
import logging
import os
import posixpath
import shutil
-import tempfile
import time
import traceback
-from telemetry import value as value_module
-from telemetry.internal.results import chart_json_output_formatter
from telemetry.internal.results import gtest_progress_reporter
-from telemetry.internal.results import results_processor
from telemetry.internal.results import story_run
-from telemetry.value import list_of_scalar_values
-from telemetry.value import scalar
-from tracing.value import convert_chart_json
-from tracing.value import histogram_set
-from tracing.value.diagnostics import all_diagnostics
from tracing.value.diagnostics import reserved_infos
-from tracing.value.diagnostics import generic_set
-TELEMETRY_RESULTS = '_telemetry_results.jsonl'
-HISTOGRAM_DICTS_NAME = 'histogram_dicts.json'
+TEST_RESULTS = '_test_results.jsonl'
+DIAGNOSTICS_NAME = 'diagnostics.json'
class PageTestResults(object):
- def __init__(self, output_formatters=None, progress_stream=None,
- output_dir=None, intermediate_dir=None,
- benchmark_name=None, benchmark_description=None,
- upload_bucket=None, results_label=None):
- """
+ def __init__(self, progress_stream=None, output_dir=None,
+ intermediate_dir=None, benchmark_name=None,
+ benchmark_description=None, upload_bucket=None,
+ results_label=None):
+ """Object to hold story run results while a benchmark is executed.
+
Args:
- output_formatters: A list of output formatters. The output
- formatters are typically used to format the test results, such
- as CsvOutputFormatter, which output the test results as CSV.
progress_stream: A file-like object where to write progress reports as
stories are being run. Can be None to suppress progress reporting.
output_dir: A string specifying the directory where to store the test
@@ -56,8 +43,6 @@ def __init__(self, output_formatters=None, progress_stream=None,
super(PageTestResults, self).__init__()
self._progress_reporter = gtest_progress_reporter.GTestProgressReporter(
progress_stream)
- self._output_formatters = (
- output_formatters if output_formatters is not None else [])
self._output_dir = output_dir
self._intermediate_dir = intermediate_dir
if intermediate_dir is None and output_dir is not None:
@@ -66,11 +51,10 @@ def __init__(self, output_formatters=None, progress_stream=None,
self._current_story_run = None
self._all_story_runs = []
- self._all_stories = set()
- self._representative_value_for_each_value_name = {}
- self._all_summary_values = []
- self._histograms = histogram_set.HistogramSet()
+ # This is used to validate that measurements across story runs use units
+ # consistently.
+ self._measurement_units = {}
self._benchmark_name = benchmark_name or '(unknown benchmark)'
self._benchmark_description = benchmark_description or ''
@@ -95,8 +79,7 @@ def __init__(self, output_formatters=None, progress_stream=None,
if not os.path.exists(self._intermediate_dir):
os.makedirs(self._intermediate_dir)
self._results_stream = open(
- os.path.join(self._intermediate_dir, TELEMETRY_RESULTS), 'w')
- self._RecordBenchmarkStart()
+ os.path.join(self._intermediate_dir, TEST_RESULTS), 'w')
@property
def benchmark_name(self):
@@ -119,10 +102,6 @@ def benchmark_interruption(self):
"""Returns a string explaining why the benchmark was interrupted."""
return self._interruption
- @property
- def start_datetime(self):
- return datetime.datetime.utcfromtimestamp(self._start_time)
-
@property
def label(self):
return self._results_label
@@ -139,43 +118,6 @@ def upload_bucket(self):
def finalized(self):
return self._finalized
- def AsHistogramDicts(self):
- return self._histograms.AsDicts()
-
- def PopulateHistogramSet(self):
- if len(self._histograms):
- return
-
- # We ensure that html traces are serialized and uploaded if necessary
- results_processor.SerializeAndUploadHtmlTraces(self)
-
- chart_json = chart_json_output_formatter.ResultsAsChartDict(self)
- chart_json['label'] = self.label
- chart_json['benchmarkStartMs'] = self.benchmark_start_us / 1000.0
-
- file_descriptor, chart_json_path = tempfile.mkstemp()
- os.close(file_descriptor)
- json.dump(chart_json, file(chart_json_path, 'w'))
-
- vinn_result = convert_chart_json.ConvertChartJson(chart_json_path)
-
- os.remove(chart_json_path)
-
- if vinn_result.returncode != 0:
- logging.error('Error converting chart json to Histograms:\n' +
- vinn_result.stdout)
- return []
- self._histograms.ImportDicts(json.loads(vinn_result.stdout))
-
- @property
- def all_summary_values(self):
- return self._all_summary_values
-
- @property
- def current_page(self):
- """DEPRECATED: Use current_story instead."""
- return self.current_story
-
@property
def current_story(self):
assert self._current_story_run, 'Not currently running test.'
@@ -198,7 +140,7 @@ def num_successful(self):
@property
def num_expected(self):
"""Number of stories that succeeded or were expected skips."""
- return sum(1 for run in self._all_story_runs if run.is_expected)
+ return sum(1 for run in self._all_story_runs if run.expected)
@property
def had_failures(self):
@@ -213,7 +155,7 @@ def num_failed(self):
@property
def had_skips(self):
"""If there where any skipped stories."""
- return any(run.skipped for run in self._IterAllStoryRuns())
+ return any(run.skipped for run in self._all_story_runs)
@property
def num_skipped(self):
@@ -230,43 +172,19 @@ def _IterAllStoryRuns(self):
@property
def empty(self):
- """Whether there were any story runs or results."""
- return not self._all_story_runs and not self._all_summary_values
+ """Whether there were any story runs."""
+ return not self._all_story_runs
- def _WriteJsonLine(self, data, close=False):
+ def _WriteJsonLine(self, data):
if self._results_stream is not None:
# Use a compact encoding and sort keys to get deterministic outputs.
self._results_stream.write(
json.dumps(data, sort_keys=True, separators=(',', ':')) + '\n')
- if close:
- self._results_stream.close()
- else:
- self._results_stream.flush()
-
- def _RecordBenchmarkStart(self):
- self._WriteJsonLine({
- 'benchmarkRun': {
- 'startTime': self.start_datetime.isoformat() + 'Z',
- }
- })
-
- def _RecordBenchmarkFinish(self):
- self._WriteJsonLine({
- 'benchmarkRun': {
- 'finalized': self.finalized,
- 'interrupted': self.benchmark_interrupted,
- 'diagnostics': self._diagnostics,
- }
- }, close=True)
+ self._results_stream.flush()
def IterStoryRuns(self):
return iter(self._all_story_runs)
- def IterAllLegacyValues(self):
- for run in self._IterAllStoryRuns():
- for value in run.values:
- yield value
-
def __enter__(self):
return self
@@ -279,6 +197,8 @@ def WillRunPage(self, page, story_run_index=0):
self._current_story_run = story_run.StoryRun(
page, test_prefix=self.benchmark_name, index=story_run_index,
intermediate_dir=self._intermediate_dir)
+ with self.CreateArtifact(DIAGNOSTICS_NAME) as f:
+ json.dump({'diagnostics': self._diagnostics}, f, indent=4)
self._progress_reporter.WillRunStory(self)
def DidRunPage(self, page): # pylint: disable=unused-argument
@@ -290,34 +210,8 @@ def DidRunPage(self, page): # pylint: disable=unused-argument
self._current_story_run.Finish()
self._progress_reporter.DidRunStory(self)
self._all_story_runs.append(self._current_story_run)
- story = self._current_story_run.story
- self._all_stories.add(story)
self._current_story_run = None
- def AddMetricPageResults(self, result):
- """Add results from metric computation.
-
- Args:
- result: A dict produced by results_processor._ComputeMetricsInPool.
- """
- self._current_story_run = result['run']
- try:
- for fail in result['fail']:
- self.Fail(fail)
- if result['histogram_dicts']:
- self._histograms.ImportDicts(result['histogram_dicts'])
- # Saving histograms as an artifact is a temporary hack to keep
- # things working while we gradually move code from Telemetry to
- # Results Processor.
- # TODO(crbug.com/981349): Remove this after metrics running is
- # implemented in Results Processor.
- with self.CreateArtifact(HISTOGRAM_DICTS_NAME) as f:
- json.dump(result['histogram_dicts'], f)
- for value in result['scalars']:
- self.AddValue(value)
- finally:
- self._current_story_run = None
-
def InterruptBenchmark(self, reason):
"""Mark the benchmark as interrupted.
@@ -333,30 +227,7 @@ def InterruptBenchmark(self, reason):
logging.fatal(reason)
self._interruption = self._interruption or reason
- def AddHistogram(self, hist):
- diags = self._GetDiagnostics()
- for diag in diags.itervalues():
- self._histograms.AddSharedDiagnostic(diag)
- self._histograms.AddHistogram(hist, diags)
-
- def _GetDiagnostics(self):
- """Get benchmark and current story details as histogram diagnostics.
-
- Diagnostics of the DateRange type are converted to milliseconds.
- """
- return dict(_WrapDiagnostics([
- (reserved_infos.BENCHMARKS, self.benchmark_name),
- (reserved_infos.BENCHMARK_START, self.benchmark_start_us / 1e3),
- (reserved_infos.BENCHMARK_DESCRIPTIONS, self.benchmark_description),
- (reserved_infos.LABELS, self.label),
- (reserved_infos.HAD_FAILURES, self.current_story_run.failed),
- (reserved_infos.STORIES, self.current_story.name),
- (reserved_infos.STORY_TAGS, self.current_story.GetStoryTagsList()),
- (reserved_infos.STORYSET_REPEATS, self.current_story_run.index),
- (reserved_infos.TRACE_START, self.current_story_run.start_us / 1e3),
- ]))
-
- def AddMeasurement(self, name, unit, samples):
+ def AddMeasurement(self, name, unit, samples, description=None):
"""Record a measurement of the currently running story.
Measurements are numeric values obtained directly by a benchmark while
@@ -365,11 +236,6 @@ def AddMeasurement(self, name, unit, samples):
measurements obtained by running metrics on collected traces (if any)
after the benchmark run has finished.
- TODO(crbug.com/999484): Currently measurements are stored as legacy
- Telemetry values. This will allow clients to switch to this new API while
- preserving the existing behavior. When no more clients create legacy
- values on their own, the implementation details below can be changed.
-
Args:
name: A string with the name of the measurement (e.g. 'score', 'runtime',
etc).
@@ -377,16 +243,18 @@ def AddMeasurement(self, name, unit, samples):
'count', etc).
samples: Either a single numeric value or a list of numeric values to
record as part of this measurement.
+ description: An optional string with a short human readable description
+ of the measurement.
"""
assert self._current_story_run, 'Not currently running a story.'
- value = _MeasurementToValue(self.current_story, name, unit, samples)
- self.AddValue(value)
-
- def AddValue(self, value):
- """DEPRECATED: Use AddMeasurement instead."""
- assert self._current_story_run, 'Not currently running a story.'
- self._ValidateValue(value)
- self._current_story_run.AddLegacyValue(value)
+ old_unit = self._measurement_units.get(name)
+ if old_unit is not None:
+ if unit != old_unit:
+ raise ValueError('Unit for measurement %r changed from %s to %s.' % (
+ name, old_unit, unit))
+ else:
+ self._measurement_units[name] = unit
+ self.current_story_run.AddMeasurement(name, unit, samples, description)
def AddSharedDiagnostics(self,
owners=None,
@@ -396,7 +264,7 @@ def AddSharedDiagnostics(self,
device_id=None,
os_name=None,
os_version=None):
- """Add diagnostics to all histograms and save it to intermediate results."""
+ """Save diagnostics to intermediate results."""
diag_values = [
(reserved_infos.OWNERS, owners),
(reserved_infos.BUG_COMPONENTS, bug_components),
@@ -406,12 +274,14 @@ def AddSharedDiagnostics(self,
(reserved_infos.OS_NAMES, os_name),
(reserved_infos.OS_VERSIONS, os_version),
]
-
- for name, value in _WrapDiagnostics(diag_values):
- self._histograms.AddSharedDiagnosticToAllHistograms(name, value)
+ for info, value in diag_values:
+ if value is None or value == []:
+ continue
# Results Processor supports only GenericSet diagnostics for now.
- assert isinstance(value, generic_set.GenericSet)
- self._diagnostics[name] = list(value)
+ assert info.type == 'GenericSet'
+ if not isinstance(value, list):
+ value = [value]
+ self._diagnostics[info.name] = value
def Fail(self, failure):
"""Mark the current story run as failed.
@@ -432,9 +302,9 @@ def Fail(self, failure):
logging.error(failure_str)
self._current_story_run.SetFailed(failure_str)
- def Skip(self, reason, is_expected=True):
+ def Skip(self, reason, expected=True):
assert self._current_story_run, 'Not currently running test.'
- self._current_story_run.Skip(reason, is_expected)
+ self._current_story_run.Skip(reason, expected)
def CreateArtifact(self, name):
assert self._current_story_run, 'Not currently running test.'
@@ -461,20 +331,6 @@ def AddTraces(self, traces, tbm_metrics=None):
if tbm_metrics:
self._current_story_run.SetTbmMetrics(tbm_metrics)
- def AddSummaryValue(self, value):
- assert not self.finalized, 'Results are finalized, cannot add values.'
- assert value.page is None
- self._ValidateValue(value)
- self._all_summary_values.append(value)
-
- def _ValidateValue(self, value):
- assert isinstance(value, value_module.Value)
- if value.name not in self._representative_value_for_each_value_name:
- self._representative_value_for_each_value_name[value.name] = value
- representative_value = self._representative_value_for_each_value_name[
- value.name]
- assert value.IsMergableWith(representative_value)
-
def Finalize(self, exc_value=None):
"""Finalize this object to prevent more results from being recorded.
@@ -496,11 +352,6 @@ def Finalize(self, exc_value=None):
self._finalized = True
self._progress_reporter.DidFinishAllStories(self)
- # Make sure that html traces are recorded as artifacts.
- # TODO(crbug.com/981349): Remove this after trace serialization is
- # implemented in Results Processor.
- results_processor.SerializeAndUploadHtmlTraces(self)
-
# TODO(crbug.com/981349): Ideally we want to write results for each story
# run individually at DidRunPage when the story finished executing. For
# now, however, we need to wait until this point after html traces have
@@ -510,46 +361,10 @@ def Finalize(self, exc_value=None):
# and write results instead at the end of each story run.
for run in self._all_story_runs:
self._WriteJsonLine(run.AsDict())
- self._RecordBenchmarkFinish()
-
- for output_formatter in self._output_formatters:
- output_formatter.Format(self)
- output_formatter.PrintViewResults()
- output_formatter.output_stream.close()
-
- def FindAllPageSpecificValuesNamed(self, value_name):
- """DEPRECATED: New benchmarks should not use legacy values."""
- return [v for v in self.IterAllLegacyValues() if v.name == value_name]
+ if self._results_stream is not None:
+ self._results_stream.close()
def IterRunsWithTraces(self):
for run in self._IterAllStoryRuns():
if run.HasArtifactsInDir('trace/'):
yield run
-
-
-def _MeasurementToValue(story, name, unit, samples):
- if isinstance(samples, list):
- return list_of_scalar_values.ListOfScalarValues(
- story, name=name, units=unit, values=samples)
- else:
- return scalar.ScalarValue(story, name=name, units=unit, value=samples)
-
-
-def _WrapDiagnostics(info_value_pairs):
- """Wrap diagnostic values in corresponding Diagnostics classes.
-
- Args:
- info_value_pairs: any iterable of pairs (info, value), where info is one
- of reserved infos defined in tracing.value.diagnostics.reserved_infos,
- and value can be any json-serializable object.
-
- Returns:
- An iterator over pairs (diagnostic name, diagnostic value).
- """
- for info, value in info_value_pairs:
- if value is None or value == []:
- continue
- if info.type == 'GenericSet' and not isinstance(value, list):
- value = [value]
- diag_class = all_diagnostics.GetDiagnosticClassForName(info.type)
- yield info.name, diag_class(value)
diff --git a/telemetry/telemetry/internal/results/page_test_results_unittest.py b/telemetry/telemetry/internal/results/page_test_results_unittest.py
index 06dbe07729..75d6f9c2aa 100644
--- a/telemetry/telemetry/internal/results/page_test_results_unittest.py
+++ b/telemetry/telemetry/internal/results/page_test_results_unittest.py
@@ -2,7 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import codecs
import json
import os
import shutil
@@ -14,21 +13,10 @@
from telemetry import story
from telemetry.core import exceptions
-from telemetry.internal.results import chart_json_output_formatter
-from telemetry.internal.results import histogram_set_json_output_formatter
-from telemetry.internal.results import html_output_formatter
from telemetry.internal.results import page_test_results
-from telemetry.internal.results import results_processor
+from telemetry.internal.results import results_options
from telemetry import page as page_module
-from telemetry.value import improvement_direction
-from telemetry.value import list_of_scalar_values
-from telemetry.value import scalar
from tracing.trace_data import trace_data
-from tracing.value import histogram as histogram_module
-from tracing.value import histogram_set
-from tracing.value.diagnostics import diagnostic
-from tracing.value.diagnostics import generic_set
-from tracing.value.diagnostics import reserved_infos
def _CreateException():
@@ -76,7 +64,7 @@ def CreateResults(self, **kwargs):
def GetResultRecords(self):
results_file = os.path.join(
- self.intermediate_dir, page_test_results.TELEMETRY_RESULTS)
+ self.intermediate_dir, page_test_results.TEST_RESULTS)
with open(results_file) as f:
return [json.loads(line) for line in f]
@@ -152,62 +140,36 @@ def testPassesNoSkips(self):
self.assertTrue(all_story_runs[1].ok)
self.assertTrue(all_story_runs[2].skipped)
- def testAddScalarValueSameAsMeasurement(self):
- # Test that AddValue for legacy scalar values still works, and is
- # equivalent to adding measurements.
+ def testAddMeasurementAsScalar(self):
with self.CreateResults() as results:
results.WillRunPage(self.pages[0])
- results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
results.AddMeasurement('a', 'seconds', 3)
results.DidRunPage(self.pages[0])
- values = list(results.IterAllLegacyValues())
- self.assertEqual(len(values), 2)
- self.assertEqual(values[0], values[1])
+ test_results = results_options.ReadTestResults(self.intermediate_dir)
+ self.assertTrue(len(test_results), 1)
+ measurements = results_options.ReadMeasurements(test_results[0])
+ self.assertEqual(measurements, {'a': {'unit': 'seconds', 'samples': [3]}})
- def testAddListOfScalarValuesSameAsMeasurement(self):
- # Test that AddValue for legacy lists of scalar values still works, and is
- # equivalent to adding measurements.
+ def testAddMeasurementAsList(self):
with self.CreateResults() as results:
results.WillRunPage(self.pages[0])
- results.AddValue(list_of_scalar_values.ListOfScalarValues(
- self.pages[0], 'a', 'seconds', [1, 2, 3]))
results.AddMeasurement('a', 'seconds', [1, 2, 3])
results.DidRunPage(self.pages[0])
- values = list(results.IterAllLegacyValues())
- self.assertEqual(len(values), 2)
- self.assertEqual(values[0], values[1])
-
- def testAddMeasurementWithStoryGroupingKeys(self):
- with self.CreateResults() as results:
- self.pages[0].grouping_keys['foo'] = 'bar'
- self.pages[0].grouping_keys['answer'] = '42'
- results.WillRunPage(self.pages[0])
- results.AddMeasurement('a', 'seconds', 3)
- results.DidRunPage(self.pages[0])
-
- values = list(results.IterAllLegacyValues())
- self.assertEqual(1, len(values))
- self.assertEqual(values[0].grouping_label, '42_bar')
+ test_results = results_options.ReadTestResults(self.intermediate_dir)
+ self.assertTrue(len(test_results), 1)
+ measurements = results_options.ReadMeasurements(test_results[0])
+ self.assertEqual(measurements,
+ {'a': {'unit': 'seconds', 'samples': [1, 2, 3]}})
def testNonNumericMeasurementIsInvalid(self):
with self.CreateResults() as results:
results.WillRunPage(self.pages[0])
- with self.assertRaises(AssertionError):
+ with self.assertRaises(TypeError):
results.AddMeasurement('url', 'string', 'foo')
results.DidRunPage(self.pages[0])
- def testAddSummaryValueWithPageSpecified(self):
- with self.CreateResults() as results:
- results.WillRunPage(self.pages[0])
- with self.assertRaises(AssertionError):
- # Invalid because should have no page.
- results.AddSummaryValue(scalar.ScalarValue(
- self.pages[0], 'a', 'units', 3,
- improvement_direction=improvement_direction.UP))
- results.DidRunPage(self.pages[0])
-
def testMeasurementUnitChangeRaises(self):
with self.CreateResults() as results:
results.WillRunPage(self.pages[0])
@@ -215,7 +177,7 @@ def testMeasurementUnitChangeRaises(self):
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
- with self.assertRaises(AssertionError):
+ with self.assertRaises(ValueError):
results.AddMeasurement('a', 'foobgrobbers', 3)
results.DidRunPage(self.pages[1])
@@ -231,28 +193,6 @@ def testNoSuccessesWhenAllPagesFailOrSkip(self):
self.assertFalse(results.had_successes)
- def testIterAllLegacyValuesForSuccessfulPages(self):
- with self.CreateResults() as results:
- results.WillRunPage(self.pages[0])
- results.AddMeasurement('a', 'seconds', 3)
- results.DidRunPage(self.pages[0])
-
- results.WillRunPage(self.pages[1])
- results.AddMeasurement('a', 'seconds', 3)
- results.DidRunPage(self.pages[1])
-
- results.WillRunPage(self.pages[2])
- results.AddMeasurement('a', 'seconds', 3)
- results.DidRunPage(self.pages[2])
-
- expected = [
- (v.page, v.name, v.units, v.value)
- for v in results.IterAllLegacyValues()]
- self.assertEqual([
- (self.pages[0], 'a', 'seconds', 3),
- (self.pages[1], 'a', 'seconds', 3),
- (self.pages[2], 'a', 'seconds', 3)], expected)
-
def testAddTraces(self):
with self.CreateResults() as results:
results.WillRunPage(self.pages[0])
@@ -276,113 +216,9 @@ def testAddTracesForSamePage(self):
runs = list(results.IterRunsWithTraces())
self.assertEqual(1, len(runs))
- def testOutputEmptyResults_ChartJSON(self):
- output_file = os.path.join(self._output_dir, 'chart.json')
- with open(output_file, 'w') as stream:
- formatter = chart_json_output_formatter.ChartJsonOutputFormatter(stream)
- with self.CreateResults(
- output_formatters=[formatter],
- benchmark_name='fake_benchmark_name'):
- pass
-
- with open(output_file) as f:
- chartjson_output = json.load(f)
-
- self.assertFalse(chartjson_output['enabled'])
- self.assertEqual(chartjson_output['benchmark_name'], 'fake_benchmark_name')
-
- def testOutputEmptyResults_HTML(self):
- output_file = os.path.join(self._output_dir, 'results.html')
- with codecs.open(output_file, 'w', encoding='utf-8') as stream:
- formatter = html_output_formatter.HtmlOutputFormatter(stream)
- with self.CreateResults(output_formatters=[formatter]):
- pass
-
- self.assertGreater(os.stat(output_file).st_size, 0)
-
- def testOutputEmptyResults_Histograms(self):
- output_file = os.path.join(self._output_dir, 'histograms.json')
- with open(output_file, 'w') as stream:
- formatter = histogram_set_json_output_formatter.\
- HistogramSetJsonOutputFormatter(stream)
- with self.CreateResults(output_formatters=[formatter]):
- pass
-
- with open(output_file) as f:
- self.assertEqual(f.read(), '[]')
-
- def testAddMetricPageResults(self):
- hs = histogram_set.HistogramSet()
- hs.AddHistogram(histogram_module.Histogram('foo', 'count'))
- hs.AddSharedDiagnosticToAllHistograms(
- 'bar', generic_set.GenericSet(['baz']))
- histogram_dicts = hs.AsDicts()
-
- with self.CreateResults() as results:
- results.WillRunPage(self.pages[0])
- run = results.current_story_run
- results.DidRunPage(self.pages[0])
-
- # Pretend we got some results by running metrics.
- results.AddMetricPageResults({
- 'run': run,
- 'fail': [],
- 'histogram_dicts': histogram_dicts,
- 'scalars': []
- })
-
- self.assertEqual(results.AsHistogramDicts(), histogram_dicts)
-
- def testAddSharedDiagnostics(self):
- with self.CreateResults(benchmark_name='benchmark_name') as results:
- results.WillRunPage(self.pages[0])
- results.DidRunPage(self.pages[0])
- results.AddSharedDiagnostics(os_name='linux')
- results.PopulateHistogramSet()
-
- histogram_dicts = results.AsHistogramDicts()
- self.assertEqual(1, len(histogram_dicts))
-
- diag = diagnostic.Diagnostic.FromDict(histogram_dicts[0])
- self.assertIsInstance(diag, generic_set.GenericSet)
-
- def testPopulateHistogramSet_UsesScalarValueData(self):
- with self.CreateResults() as results:
- results.WillRunPage(self.pages[0])
- results.AddMeasurement('a', 'seconds', 3)
- results.DidRunPage(self.pages[0])
- results.PopulateHistogramSet()
-
- hs = histogram_set.HistogramSet()
- hs.ImportDicts(results.AsHistogramDicts())
- self.assertEqual(1, len(hs))
- self.assertEqual('a', hs.GetFirstHistogram().name)
-
- def testPopulateHistogramSet_UsesHistogramSetData(self):
- with self.CreateResults(benchmark_name='benchmark_name') as results:
- results.WillRunPage(self.pages[0])
- results.AddHistogram(histogram_module.Histogram('foo', 'count'))
- results.DidRunPage(self.pages[0])
- results.PopulateHistogramSet()
-
- histogram_dicts = results.AsHistogramDicts()
- self.assertEqual(8, len(histogram_dicts))
-
- hs = histogram_set.HistogramSet()
- hs.ImportDicts(histogram_dicts)
-
- hist = hs.GetHistogramNamed('foo')
- self.assertItemsEqual(hist.diagnostics[reserved_infos.BENCHMARKS.name],
- ['benchmark_name'])
-
- def testBeginFinishBenchmarkRecords(self):
- self.mock_time.side_effect = [1234567890.987]
+ def testDiagnosticsAsArtifact(self):
with self.CreateResults(benchmark_name='some benchmark',
benchmark_description='a description') as results:
- results.WillRunPage(self.pages[0])
- results.DidRunPage(self.pages[0])
- results.WillRunPage(self.pages[1])
- results.DidRunPage(self.pages[1])
results.AddSharedDiagnostics(
owners=['test'],
bug_components=['1', '2'],
@@ -392,105 +228,32 @@ def testBeginFinishBenchmarkRecords(self):
os_name='os',
os_version='ver',
)
-
- records = self.GetResultRecords()
- self.assertEqual(len(records), 4) # Start, Result, Result, Finish.
- self.assertEqual(records[0], {
- 'benchmarkRun': {
- 'startTime': '2009-02-13T23:31:30.987000Z',
- }
- })
- self.assertEqual(records[1]['testResult']['status'], 'PASS')
- self.assertEqual(records[2]['testResult']['status'], 'PASS')
- self.assertEqual(records[3], {
- 'benchmarkRun': {
- 'finalized': True,
- 'interrupted': False,
- 'diagnostics': {
- 'benchmarks': ['some benchmark'],
- 'benchmarkDescriptions': ['a description'],
- 'owners': ['test'],
- 'bugComponents': ['1', '2'],
- 'documentationLinks': [['documentation', 'url']],
- 'architectures': ['arch'],
- 'deviceIds': ['id'],
- 'osNames': ['os'],
- 'osVersions': ['ver'],
- },
- },
- })
-
- def testBeginFinishBenchmarkRecords_interrupted(self):
- self.mock_time.side_effect = [1234567890.987]
- with self.CreateResults(benchmark_name='some benchmark',
- benchmark_description='a description') as results:
- results.WillRunPage(self.pages[0])
- results.Fail('fatal error')
- results.DidRunPage(self.pages[0])
- results.InterruptBenchmark('some reason')
-
- records = self.GetResultRecords()
- self.assertEqual(len(records), 3) # Start, Result, Finish.
- self.assertEqual(records[0], {
- 'benchmarkRun': {
- 'startTime': '2009-02-13T23:31:30.987000Z',
- }
- })
- self.assertEqual(records[1]['testResult']['status'], 'FAIL')
- self.assertEqual(records[2], {
- 'benchmarkRun': {
- 'finalized': True,
- 'interrupted': True,
- 'diagnostics': {
- 'benchmarks': ['some benchmark'],
- 'benchmarkDescriptions': ['a description'],
- },
- }
- })
-
- @mock.patch('py_utils.cloud_storage.Insert')
- def testUploadArtifactsToCloud(self, cs_insert_mock):
- cs_path_name = 'https://cs_foo'
- cs_insert_mock.return_value = cs_path_name
- with self.CreateResults(upload_bucket='abc') as results:
- results.WillRunPage(self.pages[0])
- with results.CreateArtifact('screenshot.png') as screenshot1:
- pass
- results.DidRunPage(self.pages[0])
-
- results.WillRunPage(self.pages[1])
- with results.CreateArtifact('log.txt') as log2:
- pass
- results.DidRunPage(self.pages[1])
-
- results_processor.UploadArtifactsToCloud(results)
-
- cs_insert_mock.assert_has_calls(
- [mock.call('abc', mock.ANY, screenshot1.name),
- mock.call('abc', mock.ANY, log2.name)],
- any_order=True)
-
- # Assert that the path is now the cloud storage path
- for run in results.IterStoryRuns():
- for artifact in run.IterArtifacts():
- self.assertEqual(cs_path_name, artifact.url)
-
- @mock.patch('py_utils.cloud_storage.Insert')
- def testUploadArtifactsToCloud_withNoOpArtifact(self, _):
- with self.CreateResults(
- upload_bucket='abc', output_dir=None, intermediate_dir=None) as results:
results.WillRunPage(self.pages[0])
- with results.CreateArtifact('screenshot.png'):
- pass
results.DidRunPage(self.pages[0])
-
results.WillRunPage(self.pages[1])
- with results.CreateArtifact('log.txt'):
- pass
results.DidRunPage(self.pages[1])
- # Just make sure that this does not crash
- results_processor.UploadArtifactsToCloud(results)
+ records = self.GetResultRecords()
+ self.assertEqual(len(records), 2)
+ for record in records:
+ self.assertEqual(record['testResult']['status'], 'PASS')
+ artifacts = record['testResult']['outputArtifacts']
+ self.assertIn(page_test_results.DIAGNOSTICS_NAME, artifacts)
+ with open(artifacts[page_test_results.DIAGNOSTICS_NAME]['filePath']) as f:
+ diagnostics = json.load(f)
+ self.assertEqual(diagnostics, {
+ 'diagnostics': {
+ 'benchmarks': ['some benchmark'],
+ 'benchmarkDescriptions': ['a description'],
+ 'owners': ['test'],
+ 'bugComponents': ['1', '2'],
+ 'documentationLinks': [['documentation', 'url']],
+ 'architectures': ['arch'],
+ 'deviceIds': ['id'],
+ 'osNames': ['os'],
+ 'osVersions': ['ver'],
+ },
+ })
def testCreateArtifactsForDifferentPages(self):
with self.CreateResults() as results:
diff --git a/telemetry/telemetry/internal/results/results_options.py b/telemetry/telemetry/internal/results/results_options.py
index 45bf6ae3b6..8264aae332 100644
--- a/telemetry/telemetry/internal/results/results_options.py
+++ b/telemetry/telemetry/internal/results/results_options.py
@@ -2,51 +2,17 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import codecs
+import json
import os
import sys
-from telemetry.internal.results import chart_json_output_formatter
-from telemetry.internal.results import csv_output_formatter
-from telemetry.internal.results import histogram_set_json_output_formatter
-from telemetry.internal.results import html_output_formatter
-from telemetry.internal.results import json_3_output_formatter
from telemetry.internal.results import page_test_results
# List of formats supported by our legacy output formatters.
# TODO(crbug.com/981349): Should be eventually replaced entirely by the results
# processor in tools/perf.
-LEGACY_OUTPUT_FORMATS = (
- 'chartjson',
- 'csv',
- 'histograms',
- 'html',
- 'json-test-results',
- 'none')
-
-
-# Filenames to use for given output formats.
-_OUTPUT_FILENAME_LOOKUP = {
- 'chartjson': 'results-chart.json',
- 'csv': 'results.csv',
- 'histograms': 'histograms.json',
- 'html': 'results.html',
- 'json-test-results': 'test-results.json',
-}
-
-
-def _GetOutputStream(output_format, output_dir):
- assert output_format in _OUTPUT_FILENAME_LOOKUP, (
- "Cannot set stream for '%s' output format." % output_format)
- output_file = os.path.join(output_dir, _OUTPUT_FILENAME_LOOKUP[output_format])
-
- # TODO(eakuefner): Factor this hack out after we rewrite HTMLOutputFormatter.
- if output_format in ['html', 'csv']:
- open(output_file, 'a').close() # Create file if it doesn't exist.
- return codecs.open(output_file, mode='r+', encoding='utf-8')
- else:
- return open(output_file, mode='w+')
+LEGACY_OUTPUT_FORMATS = ('none')
def CreateResults(options, benchmark_name=None, benchmark_description=None,
@@ -69,39 +35,7 @@ def CreateResults(options, benchmark_name=None, benchmark_description=None,
if not os.path.exists(options.output_dir):
os.makedirs(options.output_dir)
- if options.external_results_processor:
- output_formats = options.legacy_output_formats
- else:
- output_formats = options.output_formats
-
- output_formatters = []
- for output_format in output_formats:
- if output_format == 'none':
- continue
- output_stream = _GetOutputStream(output_format, options.output_dir)
- if output_format == 'html':
- output_formatters.append(html_output_formatter.HtmlOutputFormatter(
- output_stream, options.reset_results, options.upload_bucket))
- elif output_format == 'json-test-results':
- output_formatters.append(json_3_output_formatter.JsonOutputFormatter(
- output_stream))
- elif output_format == 'chartjson':
- output_formatters.append(
- chart_json_output_formatter.ChartJsonOutputFormatter(output_stream))
- elif output_format == 'csv':
- output_formatters.append(
- csv_output_formatter.CsvOutputFormatter(
- output_stream, options.reset_results))
- elif output_format == 'histograms':
- output_formatters.append(
- histogram_set_json_output_formatter.HistogramSetJsonOutputFormatter(
- output_stream, options.reset_results))
- else:
- # Should never be reached. The parser enforces the choices.
- raise NotImplementedError(output_format)
-
return page_test_results.PageTestResults(
- output_formatters=output_formatters,
progress_stream=sys.stdout if report_progress else None,
output_dir=options.output_dir,
intermediate_dir=options.intermediate_dir,
@@ -109,3 +43,23 @@ def CreateResults(options, benchmark_name=None, benchmark_description=None,
benchmark_description=benchmark_description,
upload_bucket=options.upload_bucket,
results_label=options.results_label)
+
+
+def ReadTestResults(intermediate_dir):
+ """Read results from an intermediate_dir into a single list."""
+ results = []
+ with open(os.path.join(
+ intermediate_dir, page_test_results.TEST_RESULTS)) as f:
+ for line in f:
+ results.append(json.loads(line)['testResult'])
+ return results
+
+
+def ReadMeasurements(test_result):
+ """Read ad hoc measurements recorded on a test result."""
+ try:
+ artifact = test_result['outputArtifacts']['measurements.json']
+ except KeyError:
+ return {}
+ with open(artifact['filePath']) as f:
+ return json.load(f)['measurements']
diff --git a/telemetry/telemetry/internal/results/results_processor.py b/telemetry/telemetry/internal/results/results_processor.py
index 58d6d341a9..cfd1339086 100644
--- a/telemetry/telemetry/internal/results/results_processor.py
+++ b/telemetry/telemetry/internal/results/results_processor.py
@@ -3,59 +3,26 @@
# found in the LICENSE file.
import logging
-import os
-import random
-import sys
-import time
-import uuid
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
-from py_utils import cloud_storage # pylint: disable=import-error
-
-from telemetry.value import common_value_helpers
-from tracing.metrics import metric_runner
from tracing.trace_data import trace_data
HTML_TRACE_NAME = 'trace.html'
-_TEN_MINUTES = 60*10
-
-
-#TODO(crbug.com/772216): Remove this once the uploading is done by Chromium
-# test recipe.
-def UploadArtifactsToCloud(results):
- """Upload all artifacts of the test to cloud storage.
-
- Sets 'url' attribute of each artifact to its cloud URL.
- """
- for run in results.IterStoryRuns():
- for artifact in run.IterArtifacts():
- if artifact.url is None:
- remote_name = str(uuid.uuid1())
- cloud_url = cloud_storage.Insert(
- results.upload_bucket, remote_name, artifact.local_path)
- logging.info('Uploading %s of page %s to %s\n' % (
- artifact.name, run.story.name, cloud_url))
- artifact.SetUrl(cloud_url)
-
-def SerializeAndUploadHtmlTraces(results):
- """Creates and uploads html trace files for each story run, if necessary.
+def SerializeHtmlTraces(results):
+ """Creates html trace files for each story run, if necessary.
For each story run, takes all trace files from individual trace agents
- and runs trace2html on them. Then uploads the resulting html to cloud.
- This is done only once, subsequent calls to this function will not
- do anything.
- """
- for run in results.IterRunsWithTraces():
- _SerializeAndUploadHtmlTrace(run, results.label, results.upload_bucket)
-
+ and runs trace2html on them. This is done only once, subsequent calls to this
+ function will not do anything.
-def ComputeTimelineBasedMetrics(results):
- """Compute TBMv2 metrics on all story runs in parallel."""
- assert not results.current_story_run, 'Cannot compute metrics while running.'
+ TODO(crbug.com/981349): Remove this function entirely when trace
+ serialization has been handed over to results processor.
+ """
+ assert not results.current_story_run, 'Cannot serialize traces while running.'
def _GetCpuCount():
try:
return multiprocessing.cpu_count()
@@ -64,8 +31,7 @@ def _GetCpuCount():
logging.warn('cpu_count() not implemented.')
return 8
- available_runs = list(run for run in results.IterRunsWithTraces()
- if run.tbm_metrics)
+ available_runs = list(run for run in results.IterRunsWithTraces())
if not available_runs:
return
@@ -73,98 +39,24 @@ def _GetCpuCount():
# crbug.com/953365.
threads_count = min(_GetCpuCount()/2 or 1, len(available_runs))
pool = ThreadPool(threads_count)
- metrics_runner = lambda run: _ComputeMetricsInPool(
- run, results.label, results.upload_bucket)
-
try:
- for result in pool.imap_unordered(metrics_runner, available_runs):
- results.AddMetricPageResults(result)
+ for _ in pool.imap_unordered(_SerializeHtmlTraceInPool, available_runs):
+ pass
finally:
pool.terminate()
pool.join()
-def _TraceCanonicalName(run, label):
- parts = [
- run.story.file_safe_name,
- label,
- run.start_datetime.strftime('%Y-%m-%d_%H-%M-%S'),
- random.randint(1, 1e5)]
- return '_'.join(str(p) for p in parts if p) + '.html'
-
-
-def _SerializeAndUploadHtmlTrace(run, label, bucket):
- html_trace = run.GetArtifact(HTML_TRACE_NAME)
- if html_trace is None:
- trace_files = [art.local_path for art in run.IterArtifacts('trace')]
- with run.CaptureArtifact(HTML_TRACE_NAME) as html_path:
- trace_data.SerializeAsHtml(trace_files, html_path)
-
- html_trace = run.GetArtifact(HTML_TRACE_NAME)
- if bucket is not None and html_trace.url is None:
- remote_name = _TraceCanonicalName(run, label)
- cloud_url = cloud_storage.Insert(bucket, remote_name, html_trace.local_path)
- sys.stderr.write(
- 'View generated trace files online at %s for story %s\n' % (
- cloud_url, run.story.name))
- html_trace.SetUrl(cloud_url)
-
- return html_trace
-
-
-def _ComputeMetricsInPool(run, label, bucket):
- story_name = run.story.name
+def _SerializeHtmlTraceInPool(run):
try:
- retvalue = {
- 'run': run,
- 'fail': [],
- 'histogram_dicts': None,
- 'scalars': []
- }
- extra_import_options = {
- 'trackDetailedModelStats': True
- }
-
- html_trace = _SerializeAndUploadHtmlTrace(run, label, bucket)
- trace_size_in_mib = os.path.getsize(html_trace.local_path) / (2 ** 20)
- # Bails out on trace that are too big. See crbug.com/812631 for more
- # details.
- if trace_size_in_mib > 400:
- retvalue['fail'].append(
- '%s: Trace size is too big: %s MiB' % (story_name, trace_size_in_mib))
- return retvalue
-
- logging.info('%s: Starting to compute metrics on trace.', story_name)
- start = time.time()
- # This timeout needs to be coordinated with the Swarming IO timeout for the
- # task that runs this code. If this timeout is longer or close in length
- # to the swarming IO timeout then we risk being forcibly killed for not
- # producing any output. Note that this could be fixed by periodically
- # outputing logs while waiting for metrics to be calculated.
- timeout = _TEN_MINUTES
- mre_result = metric_runner.RunMetricOnSingleTrace(
- html_trace.local_path, run.tbm_metrics,
- extra_import_options, canonical_url=html_trace.url,
- timeout=timeout)
- logging.info('%s: Computing metrics took %.3f seconds.' % (
- story_name, time.time() - start))
-
- if mre_result.failures:
- for f in mre_result.failures:
- retvalue['fail'].append('%s: %s' % (story_name, str(f)))
-
- histogram_dicts = mre_result.pairs.get('histograms', [])
- retvalue['histogram_dicts'] = histogram_dicts
-
- scalars = []
- for d in mre_result.pairs.get('scalars', []):
- scalars.append(common_value_helpers.TranslateScalarValue(
- d, run.story))
- retvalue['scalars'] = scalars
- return retvalue
+ html_trace = run.GetArtifact(HTML_TRACE_NAME)
+ if html_trace is None:
+ trace_files = [art.local_path for art in run.IterArtifacts('trace')]
+ with run.CaptureArtifact(HTML_TRACE_NAME) as html_path:
+ trace_data.SerializeAsHtml(trace_files, html_path)
except Exception: # pylint: disable=broad-except
# logging exception here is the only way to get a stack trace since
# multiprocessing's pool implementation does not save that data. See
# crbug.com/953365.
- logging.exception('%s: Exception while calculating metric', story_name)
+ logging.exception('%s: Exception while aggregating traces', run.story.name)
raise
diff --git a/telemetry/telemetry/internal/results/story_run.py b/telemetry/telemetry/internal/results/story_run.py
index 312474c9b2..996c85649b 100644
--- a/telemetry/telemetry/internal/results/story_run.py
+++ b/telemetry/telemetry/internal/results/story_run.py
@@ -4,7 +4,9 @@
import contextlib
import datetime
+import json
import logging
+import numbers
import os
import posixpath
import time
@@ -15,6 +17,7 @@
FAIL = 'FAIL'
SKIP = 'SKIP'
+MEASUREMENTS_NAME = 'measurements.json'
_CONTENT_TYPES = {
'.dat': 'application/octet-stream', # Generic data blob.
@@ -104,7 +107,6 @@ def __init__(self, story, test_prefix=None, index=0, intermediate_dir=None):
self._story = story
self._test_prefix = test_prefix
self._index = index
- self._values = []
self._tbm_metrics = []
self._skip_reason = None
self._skip_expected = False
@@ -113,6 +115,7 @@ def __init__(self, story, test_prefix=None, index=0, intermediate_dir=None):
self._start_time = time.time()
self._end_time = None
self._artifacts = {}
+ self._measurements = {}
if intermediate_dir is None:
self._artifacts_dir = None
@@ -123,8 +126,22 @@ def __init__(self, story, test_prefix=None, index=0, intermediate_dir=None):
if not os.path.exists(self._artifacts_dir):
os.makedirs(self._artifacts_dir)
- def AddLegacyValue(self, value):
- self._values.append(value)
+ def AddMeasurement(self, name, unit, samples, description=None):
+ """Record an add hoc measurement associated with this story run."""
+ assert self._measurements is not None, (
+ 'Measurements have already been collected')
+ if not isinstance(name, basestring):
+ raise TypeError('name must be a string, got %s' % name)
+ assert name not in self._measurements, (
+ 'Already have measurement with the name %s' % name)
+ self._measurements[name] = _MeasurementToDict(unit, samples, description)
+
+ def _WriteMeasurementsArtifact(self):
+ if self._measurements:
+ with self.CreateArtifact(MEASUREMENTS_NAME) as f:
+ json.dump({'measurements': self._measurements}, f)
+ # It's an error to record more measurements after this point.
+ self._measurements = None
def SetTbmMetrics(self, metrics):
assert not self._tbm_metrics, 'Metrics have already been set'
@@ -135,7 +152,7 @@ def SetFailed(self, failure_str):
self._failed = True
self._failure_str = failure_str
- def Skip(self, reason, is_expected=True):
+ def Skip(self, reason, expected=True):
if not reason:
raise ValueError('A skip reason must be given')
# TODO(#4254): Turn this into a hard failure.
@@ -143,11 +160,12 @@ def Skip(self, reason, is_expected=True):
logging.warning(
'Story was already skipped with reason: %s', self.skip_reason)
self._skip_reason = reason
- self._skip_expected = is_expected
+ self._skip_expected = expected
def Finish(self):
assert not self.finished, 'story run had already finished'
self._end_time = time.time()
+ self._WriteMeasurementsArtifact()
def AsDict(self):
"""Encode as TestResultEntry dict in LUCI Test Results format.
@@ -158,11 +176,12 @@ def AsDict(self):
return {
'testResult': {
'testPath': self.test_path,
+ 'resultId': str(self.index),
'status': self.status,
- 'isExpected': self.is_expected,
+ 'expected': self.expected,
'startTime': self.start_datetime.isoformat() + 'Z',
'runDuration': _FormatDuration(self.duration),
- 'artifacts': {
+ 'outputArtifacts': {
name: artifact.AsDict()
for name, artifact in self._artifacts.items()
},
@@ -178,6 +197,8 @@ def _IterTags(self):
yield 'tbmv2', metric
if 'GTEST_SHARD_INDEX' in os.environ:
yield 'shard', os.environ['GTEST_SHARD_INDEX']
+ for tag in self.story.GetStoryTagsList():
+ yield 'story_tag', tag
@property
def story(self):
@@ -199,11 +220,6 @@ def test_path(self):
else:
return story_name
- @property
- def values(self):
- """The values that correspond to this story run."""
- return self._values
-
@property
def tbm_metrics(self):
"""The TBMv2 metrics that will computed on this story run."""
@@ -232,7 +248,7 @@ def skip_reason(self):
return self._skip_reason
@property
- def is_expected(self):
+ def expected(self):
"""Whether the test status is expected."""
return self._skip_expected or self.ok
@@ -352,3 +368,21 @@ def GetArtifact(self, name):
Returns an Artifact object or None, if there's no artifact with this name.
"""
return self._artifacts.get(name)
+
+
+def _MeasurementToDict(unit, samples, description):
+ """Validate a measurement and encode as a JSON serializable dict."""
+ if not isinstance(unit, basestring):
+ # TODO(crbug.com/999484): Also validate that this is a known unit.
+ raise TypeError('unit must be a string, got %s' % unit)
+ if not isinstance(samples, list):
+ samples = [samples]
+ if not all(isinstance(v, numbers.Number) for v in samples):
+ raise TypeError(
+ 'samples must be a list of numeric values, got %s' % samples)
+ measurement = {'unit': unit, 'samples': samples}
+ if description is not None:
+ if not isinstance(description, basestring):
+ raise TypeError('description must be a string, got %s' % description)
+ measurement['description'] = description
+ return measurement
diff --git a/telemetry/telemetry/internal/results/story_run_unittest.py b/telemetry/telemetry/internal/results/story_run_unittest.py
index 013e889251..f7f8c6e2a1 100644
--- a/telemetry/telemetry/internal/results/story_run_unittest.py
+++ b/telemetry/telemetry/internal/results/story_run_unittest.py
@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import json
import os
import unittest
@@ -14,15 +15,8 @@
from py_utils import tempfile_ext
-# splitdrive returns '' on systems which don't have drives, like linux.
-ROOT_CHAR = os.path.splitdrive(__file__)[0] + os.sep
-def _abs_join(*args):
- """Helper to do a path join that's an absolute path."""
- return ROOT_CHAR + os.path.join(*args)
-
-
-def TestStory(name):
- return story_module.Story(shared_state.SharedState, name=name)
+def TestStory(name, **kwargs):
+ return story_module.Story(shared_state.SharedState, name=name, **kwargs)
class StoryRunTest(unittest.TestCase):
@@ -47,19 +41,19 @@ def testStoryRunFailed(self):
def testStoryRunSkipped(self):
run = story_run.StoryRun(self.story)
run.SetFailed('oops')
- run.Skip('test', is_expected=True)
+ run.Skip('test', expected=True)
self.assertFalse(run.ok)
self.assertFalse(run.failed)
self.assertTrue(run.skipped)
- self.assertTrue(run.is_expected)
+ self.assertTrue(run.expected)
self.assertEquals(run.failure_str, 'oops')
run = story_run.StoryRun(self.story)
- run.Skip('test', is_expected=False)
+ run.Skip('test', expected=False)
self.assertFalse(run.ok)
self.assertFalse(run.failed)
self.assertTrue(run.skipped)
- self.assertFalse(run.is_expected)
+ self.assertFalse(run.expected)
self.assertEquals(run.failure_str, None)
def testStoryRunSucceeded(self):
@@ -83,7 +77,9 @@ def testAsDict(self, time_module):
1234567900.987]
with tempfile_ext.NamedTemporaryDirectory() as tempdir:
run = story_run.StoryRun(
- story=TestStory(name='http://example.com'), test_prefix='benchmark',
+ story=TestStory(name='http://example.com', tags=['tag1', 'tag2']),
+ test_prefix='benchmark',
+ index=2,
intermediate_dir=tempdir)
with run.CreateArtifact('logs.txt') as log_file:
log_file.write('hello\n')
@@ -95,11 +91,12 @@ def testAsDict(self, time_module):
{
'testResult': {
'testPath': 'benchmark/http%3A%2F%2Fexample.com',
+ 'resultId': '2',
'status': 'PASS',
- 'isExpected': True,
+ 'expected': True,
'startTime': '2009-02-13T23:31:30.987000Z',
'runDuration': '10.00s',
- 'artifacts': {
+ 'outputArtifacts': {
'logs.txt' : {
'filePath': mock.ANY,
'contentType': 'text/plain'
@@ -108,18 +105,70 @@ def testAsDict(self, time_module):
'tags': [
{'key': 'tbmv2', 'value': 'metric1'},
{'key': 'tbmv2', 'value': 'metric2'},
- {'key': 'shard', 'value': '7'}
+ {'key': 'shard', 'value': '7'},
+ {'key': 'story_tag', 'value': 'tag1'},
+ {'key': 'story_tag', 'value': 'tag2'},
],
}
}
)
# Log file is in the {intermediate_dir}/ directory, and file name
# extension is preserved.
- logs_file = entry['testResult']['artifacts']['logs.txt']['filePath']
+ logs_file = entry['testResult']['outputArtifacts']['logs.txt']['filePath']
intermediate_dir = os.path.join(tempdir, '')
self.assertTrue(logs_file.startswith(intermediate_dir))
self.assertTrue(logs_file.endswith('.txt'))
+ def testAddMeasurements(self):
+ with tempfile_ext.NamedTemporaryDirectory() as tempdir:
+ run = story_run.StoryRun(self.story, intermediate_dir=tempdir)
+ run.AddMeasurement('run_time', 'ms', [1, 2, 3])
+ run.AddMeasurement('foo_bars', 'count', 4,
+ description='number of foo_bars found')
+ run.Finish()
+
+ artifact = run.GetArtifact(story_run.MEASUREMENTS_NAME)
+ with open(artifact.local_path) as f:
+ measurements = json.load(f)
+
+ self.assertEqual(measurements, {
+ 'measurements': {
+ 'run_time': {
+ 'unit': 'ms',
+ 'samples': [1, 2, 3],
+ },
+ 'foo_bars' : {
+ 'unit': 'count',
+ 'samples': [4],
+ 'description': 'number of foo_bars found'
+ }
+ }
+ })
+
+ def testAddMeasurementValidation(self):
+ run = story_run.StoryRun(self.story)
+ with self.assertRaises(TypeError):
+ run.AddMeasurement(name=None, unit='ms', samples=[1, 2, 3])
+ with self.assertRaises(TypeError):
+ run.AddMeasurement(name='run_time', unit=7, samples=[1, 2, 3])
+ with self.assertRaises(TypeError):
+ run.AddMeasurement(name='run_time', unit='ms', samples=[1, None, 3])
+ with self.assertRaises(TypeError):
+ run.AddMeasurement(name='run_time', unit='ms', samples=[1, 2, 3],
+ description=['not', 'a', 'string'])
+
+ def testAddMeasurementRaisesAfterFinish(self):
+ run = story_run.StoryRun(self.story)
+ run.AddMeasurement('run_time', 'ms', [1, 2, 3])
+ run.Finish()
+ with self.assertRaises(AssertionError):
+ run.AddMeasurement('foo_bars', 'count', 4)
+
+ def testAddMeasurementTwiceRaises(self):
+ run = story_run.StoryRun(self.story)
+ run.AddMeasurement('foo_bars', 'ms', [1])
+ with self.assertRaises(AssertionError):
+ run.AddMeasurement('foo_bars', 'ms', [2])
def testCreateArtifact(self):
with tempfile_ext.NamedTemporaryDirectory() as tempdir:
diff --git a/telemetry/telemetry/internal/story_runner.py b/telemetry/telemetry/internal/story_runner.py
index 0a388f0491..b349eee794 100644
--- a/telemetry/telemetry/internal/story_runner.py
+++ b/telemetry/telemetry/internal/story_runner.py
@@ -24,7 +24,7 @@
from telemetry.internal.util import exception_formatter
from telemetry import page
from telemetry.page import legacy_page_test
-from telemetry import story as story_module
+from telemetry.story import story_filter as story_filter_module
from telemetry.util import wpr_modes
from telemetry.web_perf import story_test
@@ -45,7 +45,7 @@ class ArchiveError(Exception):
def AddCommandLineArgs(parser):
- story_module.StoryFilter.AddCommandLineArgs(parser)
+ story_filter_module.StoryFilterFactory.AddCommandLineArgs(parser)
group = optparse.OptionGroup(parser, 'Story runner options')
# Note that the default for pageset-repeat is 1 unless the benchmark
@@ -82,10 +82,6 @@ def AddCommandLineArgs(parser):
help='Run against live sites and ignore the Web Page Replay archives.')
parser.add_option_group(group)
- parser.add_option('-d', '--also-run-disabled-tests',
- dest='run_disabled_tests',
- action='store_true', default=False,
- help='Ignore expectations.config disabling.')
parser.add_option('-p', '--print-only', dest='print_only',
choices=['stories', 'tags', 'both'], default=None)
parser.add_option('-w', '--wait-for-cpu-temp',
@@ -95,15 +91,11 @@ def AddCommandLineArgs(parser):
'until the device CPU has cooled down. If '
'not specified, this wait is disabled. '
'Device must be supported. ')
- parser.add_option('--run-full-story-set', action='store_true', default=False,
- help='Whether to run the complete set of stories instead '
- 'of an abridged version. Note that if the story set '
- 'does not provide the information required to abridge it, '
- 'then this argument will have no impact.')
-def ProcessCommandLineArgs(parser, args):
- story_module.StoryFilter.ProcessCommandLineArgs(parser, args)
+def ProcessCommandLineArgs(parser, args, environment=None):
+ story_filter_module.StoryFilterFactory.ProcessCommandLineArgs(
+ parser, args, environment)
if args.pageset_repeat < 1:
parser.error('--pageset-repeat must be a positive integer.')
@@ -201,8 +193,7 @@ def _GetPossibleBrowser(finder_options):
return possible_browser
-def RunStorySet(test, story_set, finder_options, results, max_failures=None,
- expectations=None, max_num_values=sys.maxint):
+def RunStorySet(test, story_set, finder_options, results, max_failures=None):
"""Runs a test against a story_set with the given options.
Stop execution for unexpected exceptions such as KeyboardInterrupt. Some
@@ -218,15 +209,22 @@ def RunStorySet(test, story_set, finder_options, results, max_failures=None,
the entire story run. It's overriden by finder_options.max_failures
if given.
expectations: Benchmark expectations used to determine disabled stories.
- max_num_values: Max number of legacy values allowed before aborting the
- story run.
"""
stories = story_set.stories
for s in stories:
ValidateStory(s)
- # Filter page set based on options.
- stories = story_module.StoryFilter.FilterStories(stories)
+ # TODO(crbug.com/1013630): A possible_browser object was already created in
+ # Run() method, so instead of creating a new one here we should be
+ # using that one.
+ possible_browser = _GetPossibleBrowser(finder_options)
+ platform_tags = possible_browser.GetTypExpectationsTags()
+ logging.info('The following expectations condition tags were generated %s',
+ str(platform_tags))
+ abridged_story_set_tag = story_set.GetAbridgedStorySetTagFilter()
+ story_filter = story_filter_module.StoryFilterFactory.BuildStoryFilter(
+ results.benchmark_name, platform_tags, abridged_story_set_tag)
+ stories = story_filter.FilterStories(stories)
wpr_archive_info = story_set.wpr_archive_info
# Sort the stories based on the archive name, to minimize how often the
# network replay-server needs to be restarted.
@@ -272,18 +270,7 @@ def RunStorySet(test, story_set, finder_options, results, max_failures=None,
if effective_max_failures is None:
effective_max_failures = max_failures
- possible_browser = _GetPossibleBrowser(finder_options)
-
- if not finder_options.run_full_story_set:
- tag_filter = story_set.GetAbridgedStorySetTagFilter()
- if tag_filter:
- logging.warn('Running an abridged set of stories (tagged {%s}), '
- 'use --run-full-story-set if you need to run all stories' %
- tag_filter)
- stories = [story for story in stories if tag_filter in story.tags]
-
state = None
- device_info_diags = {}
# TODO(crbug.com/866458): unwind the nested blocks
# pylint: disable=too-many-nested-blocks
try:
@@ -300,19 +287,14 @@ def RunStorySet(test, story_set, finder_options, results, max_failures=None,
results.WillRunPage(story, storyset_repeat_counter)
- if expectations:
- disabled = expectations.IsStoryDisabled(story)
- if disabled:
- if finder_options.run_disabled_tests:
- logging.warning('Force running a disabled story: %s' %
- story.name)
- else:
- results.Skip(disabled)
- results.DidRunPage(story)
- continue
+ skip_reason = story_filter.ShouldSkip(story)
+ if skip_reason:
+ results.Skip(skip_reason)
+ results.DidRunPage(story)
+ continue
if results.benchmark_interrupted:
- results.Skip(results.benchmark_interruption, is_expected=False)
+ results.Skip(results.benchmark_interruption, expected=False)
results.DidRunPage(story)
continue
@@ -323,15 +305,6 @@ def RunStorySet(test, story_set, finder_options, results, max_failures=None,
state.platform.WaitForCpuTemperature(38.0)
_WaitForThermalThrottlingIfNeeded(state.platform)
_RunStoryAndProcessErrorIfNeeded(story, results, state, test)
-
- num_values = sum(1 for _ in results.IterAllLegacyValues())
- # TODO(#4259): Convert this to an exception-based failure
- if num_values > max_num_values:
- msg = 'Too many values: %d > %d' % (num_values, max_num_values)
- logging.error(msg)
- results.Fail(msg)
-
- device_info_diags = _MakeDeviceInfoDiagnostics(state)
except _UNHANDLEABLE_ERRORS as exc:
interruption = (
'Benchmark execution interrupted by a fatal exception: %r' % exc)
@@ -366,9 +339,7 @@ def RunStorySet(test, story_set, finder_options, results, max_failures=None,
'Too many stories failed. Aborting the rest of the stories.')
results.InterruptBenchmark(interruption)
finally:
- results_processor.ComputeTimelineBasedMetrics(results)
- results.PopulateHistogramSet()
- results.AddSharedDiagnostics(**device_info_diags)
+ results_processor.SerializeHtmlTraces(results)
if state:
has_existing_exception = sys.exc_info() != (None, None, None)
@@ -392,7 +363,7 @@ def ValidateStory(story):
def _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
if finder_options.print_only:
return True # Should always run on print-only mode.
- if benchmark._CanRunOnPlatform(possible_browser.platform, finder_options):
+ if benchmark.CanRunOnPlatform(possible_browser.platform, finder_options):
return True
print ('Benchmark "%s" is not supported on the current platform. If this '
"is in error please add it to the benchmark's SUPPORTED_PLATFORMS."
@@ -421,10 +392,6 @@ def RunBenchmark(benchmark, finder_options):
print ('No browser of type "%s" found for running benchmark "%s".' % (
finder_options.browser_options.browser_type, benchmark.Name()))
return -1
- typ_expectation_tags = possible_browser.GetTypExpectationsTags()
- logging.info('The following expectations condition tags were generated %s',
- str(typ_expectation_tags))
- benchmark.expectations.SetTags(typ_expectation_tags)
if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
return -1
@@ -438,11 +405,20 @@ def RunBenchmark(benchmark, finder_options):
raise Exception(
'PageTest must be used with StorySet containing only '
'telemetry.page.Page stories.')
+
+ results.AddSharedDiagnostics(
+ architecture=possible_browser.platform.GetArchName(),
+ device_id=possible_browser.platform.GetDeviceId(),
+ os_name=possible_browser.platform.GetOSName(),
+ os_version=possible_browser.platform.GetOSVersionName(),
+ owners=benchmark.GetOwners(),
+ bug_components=benchmark.GetBugComponents(),
+ documentation_urls=benchmark.GetDocumentationLinks(),
+ )
+
try:
RunStorySet(
- test, story_set, finder_options, results, benchmark.max_failures,
- expectations=benchmark.expectations,
- max_num_values=benchmark.MAX_NUM_VALUES)
+ test, story_set, finder_options, results, benchmark.max_failures)
if results.benchmark_interrupted:
return_code = 2
elif results.had_failures:
@@ -456,17 +432,6 @@ def RunBenchmark(benchmark, finder_options):
results.InterruptBenchmark(interruption)
exception_formatter.PrintFormattedException()
return_code = 2
-
- # TODO(crbug.com/981349): merge two calls to AddSharedDiagnostics
- # (see RunStorySet() method for the second one).
- results.AddSharedDiagnostics(
- owners=benchmark.GetOwners(),
- bug_components=benchmark.GetBugComponents(),
- documentation_urls=benchmark.GetDocumentationLinks(),
- )
-
- if finder_options.upload_results:
- results_processor.UploadArtifactsToCloud(results)
return return_code
def _UpdateAndCheckArchives(archive_data_file, wpr_archive_info,
@@ -554,17 +519,3 @@ def _CheckThermalThrottling(platform):
if platform.HasBeenThermallyThrottled():
logging.warning('Device has been thermally throttled during '
'performance tests, results will vary.')
-
-def _MakeDeviceInfoDiagnostics(state):
- if not state or not state.platform:
- return {}
-
- # This used to include data for reserved_infos.MEMORY_AMOUNTS, but it was
- # found that platform.GetSystemTotalPhysicalMemory() does not give
- # consistent results. See crbug.com/854676 for details.
- return {
- 'architecture': state.platform.GetArchName(),
- 'device_id': state.platform.GetDeviceId(),
- 'os_name': state.platform.GetOSName(),
- 'os_version': state.platform.GetOSVersionName(),
- }
diff --git a/telemetry/telemetry/internal/story_runner_unittest.py b/telemetry/telemetry/internal/story_runner_unittest.py
index 4a792e4edf..115bf85d27 100644
--- a/telemetry/telemetry/internal/story_runner_unittest.py
+++ b/telemetry/telemetry/internal/story_runner_unittest.py
@@ -3,7 +3,6 @@
# found in the LICENSE file.
import json
-import math
import os
import shutil
import sys
@@ -18,645 +17,188 @@
from telemetry import benchmark
from telemetry.core import exceptions
from telemetry.core import util
-from telemetry import decorators
from telemetry.internal.actions import page_action
from telemetry.internal.results import page_test_results
from telemetry.internal.results import results_options
from telemetry.internal import story_runner
-from telemetry.page import page as page_module
from telemetry.page import legacy_page_test
from telemetry import story as story_module
+from telemetry.story import story_filter
from telemetry.testing import fakes
from telemetry.testing import options_for_unittests
-from telemetry.testing import system_stub
-from telemetry.util import wpr_modes
-from telemetry.value import list_of_scalar_values
-from telemetry.value import summary as summary_module
+from telemetry.testing import test_stories
from telemetry.web_perf import story_test
-from telemetry.web_perf import timeline_based_measurement
from telemetry.wpr import archive_info
-from tracing.value import histogram as histogram_module
-from tracing.value import histogram_set
-from tracing.value.diagnostics import generic_set
-from tracing.value.diagnostics import reserved_infos
-
-# This linter complains if we define classes nested inside functions.
-# pylint: disable=bad-super-call
-
-# pylint: disable=too-many-lines
-
-class FakePlatform(object):
- def CanMonitorThermalThrottling(self):
- return False
-
- def WaitForBatteryTemperature(self, _):
- pass
-
- def GetDeviceTypeName(self):
- return 'GetDeviceTypeName'
-
- def GetArchName(self):
- return 'amd64'
-
- def GetOSName(self):
- return 'win'
-
- def GetOSVersionName(self):
- return 'win10'
-
- def GetSystemTotalPhysicalMemory(self):
- return 8 * (1024 ** 3)
-
- def GetDeviceId(self):
- return None
-
-
-class TestSharedState(story_module.SharedState):
-
- _platform = FakePlatform()
-
- @classmethod
- def SetTestPlatform(cls, platform):
- cls._platform = platform
-
- def __init__(self, test, options, story_set, possible_browser):
- super(TestSharedState, self).__init__(
- test, options, story_set, possible_browser)
- self._test = test
- self._current_story = None
-
- @property
- def platform(self):
- return self._platform
-
- def WillRunStory(self, story):
- self._current_story = story
-
- def CanRunStory(self, story):
- return True
-
- def RunStory(self, results):
- if isinstance(self._test, legacy_page_test.LegacyPageTest):
- # TODO(crbug.com/1008852): The RunPage method does not exist any more in
- # LegacyPageTest. This should be refactored to better reflect reality.
- self._test.RunPage(self._current_story, results)
- else:
- self._current_story.Run(self)
-
- def DidRunStory(self, results):
- pass
-
- def TearDownState(self):
- pass
-
- def DumpStateUponStoryRunFailure(self, results):
- pass
-
-
-class DummyTest(legacy_page_test.LegacyPageTest):
- def RunPage(self, *_):
- pass
-
- def ValidateAndMeasurePage(self, page, tab, results):
- pass
-
-
-class DummyLocalStory(story_module.Story):
- def __init__(self, shared_state_class, name='', tags=None):
- if name == '':
- name = 'dummy local story'
- super(DummyLocalStory, self).__init__(
- shared_state_class, name=name, tags=tags)
-
- def Run(self, shared_state):
- pass
-
- @property
- def is_local(self):
- return True
-
- @property
- def url(self):
- return 'data:,'
-
-
-class TestOnlyException(Exception):
- pass
-
-
-class _Measurement(legacy_page_test.LegacyPageTest):
- i = 0
- def RunPage(self, page, results):
- del page # Unused.
- self.i += 1
- results.AddMeasurement('metric', 'unit', self.i)
-
- def ValidateAndMeasurePage(self, page, tab, results):
- del page, tab # Unused.
- self.i += 1
- results.AddMeasurement('metric', 'unit', self.i)
-
class RunStorySetTest(unittest.TestCase):
- """Tests that run dummy story sets with no real browser involved.
+ """Tests that run dummy story sets with a mock StoryTest.
- All these tests:
- - Use story sets containing DummyLocalStory objects.
- - Call story_runner.RunStorySet as entry point.
+ The main entry point for these tests is story_runner.RunStorySet.
"""
def setUp(self):
self.options = options_for_unittests.GetRunOptions(
output_dir=tempfile.mkdtemp())
- self.results = results_options.CreateResults(self.options)
+ # We use a mock platform and story set, so tests can inspect which methods
+ # were called and easily override their behavior.
+ self.mock_platform = test_stories.TestSharedState.mock_platform
+ self.mock_story_test = mock.Mock(spec=story_test.StoryTest)
def tearDown(self):
- self.results.Finalize()
shutil.rmtree(self.options.output_dir)
+ def RunStories(self, stories, **kwargs):
+ story_set = test_stories.DummyStorySet(stories)
+ with results_options.CreateResults(
+ self.options, benchmark_name='benchmark') as results:
+ story_runner.RunStorySet(
+ self.mock_story_test, story_set, self.options, results, **kwargs)
+
+ def ReadTestResults(self):
+ return results_options.ReadTestResults(self.options.intermediate_dir)
+
def testRunStorySet(self):
- number_stories = 3
- story_set = story_module.StorySet()
- for i in xrange(number_stories):
- story_set.AddStory(DummyLocalStory(TestSharedState, name='story_%d' % i))
- test = DummyTest()
- story_runner.RunStorySet(test, story_set, self.options, self.results)
- self.assertFalse(self.results.had_failures)
- self.assertEquals(number_stories, self.results.num_successful)
- self.assertEquals(story_set.stories[0].wpr_mode, wpr_modes.WPR_REPLAY)
+ self.RunStories(['story1', 'story2', 'story3'])
+ test_results = self.ReadTestResults()
+ self.assertTrue(['PASS', 'PASS', 'PASS'],
+ [test['status'] for test in test_results])
def testRunStoryWithLongName(self):
- story_set = story_module.StorySet()
- story_set.AddStory(DummyLocalStory(TestSharedState, name='l' * 182))
- test = DummyTest()
with self.assertRaises(ValueError):
- story_runner.RunStorySet(test, story_set, self.options, self.results)
-
- def testSuccessfulTimelineBasedMeasurementTest(self):
- """Check that PageTest is not required for story_runner.RunStorySet.
-
- Any PageTest related calls or attributes need to only be called
- for PageTest tests.
- """
- class TestSharedTbmState(TestSharedState):
- def RunStory(self, results):
- pass
-
- TEST_WILL_RUN_STORY = 'test.WillRunStory'
- TEST_MEASURE = 'test.Measure'
- TEST_DID_RUN_STORY = 'test.DidRunStory'
-
- EXPECTED_CALLS_IN_ORDER = [TEST_WILL_RUN_STORY,
- TEST_MEASURE,
- TEST_DID_RUN_STORY]
-
- test = timeline_based_measurement.TimelineBasedMeasurement(
- timeline_based_measurement.Options())
-
- manager = mock.MagicMock()
- test.WillRunStory = mock.MagicMock()
- test.Measure = mock.MagicMock()
- test.DidRunStory = mock.MagicMock()
- manager.attach_mock(test.WillRunStory, TEST_WILL_RUN_STORY)
- manager.attach_mock(test.Measure, TEST_MEASURE)
- manager.attach_mock(test.DidRunStory, TEST_DID_RUN_STORY)
-
- story_set = story_module.StorySet()
- story_set.AddStory(DummyLocalStory(TestSharedTbmState, name='foo'))
- story_set.AddStory(DummyLocalStory(TestSharedTbmState, name='bar'))
- story_set.AddStory(DummyLocalStory(TestSharedTbmState, name='baz'))
- story_runner.RunStorySet(test, story_set, self.options, self.results)
- self.assertFalse(self.results.had_failures)
- self.assertEquals(3, self.results.num_successful)
-
- self.assertEquals(3*EXPECTED_CALLS_IN_ORDER,
- [call[0] for call in manager.mock_calls])
-
- def testCallOrderBetweenStoryTestAndSharedState(self):
- """Check that the call order between StoryTest and SharedState is correct.
- """
- TEST_WILL_RUN_STORY = 'test.WillRunStory'
- TEST_MEASURE = 'test.Measure'
- TEST_DID_RUN_STORY = 'test.DidRunStory'
- STATE_WILL_RUN_STORY = 'state.WillRunStory'
- STATE_RUN_STORY = 'state.RunStory'
- STATE_DID_RUN_STORY = 'state.DidRunStory'
-
- EXPECTED_CALLS_IN_ORDER = [TEST_WILL_RUN_STORY,
- STATE_WILL_RUN_STORY,
- STATE_RUN_STORY,
- TEST_MEASURE,
- TEST_DID_RUN_STORY,
- STATE_DID_RUN_STORY]
-
- class TestStoryTest(story_test.StoryTest):
- def WillRunStory(self, platform):
- pass
-
- def Measure(self, platform, results):
- pass
-
- def DidRunStory(self, platform, results):
- pass
-
- class TestSharedStateForStoryTest(TestSharedState):
- def RunStory(self, results):
- pass
-
- @mock.patch.object(TestStoryTest, 'WillRunStory')
- @mock.patch.object(TestStoryTest, 'Measure')
- @mock.patch.object(TestStoryTest, 'DidRunStory')
- @mock.patch.object(TestSharedStateForStoryTest, 'WillRunStory')
- @mock.patch.object(TestSharedStateForStoryTest, 'RunStory')
- @mock.patch.object(TestSharedStateForStoryTest, 'DidRunStory')
- def GetCallsInOrder(state_DidRunStory, state_RunStory, state_WillRunStory,
- test_DidRunStory, test_Measure, test_WillRunStory):
- manager = mock.MagicMock()
- manager.attach_mock(test_WillRunStory, TEST_WILL_RUN_STORY)
- manager.attach_mock(test_Measure, TEST_MEASURE)
- manager.attach_mock(test_DidRunStory, TEST_DID_RUN_STORY)
- manager.attach_mock(state_WillRunStory, STATE_WILL_RUN_STORY)
- manager.attach_mock(state_RunStory, STATE_RUN_STORY)
- manager.attach_mock(state_DidRunStory, STATE_DID_RUN_STORY)
-
- test = TestStoryTest()
- story_set = story_module.StorySet()
- story_set.AddStory(DummyLocalStory(TestSharedStateForStoryTest))
- story_runner.RunStorySet(test, story_set, self.options, self.results)
- return [call[0] for call in manager.mock_calls]
-
- calls_in_order = GetCallsInOrder() # pylint: disable=no-value-for-parameter
- self.assertEquals(EXPECTED_CALLS_IN_ORDER, calls_in_order)
+ self.RunStories(['l' * 182])
+
+ def testCallOrderInStoryTest(self):
+ """Check the call order of StoryTest methods is as expected."""
+ self.RunStories(['foo', 'bar', 'baz'])
+ self.assertEqual([call[0] for call in self.mock_story_test.mock_calls],
+ ['WillRunStory', 'Measure', 'DidRunStory'] * 3)
+
+ @mock.patch.object(test_stories.TestSharedState, 'DidRunStory')
+ @mock.patch.object(test_stories.TestSharedState, 'RunStory')
+ @mock.patch.object(test_stories.TestSharedState, 'WillRunStory')
+ def testCallOrderBetweenStoryTestAndSharedState(
+ self, will_run_story, run_story, did_run_story):
+ """Check the call order between StoryTest and SharedState is correct."""
+ root_mock = mock.MagicMock()
+ root_mock.attach_mock(self.mock_story_test, 'test')
+ root_mock.attach_mock(will_run_story, 'state.WillRunStory')
+ root_mock.attach_mock(run_story, 'state.RunStory')
+ root_mock.attach_mock(did_run_story, 'state.DidRunStory')
+
+ self.RunStories(['story1'])
+ self.assertEqual([call[0] for call in root_mock.mock_calls], [
+ 'test.WillRunStory',
+ 'state.WillRunStory',
+ 'state.RunStory',
+ 'test.Measure',
+ 'test.DidRunStory',
+ 'state.DidRunStory'
+ ])
def testAppCrashExceptionCausesFailure(self):
- story_set = story_module.StorySet()
- class SharedStoryThatCausesAppCrash(TestSharedState):
- def WillRunStory(self, story):
- raise exceptions.AppCrashException(msg='App Foo crashes')
-
- story_set.AddStory(DummyLocalStory(SharedStoryThatCausesAppCrash))
- story_runner.RunStorySet(DummyTest(), story_set, self.options, self.results)
- self.assertTrue(self.results.had_failures)
- self.assertEquals(0, self.results.num_successful)
+ self.RunStories([test_stories.DummyStory(
+ 'story',
+ run_side_effect=exceptions.AppCrashException(msg='App Foo crashes'))])
+ test_results = self.ReadTestResults()
+ self.assertEqual(['FAIL'],
+ [test['status'] for test in test_results])
self.assertIn('App Foo crashes', sys.stderr.getvalue())
- def testExceptionRaisedInSharedStateTearDown(self):
- story_set = story_module.StorySet()
- class SharedStoryThatCausesAppCrash(TestSharedState):
- def TearDownState(self):
- raise TestOnlyException()
+ @mock.patch.object(test_stories.TestSharedState, 'TearDownState')
+ def testExceptionRaisedInSharedStateTearDown(self, tear_down_state):
+ class TestOnlyException(Exception):
+ pass
- story_set.AddStory(DummyLocalStory(
- SharedStoryThatCausesAppCrash))
+ tear_down_state.side_effect = TestOnlyException()
with self.assertRaises(TestOnlyException):
- story_runner.RunStorySet(
- DummyTest(), story_set, self.options, self.results)
+ self.RunStories(['story'])
def testUnknownExceptionIsNotFatal(self):
- story_set = story_module.StorySet()
-
class UnknownException(Exception):
pass
- # This erroneous test is set up to raise exception for the 1st story
- # run.
- class Test(legacy_page_test.LegacyPageTest):
- def __init__(self):
- super(Test, self).__init__()
- self.run_count = 0
-
- def RunPage(self, *_):
- old_run_count = self.run_count
- self.run_count += 1
- if old_run_count == 0:
- raise UnknownException('FooBarzException')
-
- def ValidateAndMeasurePage(self, page, tab, results):
- pass
-
- s1 = DummyLocalStory(TestSharedState, name='foo')
- s2 = DummyLocalStory(TestSharedState, name='bar')
- story_set.AddStory(s1)
- story_set.AddStory(s2)
- test = Test()
- story_runner.RunStorySet(test, story_set, self.options, self.results)
- all_story_runs = list(self.results.IterStoryRuns())
- self.assertEqual(2, len(all_story_runs))
- self.assertTrue(all_story_runs[0].failed)
- self.assertTrue(all_story_runs[1].ok)
- self.assertIn('FooBarzException', sys.stderr.getvalue())
+ self.RunStories([
+ test_stories.DummyStory(
+ 'foo', run_side_effect=UnknownException('FooException')),
+ test_stories.DummyStory('bar')])
+ test_results = self.ReadTestResults()
+ self.assertEqual(['FAIL', 'PASS'],
+ [test['status'] for test in test_results])
+ self.assertIn('FooException', sys.stderr.getvalue())
def testRaiseBrowserGoneExceptionFromRunPage(self):
- story_set = story_module.StorySet()
-
- class Test(legacy_page_test.LegacyPageTest):
- def __init__(self):
- super(Test, self).__init__()
- self.run_count = 0
-
- def RunPage(self, *_):
- old_run_count = self.run_count
- self.run_count += 1
- if old_run_count == 0:
- raise exceptions.BrowserGoneException(
- None, 'i am a browser crash message')
-
- def ValidateAndMeasurePage(self, page, tab, results):
- pass
-
- story_set.AddStory(DummyLocalStory(TestSharedState, name='foo'))
- story_set.AddStory(DummyLocalStory(TestSharedState, name='bar'))
- test = Test()
- story_runner.RunStorySet(test, story_set, self.options, self.results)
- self.assertEquals(2, test.run_count)
- self.assertTrue(self.results.had_failures)
- self.assertEquals(1, self.results.num_successful)
-
- def testAppCrashThenRaiseInTearDown_Interrupted(self):
- story_set = story_module.StorySet()
-
- unit_test_events = [] # track what was called when
- class DidRunTestError(Exception):
+ self.RunStories([
+ test_stories.DummyStory(
+ 'foo', run_side_effect=exceptions.BrowserGoneException(
+ None, 'i am a browser crash message')),
+ test_stories.DummyStory('bar')])
+ test_results = self.ReadTestResults()
+ self.assertEqual(['FAIL', 'PASS'],
+ [test['status'] for test in test_results])
+ self.assertIn('i am a browser crash message', sys.stderr.getvalue())
+
+ @mock.patch.object(test_stories.TestSharedState,
+ 'DumpStateUponStoryRunFailure')
+ @mock.patch.object(test_stories.TestSharedState, 'TearDownState')
+ def testAppCrashThenRaiseInTearDown_Interrupted(
+ self, tear_down_state, dump_state_upon_story_run_failure):
+ class TearDownStateException(Exception):
pass
- class TestTearDownSharedState(TestSharedState):
- def TearDownState(self):
- unit_test_events.append('tear-down-state')
- raise DidRunTestError
-
- def DumpStateUponStoryRunFailure(self, results):
- unit_test_events.append('dump-state')
-
-
- class Test(legacy_page_test.LegacyPageTest):
- def __init__(self):
- super(Test, self).__init__()
- self.run_count = 0
-
- def RunPage(self, *_):
- old_run_count = self.run_count
- self.run_count += 1
- if old_run_count == 0:
- unit_test_events.append('app-crash')
- raise exceptions.AppCrashException
-
- def ValidateAndMeasurePage(self, page, tab, results):
- pass
-
- story_set.AddStory(DummyLocalStory(TestTearDownSharedState, name='foo'))
- story_set.AddStory(DummyLocalStory(TestTearDownSharedState, name='bar'))
- test = Test()
- story_runner.RunStorySet(test, story_set, self.options, self.results)
- self.assertEqual([
- 'app-crash', 'dump-state',
- # This event happens because of the app crash.
- 'tear-down-state',
- # This event happens since state must be reopened to check whether
+ tear_down_state.side_effect = TearDownStateException()
+ root_mock = mock.Mock()
+ root_mock.attach_mock(tear_down_state, 'state.TearDownState')
+ root_mock.attach_mock(dump_state_upon_story_run_failure,
+ 'state.DumpStateUponStoryRunFailure')
+ self.RunStories([
+ test_stories.DummyStory(
+ 'foo', run_side_effect=exceptions.AppCrashException(msg='crash!')),
+ test_stories.DummyStory('bar')])
+
+ self.assertEqual([call[0] for call in root_mock.mock_calls], [
+ 'state.DumpStateUponStoryRunFailure',
+ # This tear down happens because of the app crash.
+ 'state.TearDownState',
+ # This one happens since state must be re-created to check whether
# later stories should be skipped or unexpectedly skipped. Then
# state is torn down normally at the end of the runs.
- 'tear-down-state',
- ], unit_test_events)
- self.assertIn('DidRunTestError', self.results.benchmark_interruption)
- story_runs = list(self.results.IterStoryRuns())
- self.assertEqual(len(story_runs), 2)
- self.assertTrue(story_runs[0].failed,
- 'It threw an exceptions.AppCrashException')
- self.assertTrue(
- story_runs[1].skipped,
- 'We should unexpectedly skip later runs since the DidRunTestError '
- 'during state teardown should cause the Benchmark to be marked as '
- 'interrupted.')
- self.assertFalse(
- story_runs[1].is_expected,
- 'We should unexpectedly skip later runs since the DidRunTestError '
- 'during state teardown should cause the Benchmark to be marked as '
- 'interrupted.')
-
- def testPagesetRepeat(self):
- story_set = story_module.StorySet()
+ 'state.TearDownState'
+ ])
- # TODO(eakuefner): Factor this out after flattening page ref in Value
- blank_story = DummyLocalStory(TestSharedState, name='blank')
- green_story = DummyLocalStory(TestSharedState, name='green')
- story_set.AddStory(blank_story)
- story_set.AddStory(green_story)
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results), 2)
+ # First story unexpectedly failed with AppCrashException.
+ self.assertEqual(test_results[0]['status'], 'FAIL')
+ self.assertFalse(test_results[0]['expected'])
+ # Second story unexpectedly skipped due to exception during tear down.
+ self.assertEqual(test_results[1]['status'], 'SKIP')
+ self.assertFalse(test_results[1]['expected'])
+ def testPagesetRepeat(self):
self.options.pageset_repeat = 2
- self.options.output_formats = []
- story_runner.RunStorySet(
- _Measurement(), story_set, self.options, self.results)
- summary = summary_module.Summary(self.results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- blank_value = list_of_scalar_values.ListOfScalarValues(
- blank_story, 'metric', 'unit', [1, 3])
- green_value = list_of_scalar_values.ListOfScalarValues(
- green_story, 'metric', 'unit', [2, 4])
- merged_value = list_of_scalar_values.ListOfScalarValues(
- None, 'metric', 'unit',
- [1, 3, 2, 4], std=math.sqrt(2)) # Pooled standard deviation.
-
- self.assertEquals(4, self.results.num_successful)
- self.assertFalse(self.results.had_failures)
- self.assertEquals(3, len(values))
- self.assertIn(blank_value, values)
- self.assertIn(green_value, values)
- self.assertIn(merged_value, values)
-
- def testRepeatOnce(self):
- story_set = story_module.StorySet()
-
- blank_story = DummyLocalStory(TestSharedState, name='blank')
- green_story = DummyLocalStory(TestSharedState, name='green')
- story_set.AddStory(blank_story)
- story_set.AddStory(green_story)
-
- self.options.pageset_repeat = 1
- story_runner.RunStorySet(
- _Measurement(), story_set, self.options, self.results)
- summary = summary_module.Summary(self.results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
-
- self.assertEquals(2, self.results.num_successful)
- self.assertFalse(self.results.had_failures)
- self.assertEquals(3, len(values))
-
- def testRunStoryPopulatesHistograms(self):
- story_set = story_module.StorySet()
-
- class Test(legacy_page_test.LegacyPageTest):
- def RunPage(self, _, results):
- results.AddHistogram(
- histogram_module.Histogram('hist', 'count'))
-
- def ValidateAndMeasurePage(self, page, tab, results):
- pass
-
- s1 = DummyLocalStory(TestSharedState, name='foo')
- story_set.AddStory(s1)
- test = Test()
- story_runner.RunStorySet(test, story_set, self.options, self.results)
-
- dicts = self.results.AsHistogramDicts()
- hs = histogram_set.HistogramSet()
- hs.ImportDicts(dicts)
-
- self.assertEqual(1, len(hs))
- self.assertEqual('hist', hs.GetFirstHistogram().name)
-
- def testRunStoryAddsDeviceInfo(self):
- story_set = story_module.StorySet()
- story_set.AddStory(DummyLocalStory(TestSharedState, 'foo', ['bar']))
- story_runner.RunStorySet(DummyTest(), story_set, self.options, self.results)
-
- hs = histogram_set.HistogramSet()
- hs.ImportDicts(self.results.AsHistogramDicts())
-
- generic_diagnostics = hs.GetSharedDiagnosticsOfType(
- generic_set.GenericSet)
-
- generic_diagnostics_values = [
- list(diagnostic) for diagnostic in generic_diagnostics]
-
- self.assertGreater(len(generic_diagnostics), 2)
- self.assertIn(['win10'], generic_diagnostics_values)
- self.assertIn(['win'], generic_diagnostics_values)
- self.assertIn(['amd64'], generic_diagnostics_values)
-
- def testRunStoryAddsDeviceInfo_EvenInErrors(self):
- class ErrorRaisingDummyLocalStory(DummyLocalStory):
- def __init__(self, shared_state_class, name='', tags=None):
- if name == '':
- name = 'dummy local story'
- super(ErrorRaisingDummyLocalStory, self).__init__(
- shared_state_class, name=name, tags=tags)
-
- def Run(self, shared_state):
- raise BaseException('foo')
-
- @property
- def is_local(self):
- return True
-
- @property
- def url(self):
- return 'data:,'
-
- story_set = story_module.StorySet()
- story_set.AddStory(ErrorRaisingDummyLocalStory(
- TestSharedState, 'foo', ['bar']))
- story_runner.RunStorySet(DummyTest(), story_set, self.options, self.results)
-
- hs = histogram_set.HistogramSet()
- hs.ImportDicts(self.results.AsHistogramDicts())
-
- generic_diagnostics = hs.GetSharedDiagnosticsOfType(
- generic_set.GenericSet)
-
- generic_diagnostics_values = [
- list(diagnostic) for diagnostic in generic_diagnostics]
-
- self.assertGreater(len(generic_diagnostics), 2)
- self.assertIn(['win10'], generic_diagnostics_values)
- self.assertIn(['win'], generic_diagnostics_values)
- self.assertIn(['amd64'], generic_diagnostics_values)
-
- def testRunStoryAddsDeviceInfo_OnePerStorySet(self):
- class Test(legacy_page_test.LegacyPageTest):
- def RunPage(self, _, results):
- results.AddHistogram(
- histogram_module.Histogram('hist', 'count'))
-
- def ValidateAndMeasurePage(self, page, tab, results):
- pass
-
- story_set = story_module.StorySet()
- story_set.AddStory(DummyLocalStory(TestSharedState, 'foo', ['bar']))
- story_set.AddStory(DummyLocalStory(TestSharedState, 'abc', ['def']))
- story_runner.RunStorySet(Test(), story_set, self.options, self.results)
-
- hs = histogram_set.HistogramSet()
- hs.ImportDicts(self.results.AsHistogramDicts())
-
- generic_diagnostics = hs.GetSharedDiagnosticsOfType(
- generic_set.GenericSet)
-
- generic_diagnostics_values = [
- list(diagnostic) for diagnostic in generic_diagnostics]
-
- self.assertGreater(len(generic_diagnostics), 2)
- self.assertIn(['win10'], generic_diagnostics_values)
- self.assertIn(['win'], generic_diagnostics_values)
- self.assertIn(['amd64'], generic_diagnostics_values)
-
- self.assertEqual(1, len(
- [value for value in generic_diagnostics_values if value == ['win']]))
-
- first_histogram_diags = hs.GetFirstHistogram().diagnostics
- self.assertIn(reserved_infos.ARCHITECTURES.name, first_histogram_diags)
- self.assertIn(reserved_infos.OS_NAMES.name, first_histogram_diags)
- self.assertIn(reserved_infos.OS_VERSIONS.name, first_histogram_diags)
+ self.RunStories(['story1', 'story2'])
+ test_results = self.ReadTestResults()
+ self.assertEqual(['benchmark/story1', 'benchmark/story2'] * 2,
+ [test['testPath'] for test in test_results])
+ self.assertEqual(['PASS', 'PASS', 'PASS', 'PASS'],
+ [test['status'] for test in test_results])
def _testMaxFailuresOptionIsRespectedAndOverridable(
self, num_failing_stories, runner_max_failures, options_max_failures,
expected_num_failures, expected_num_skips):
- class SimpleSharedState(story_module.SharedState):
- _fake_platform = FakePlatform()
- _current_story = None
-
- @property
- def platform(self):
- return self._fake_platform
-
- def WillRunStory(self, story):
- self._current_story = story
-
- def RunStory(self, results):
- self._current_story.Run(self)
-
- def DidRunStory(self, results):
- pass
-
- def CanRunStory(self, story):
- return True
-
- def TearDownState(self):
- pass
-
- def DumpStateUponStoryRunFailure(self, results):
- pass
-
- class FailingStory(story_module.Story):
- def __init__(self, name):
- super(FailingStory, self).__init__(
- shared_state_class=SimpleSharedState,
- is_local=True, name=name)
- self.was_run = False
-
- def Run(self, shared_state):
- self.was_run = True
- raise legacy_page_test.Failure
-
- @property
- def url(self):
- return 'data:,'
-
- story_set = story_module.StorySet()
- for i in range(num_failing_stories):
- story_set.AddStory(FailingStory(name='failing%d' % i))
-
- self.options.output_formats = ['none']
if options_max_failures:
self.options.max_failures = options_max_failures
-
- story_runner.RunStorySet(
- DummyTest(), story_set, self.options,
- self.results, max_failures=runner_max_failures)
- self.assertEquals(expected_num_skips, self.results.num_skipped)
- self.assertTrue(self.results.had_failures)
- for ii, story in enumerate(story_set.stories):
- self.assertEqual(story.was_run, ii < expected_num_failures)
+ self.RunStories([
+ test_stories.DummyStory(
+ 'failing_%d' % i, run_side_effect=Exception('boom!'))
+ for i in range(num_failing_stories)
+ ], max_failures=runner_max_failures)
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results),
+ expected_num_failures + expected_num_skips)
+ for i, test in enumerate(test_results):
+ expected_status = 'FAIL' if i < expected_num_failures else 'SKIP'
+ self.assertEqual(test['status'], expected_status)
def testMaxFailuresNotSpecified(self):
self._testMaxFailuresOptionIsRespectedAndOverridable(
@@ -682,127 +224,68 @@ def testMaxFailuresOption(self):
options_max_failures=1, expected_num_failures=2,
expected_num_skips=3)
- def testRunBenchmark_TooManyValues(self):
- story_set = story_module.StorySet()
- story_set.AddStory(DummyLocalStory(TestSharedState, name='story'))
- story_runner.RunStorySet(
- _Measurement(), story_set, self.options, self.results, max_num_values=0)
- self.assertTrue(self.results.had_failures)
- self.assertEquals(0, self.results.num_successful)
- self.assertIn('Too many values: 1 > 0', sys.stderr.getvalue())
-
-class RunStorySetWithLegacyPagesTest(unittest.TestCase):
- """These tests run story sets that contain actual page_module.Page objects.
-
- Since pages use the shared_page_state_class, an actual browser is used for
- these tests.
-
- All these tests:
- - Use story sets with page_module.Page objects.
- - Call story_runner.RunStorySet as entry point.
- """
+class UpdateAndCheckArchivesTest(unittest.TestCase):
+ """Tests for the private _UpdateAndCheckArchives."""
def setUp(self):
- self.options = options_for_unittests.GetRunOptions(
- output_dir=tempfile.mkdtemp())
- self.results = results_options.CreateResults(self.options)
+ mock.patch.object(archive_info.WprArchiveInfo,
+ 'DownloadArchivesIfNeeded').start()
def tearDown(self):
- self.results.Finalize()
- shutil.rmtree(self.options.output_dir)
+ mock.patch.stopall()
- def testRunStoryWithMissingArchiveFile(self):
- story_set = story_module.StorySet(archive_data_file='data/hi.json')
- story_set.AddStory(page_module.Page(
- 'http://www.testurl.com', story_set, story_set.base_dir,
- name='http://www.testurl.com'))
- test = DummyTest()
+ def testMissingArchiveDataFile(self):
+ story_set = test_stories.DummyStorySet(['story'])
with self.assertRaises(story_runner.ArchiveError):
- story_runner.RunStorySet(test, story_set, self.options, self.results)
-
- def testRunStoryWithLongURLPage(self):
- story_set = story_module.StorySet()
- story_set.AddStory(page_module.Page('file://long' + 'g' * 180,
- story_set, name='test'))
- test = DummyTest()
- story_runner.RunStorySet(test, story_set, self.options, self.results)
-
- @decorators.Disabled('chromeos') # crbug.com/483212
- def testUpdateAndCheckArchives(self):
- usr_stub = system_stub.Override(story_runner, ['cloud_storage'])
- wpr_stub = system_stub.Override(archive_info, ['cloud_storage'])
- archive_data_dir = os.path.join(
- util.GetTelemetryDir(),
- 'telemetry', 'internal', 'testing', 'archive_files')
- try:
- story_set = story_module.StorySet()
- story_set.AddStory(page_module.Page(
- 'http://www.testurl.com', story_set, story_set.base_dir,
- name='http://www.testurl.com'))
- # Page set missing archive_data_file.
- self.assertRaises(
- story_runner.ArchiveError,
- story_runner._UpdateAndCheckArchives,
- story_set.archive_data_file,
- story_set.wpr_archive_info,
+ story_runner._UpdateAndCheckArchives(
+ story_set.archive_data_file, story_set.wpr_archive_info,
story_set.stories)
- story_set = story_module.StorySet(
- archive_data_file='missing_archive_data_file.json')
- story_set.AddStory(page_module.Page(
- 'http://www.testurl.com', story_set, story_set.base_dir,
- name='http://www.testurl.com'))
- # Page set missing json file specified in archive_data_file.
- self.assertRaises(
- story_runner.ArchiveError,
- story_runner._UpdateAndCheckArchives,
- story_set.archive_data_file,
- story_set.wpr_archive_info,
+ def testArchiveDataFileDoesNotExist(self):
+ story_set = test_stories.DummyStorySet(
+ ['story'], archive_data_file='does_not_exist.json')
+ with self.assertRaises(story_runner.ArchiveError):
+ story_runner._UpdateAndCheckArchives(
+ story_set.archive_data_file, story_set.wpr_archive_info,
story_set.stories)
- story_set = story_module.StorySet(
- archive_data_file=os.path.join(archive_data_dir, 'test.json'),
- cloud_storage_bucket=cloud_storage.PUBLIC_BUCKET)
- story_set.AddStory(page_module.Page(
- 'http://www.testurl.com', story_set, story_set.base_dir,
- name='http://www.testurl.com'))
- # Page set with valid archive_data_file.
- self.assertTrue(
- story_runner._UpdateAndCheckArchives(
- story_set.archive_data_file, story_set.wpr_archive_info,
- story_set.stories))
- story_set.AddStory(page_module.Page(
- 'http://www.google.com', story_set, story_set.base_dir,
- name='http://www.google.com'))
- # Page set with an archive_data_file which exists but is missing a page.
- self.assertRaises(
- story_runner.ArchiveError,
- story_runner._UpdateAndCheckArchives,
- story_set.archive_data_file,
- story_set.wpr_archive_info,
+ def testUpdateAndCheckArchivesSuccess(self):
+ # This test file has a recording for the 'http://www.testurl.com' story only.
+ archive_data_file = os.path.join(
+ util.GetUnittestDataDir(), 'archive_files', 'test.json')
+ story_set = test_stories.DummyStorySet(
+ ['http://www.testurl.com'], archive_data_file=archive_data_file)
+ success = story_runner._UpdateAndCheckArchives(
+ story_set.archive_data_file, story_set.wpr_archive_info,
+ story_set.stories)
+ self.assertTrue(success)
+
+ def testArchiveWithMissingStory(self):
+ # This test file has a recording for the 'http://www.testurl.com' story only.
+ archive_data_file = os.path.join(
+ util.GetUnittestDataDir(), 'archive_files', 'test.json')
+ story_set = test_stories.DummyStorySet(
+ ['http://www.testurl.com', 'http://www.google.com'],
+ archive_data_file=archive_data_file)
+ with self.assertRaises(story_runner.ArchiveError):
+ story_runner._UpdateAndCheckArchives(
+ story_set.archive_data_file, story_set.wpr_archive_info,
story_set.stories)
- story_set = story_module.StorySet(
- archive_data_file=os.path.join(
- archive_data_dir, 'test_missing_wpr_file.json'),
- cloud_storage_bucket=cloud_storage.PUBLIC_BUCKET)
- story_set.AddStory(page_module.Page(
- 'http://www.testurl.com', story_set, story_set.base_dir,
- name='http://www.testurl.com'))
- story_set.AddStory(page_module.Page(
- 'http://www.google.com', story_set, story_set.base_dir,
- name='http://www.google.com'))
- # Page set with an archive_data_file which exists and contains all pages
- # but fails to find a wpr file.
- self.assertRaises(
- story_runner.ArchiveError,
- story_runner._UpdateAndCheckArchives,
- story_set.archive_data_file,
- story_set.wpr_archive_info,
+ def testArchiveWithMissingWprFile(self):
+ # This test file claims to have recordings for both
+ # 'http://www.testurl.com' and 'http://www.google.com'; but the file with
+ # the wpr recording for the latter story is actually missing.
+ archive_data_file = os.path.join(
+ util.GetUnittestDataDir(), 'archive_files',
+ 'test_missing_wpr_file.json')
+ story_set = test_stories.DummyStorySet(
+ ['http://www.testurl.com', 'http://www.google.com'],
+ archive_data_file=archive_data_file)
+ with self.assertRaises(story_runner.ArchiveError):
+ story_runner._UpdateAndCheckArchives(
+ story_set.archive_data_file, story_set.wpr_archive_info,
story_set.stories)
- finally:
- usr_stub.Restore()
- wpr_stub.Restore()
class RunStoryAndProcessErrorIfNeededTest(unittest.TestCase):
@@ -1083,83 +566,49 @@ def testRunStoryAndProcessErrorIfNeeded_tryUnhandlable_finallyException(self):
])
-class DummyStoryTest(story_test.StoryTest):
- def __init__(self, options):
- del options # Unused.
-
- def WillRunStory(self, platform):
- del platform # Unused.
-
- def Measure(self, platform, results):
- del platform, results # Unused.
-
- def DidRunStory(self, platform, results):
- del platform, results # Unused.
-
-
-class DummyStory(story_module.Story):
- def __init__(self, name, tags=None, serving_dir=None, run_side_effect=None):
- """A customize dummy story.
-
- Args:
- name: A string with the name of the story.
- tags: Optional sequence of tags for the story.
- serving_dir: Optional path from which (in a real local story) contents
- are served. Used in some tests but no local servers are actually set up.
- run_side_effect: Optional side effect of the story's Run method.
- It can be either an exception instance to raise, or a callable
- with no arguments.
- """
- super(DummyStory, self).__init__(TestSharedState, name=name, tags=tags)
- self._serving_dir = serving_dir
- self._run_side_effect = run_side_effect
-
- def Run(self, _):
- if self._run_side_effect is not None:
- if isinstance(self._run_side_effect, Exception):
- raise self._run_side_effect # pylint: disable=raising-bad-type
- else:
- self._run_side_effect()
-
- @property
- def serving_dir(self):
- return self._serving_dir
-
-
class FakeBenchmark(benchmark.Benchmark):
- test = DummyStoryTest
+ test = test_stories.DummyStoryTest
- def __init__(self, stories=None, cloud_bucket=None):
+ def __init__(self, stories=None, **kwargs):
"""A customizable fake_benchmark.
Args:
stories: Optional sequence of either story names or objects. Instances
of DummyStory are useful here. If omitted the benchmark will contain
a single DummyStory.
- cloud_bucket: Optional cloud storage bucket where (in a real benchmark)
- data for WPR recordings is stored. This is passed to the StorySet
- constructor and used in some tests; but interactions with cloud storage
- are mocked out.
+ Other keyword arguments are passed to the test_stories.DummyStorySet constructor.
"""
super(FakeBenchmark, self).__init__()
- self._cloud_bucket = cloud_bucket
- self._stories = ['story'] if stories is None else list(stories)
+ self._story_set = test_stories.DummyStorySet(
+ stories if stories is not None else ['story'], **kwargs)
@classmethod
def Name(cls):
return 'fake_benchmark'
def CreateStorySet(self, _):
- story_set = story_module.StorySet(cloud_storage_bucket=self._cloud_bucket)
- for story in self._stories:
- if isinstance(story, basestring):
- story = DummyStory(story)
- story_set.AddStory(story)
- return story_set
+ return self._story_set
+
+
+class FakeStoryFilter(object):
+ def __init__(self, stories_to_filter_out=None, stories_to_skip=None):
+ self._stories_to_filter = stories_to_filter_out or []
+ self._stories_to_skip = stories_to_skip or []
+ assert isinstance(self._stories_to_filter, list)
+ assert isinstance(self._stories_to_skip, list)
+
+ def FilterStories(self, story_set):
+ return [story for story in story_set
+ if story.name not in self._stories_to_filter]
+
+ def ShouldSkip(self, story):
+ return 'fake_reason' if story.name in self._stories_to_skip else ''
+
- def SetExpectations(self, expectations_line):
- self.AugmentExpectationsWithFile(
- '# results: [ Skip ]\n%s\n' % expectations_line)
+def ReadDiagnostics(test_result):
+ artifact = test_result['outputArtifacts'][page_test_results.DIAGNOSTICS_NAME]
+ with open(artifact['filePath']) as f:
+ return json.load(f)['diagnostics']
class RunBenchmarkTest(unittest.TestCase):
@@ -1177,62 +626,61 @@ def tearDown(self):
shutil.rmtree(self.output_dir)
def GetFakeBrowserOptions(self, overrides=None):
- options = options_for_unittests.GetRunOptions(
+ return options_for_unittests.GetRunOptions(
output_dir=self.output_dir,
fake_browser=True, overrides=overrides)
- options.intermediate_dir = os.path.join(self.output_dir, 'artifacts')
- return options
-
- def ReadIntermediateResults(self):
- results = {'benchmarkRun': {}, 'testResults': []}
- with open(os.path.join(
- self.output_dir, 'artifacts', '_telemetry_results.jsonl')) as f:
- for line in f:
- record = json.loads(line)
- if 'benchmarkRun' in record:
- results['benchmarkRun'].update(record['benchmarkRun'])
- if 'testResult' in record:
- results['testResults'].append(record['testResult'])
- return results
+
+ def ReadTestResults(self):
+ return results_options.ReadTestResults(
+ os.path.join(self.output_dir, 'artifacts'))
def testDisabledBenchmarkViaCanRunOnPlatform(self):
fake_benchmark = FakeBenchmark()
fake_benchmark.SUPPORTED_PLATFORMS = []
options = self.GetFakeBrowserOptions()
story_runner.RunBenchmark(fake_benchmark, options)
- results = self.ReadIntermediateResults()
- self.assertFalse(results['testResults']) # No tests ran at all.
+ test_results = self.ReadTestResults()
+ self.assertFalse(test_results) # No tests ran at all.
- def testDisabledWithExpectations(self):
- fake_benchmark = FakeBenchmark()
- fake_benchmark.SetExpectations('fake_benchmark/* [ Skip ]')
+ def testSkippedWithStoryFilter(self):
+ fake_benchmark = FakeBenchmark(stories=['fake_story'])
options = self.GetFakeBrowserOptions()
- story_runner.RunBenchmark(fake_benchmark, options)
- results = self.ReadIntermediateResults()
- self.assertTrue(results['testResults']) # Some tests ran, but all skipped.
- self.assertTrue(all(t['status'] == 'SKIP' for t in results['testResults']))
-
- def testDisabledBenchmarkOverriddenByCommandLine(self):
- fake_benchmark = FakeBenchmark()
- fake_benchmark.SetExpectations('fake_benchmark/* [ Skip ]')
- options = self.GetFakeBrowserOptions()
- options.run_disabled_tests = True
- story_runner.RunBenchmark(fake_benchmark, options)
- results = self.ReadIntermediateResults()
- self.assertTrue(results['testResults']) # Some tests ran. all OK.
- self.assertTrue(all(t['status'] == 'PASS' for t in results['testResults']))
+ fake_story_filter = FakeStoryFilter(stories_to_skip=['fake_story'])
+ with mock.patch(
+ 'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
+ return_value=fake_story_filter):
+ story_runner.RunBenchmark(fake_benchmark, options)
+ test_results = self.ReadTestResults()
+ self.assertTrue(test_results) # Some tests ran, but all skipped.
+ self.assertTrue(all(t['status'] == 'SKIP' for t in test_results))
- def testOneStoryDisabledOneNot(self):
+ def testOneStorySkippedOneNot(self):
+ fake_story_filter = FakeStoryFilter(stories_to_skip=['story1'])
fake_benchmark = FakeBenchmark(stories=['story1', 'story2'])
- fake_benchmark.SetExpectations('fake_benchmark/story1 [ Skip ]')
options = self.GetFakeBrowserOptions()
- story_runner.RunBenchmark(fake_benchmark, options)
- results = self.ReadIntermediateResults()
- status = [t['status'] for t in results['testResults']]
+ with mock.patch(
+ 'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
+ return_value=fake_story_filter):
+ story_runner.RunBenchmark(fake_benchmark, options)
+ test_results = self.ReadTestResults()
+ status = [t['status'] for t in test_results]
self.assertEqual(len(status), 2)
self.assertIn('SKIP', status)
self.assertIn('PASS', status)
+ def testOneStoryFilteredOneNot(self):
+ fake_story_filter = FakeStoryFilter(stories_to_filter_out=['story1'])
+ fake_benchmark = FakeBenchmark(stories=['story1', 'story2'])
+ options = self.GetFakeBrowserOptions()
+ with mock.patch(
+ 'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
+ return_value=fake_story_filter):
+ story_runner.RunBenchmark(fake_benchmark, options)
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results), 1)
+ self.assertEqual(test_results[0]['status'], 'PASS')
+ self.assertTrue(test_results[0]['testPath'].endswith('/story2'))
+
def testWithOwnerInfo(self):
@benchmark.Owner(emails=['alice@chromium.org', 'bob@chromium.org'],
@@ -1244,8 +692,8 @@ class FakeBenchmarkWithOwner(FakeBenchmark):
fake_benchmark = FakeBenchmarkWithOwner()
options = self.GetFakeBrowserOptions()
story_runner.RunBenchmark(fake_benchmark, options)
- results = self.ReadIntermediateResults()
- diagnostics = results['benchmarkRun']['diagnostics']
+ test_results = self.ReadTestResults()
+ diagnostics = ReadDiagnostics(test_results[0])
self.assertEqual(diagnostics['owners'],
['alice@chromium.org', 'bob@chromium.org'])
self.assertEqual(diagnostics['bugComponents'], ['fooBar'])
@@ -1261,16 +709,31 @@ class FakeBenchmarkWithOwner(FakeBenchmark):
fake_benchmark = FakeBenchmarkWithOwner()
options = self.GetFakeBrowserOptions()
story_runner.RunBenchmark(fake_benchmark, options)
- results = self.ReadIntermediateResults()
- diagnostics = results['benchmarkRun']['diagnostics']
+ test_results = self.ReadTestResults()
+ diagnostics = ReadDiagnostics(test_results[0])
self.assertEqual(diagnostics['owners'], ['alice@chromium.org'])
self.assertNotIn('documentationLinks', diagnostics)
+ def testDeviceInfo(self):
+ fake_benchmark = FakeBenchmark(stories=['fake_story'])
+ options = self.GetFakeBrowserOptions()
+ options.fake_possible_browser = fakes.FakePossibleBrowser(
+ arch_name='abc', os_name='win', os_version_name='win10')
+ story_runner.RunBenchmark(fake_benchmark, options)
+ test_results = self.ReadTestResults()
+ diagnostics = ReadDiagnostics(test_results[0])
+ self.assertEqual(diagnostics['architectures'], ['abc'])
+ self.assertEqual(diagnostics['osNames'], ['win'])
+ self.assertEqual(diagnostics['osVersions'], ['win10'])
+
def testReturnCodeDisabledStory(self):
- fake_benchmark = FakeBenchmark()
- fake_benchmark.SetExpectations('fake_benchmark/* [ Skip ]')
+ fake_benchmark = FakeBenchmark(stories=['fake_story'])
+ fake_story_filter = FakeStoryFilter(stories_to_skip=['fake_story'])
options = self.GetFakeBrowserOptions()
- return_code = story_runner.RunBenchmark(fake_benchmark, options)
+ with mock.patch(
+ 'telemetry.story.story_filter.StoryFilterFactory.BuildStoryFilter',
+ return_value=fake_story_filter):
+ return_code = story_runner.RunBenchmark(fake_benchmark, options)
self.assertEqual(return_code, -1)
def testReturnCodeSuccessfulRun(self):
@@ -1281,22 +744,33 @@ def testReturnCodeSuccessfulRun(self):
def testReturnCodeCaughtException(self):
fake_benchmark = FakeBenchmark(stories=[
- DummyStory('story', run_side_effect=exceptions.AppCrashException())])
+ test_stories.DummyStory(
+ 'story', run_side_effect=exceptions.AppCrashException())])
options = self.GetFakeBrowserOptions()
return_code = story_runner.RunBenchmark(fake_benchmark, options)
self.assertEqual(return_code, 1)
def testReturnCodeUnhandleableError(self):
fake_benchmark = FakeBenchmark(stories=[
- DummyStory('story', run_side_effect=MemoryError('Unhandleable'))])
+ test_stories.DummyStory(
+ 'story', run_side_effect=MemoryError('Unhandleable'))])
options = self.GetFakeBrowserOptions()
return_code = story_runner.RunBenchmark(fake_benchmark, options)
self.assertEqual(return_code, 2)
+ def testRunStoryWithMissingArchiveFile(self):
+ fake_benchmark = FakeBenchmark(archive_data_file='data/does-not-exist.json')
+ options = self.GetFakeBrowserOptions()
+ return_code = story_runner.RunBenchmark(fake_benchmark, options)
+ self.assertEqual(return_code, 2) # Benchmark was interrupted.
+ self.assertIn('ArchiveError', sys.stderr.getvalue())
+
def testDownloadMinimalServingDirs(self):
fake_benchmark = FakeBenchmark(stories=[
- DummyStory('story_foo', serving_dir='/files/foo', tags=['foo']),
- DummyStory('story_bar', serving_dir='/files/bar', tags=['bar']),
+ test_stories.DummyStory(
+ 'story_foo', serving_dir='/files/foo', tags=['foo']),
+ test_stories.DummyStory(
+ 'story_bar', serving_dir='/files/bar', tags=['bar']),
], cloud_bucket=cloud_storage.PUBLIC_BUCKET)
options = self.GetFakeBrowserOptions(overrides={'story_tag_filter': 'foo'})
with mock.patch(
@@ -1307,287 +781,138 @@ def testDownloadMinimalServingDirs(self):
self.assertEqual(get_files.call_count, 1)
get_files.assert_called_once_with('/files/foo', cloud_storage.PUBLIC_BUCKET)
-
-class AbridgeableStorySet(story_module.StorySet):
- def GetAbridgedStorySetTagFilter(self):
- return 'foo'
-
-
-class BenchmarkWithAbridgeableStorySet(benchmark.Benchmark):
- test = DummyTest
- def CreateStorySet(self, options):
- story_set = AbridgeableStorySet()
- story_set.AddStory(page_module.Page(
- 'file://foo/foo', name='foo', tags=['foo'],
- shared_page_state_class=TestSharedState))
- story_set.AddStory(page_module.Page(
- 'file://bar/bar', name='bar', tags=['bar'],
- shared_page_state_class=TestSharedState))
- return story_set
-
-
-class AbridgeableStorySetTest(unittest.TestCase):
- """Tests that run a fake benchmark with an abridgeable story set.
-
- All these tests:
- - Use an options object with a fake browser.
- - Use BenchmarkWithAbridgeableStorySet.
- - Call story_runner.RunBenchmark as entry point.
- """
- def setUp(self):
- self.benchmark = BenchmarkWithAbridgeableStorySet()
- self.options = options_for_unittests.GetRunOptions(
- output_dir=tempfile.mkdtemp(), fake_browser=True)
- self.options.output_formats = ['none']
-
- def tearDown(self):
- shutil.rmtree(self.options.output_dir)
-
def testAbridged(self):
- patch_method = (
- 'telemetry.internal.story_runner._RunStoryAndProcessErrorIfNeeded')
- with mock.patch(patch_method) as run_story_patch:
- story_runner.RunBenchmark(self.benchmark, self.options)
- self.assertEqual(run_story_patch.call_count, 1)
+ options = self.GetFakeBrowserOptions()
+ story_filter.StoryFilterFactory.ProcessCommandLineArgs(
+ parser=None, args=options)
+ fake_benchmark = FakeBenchmark(stories=[
+ test_stories.DummyStory('story1', tags=['important']),
+ test_stories.DummyStory('story2', tags=['other']),
+ ], abridging_tag='important')
+ story_runner.RunBenchmark(fake_benchmark, options)
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results), 1)
+ self.assertTrue(test_results[0]['testPath'].endswith('/story1'))
def testFullRun(self):
- self.options.run_full_story_set = True
- patch_method = (
- 'telemetry.internal.story_runner._RunStoryAndProcessErrorIfNeeded')
- with mock.patch(patch_method) as run_story_patch:
- story_runner.RunBenchmark(self.benchmark, self.options)
- self.assertEqual(run_story_patch.call_count, 2)
+ options = self.GetFakeBrowserOptions()
+ options.run_full_story_set = True
+ story_filter.StoryFilterFactory.ProcessCommandLineArgs(
+ parser=None, args=options)
+ fake_benchmark = FakeBenchmark(stories=[
+ test_stories.DummyStory('story1', tags=['important']),
+ test_stories.DummyStory('story2', tags=['other']),
+ ], abridging_tag='important')
+ story_runner.RunBenchmark(fake_benchmark, options)
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results), 2)
+ def testStoryFlag(self):
+ options = self.GetFakeBrowserOptions()
+ args = fakes.FakeParsedArgsForStoryFilter(stories=['story1', 'story3'])
+ story_filter.StoryFilterFactory.ProcessCommandLineArgs(
+ parser=None, args=args)
+ fake_benchmark = FakeBenchmark(stories=['story1', 'story2', 'story3'])
+ story_runner.RunBenchmark(fake_benchmark, options)
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results), 2)
+ self.assertTrue(test_results[0]['testPath'].endswith('/story1'))
+ self.assertTrue(test_results[1]['testPath'].endswith('/story3'))
-class BenchmarkJsonResultsTest(unittest.TestCase):
- """Tests that validate json-test-results output.
+ def testArtifactLogsContainHandleableException(self):
+ def failed_run():
+ logging.warning('This will fail gracefully')
+ raise exceptions.TimeoutException('karma!')
- All these tests:
- - Use custom benchmarks and story sets with actual page_module.Page objects.
- - Use an options object with a fake browser.
- - Call story_runner.RunBenchmark as entry point.
- """
- def setUp(self):
- self.options = options_for_unittests.GetRunOptions(
- output_dir=tempfile.mkdtemp(), fake_browser=True)
- self.options.output_formats = ['json-test-results']
+ fake_benchmark = FakeBenchmark(stories=[
+ test_stories.DummyStory('story1', run_side_effect=failed_run),
+ test_stories.DummyStory('story2')
+ ])
- def tearDown(self):
- shutil.rmtree(self.options.output_dir)
+ options = self.GetFakeBrowserOptions()
+ return_code = story_runner.RunBenchmark(fake_benchmark, options)
+ self.assertEqual(return_code, 1)
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results), 2)
- def GetFakeBrowserOptions(self, overrides=None):
- return options_for_unittests.GetRunOptions(
- output_dir=self.options.output_dir,
- fake_browser=True, overrides=overrides)
+ # First story failed.
+ self.assertEqual(test_results[0]['testPath'], 'fake_benchmark/story1')
+ self.assertEqual(test_results[0]['status'], 'FAIL')
+ self.assertIn('logs.txt', test_results[0]['outputArtifacts'])
- def testArtifactLogsContainHandleableException(self):
+ with open(test_results[0]['outputArtifacts']['logs.txt']['filePath']) as f:
+ test_log = f.read()
- class StoryFailureSharedState(TestSharedState):
- def RunStory(self, results):
- logging.warning('This will fail gracefully')
- raise exceptions.TimeoutException('karma!')
-
- class TestBenchmark(benchmark.Benchmark):
- test = DummyTest
-
- @classmethod
- def Name(cls):
- return 'TestBenchmark'
-
- def CreateStorySet(self, options):
- story_set = story_module.StorySet()
- story_set.AddStory(page_module.Page(
- 'http://foo', name='foo',
- shared_page_state_class=StoryFailureSharedState))
- story_set.AddStory(page_module.Page(
- 'http://bar', name='bar',
- shared_page_state_class=StoryFailureSharedState))
- return story_set
-
- story_failure_benchmark = TestBenchmark()
- return_code = story_runner.RunBenchmark(
- story_failure_benchmark, self.options)
- self.assertEquals(1, return_code)
- json_data = {}
- with open(os.path.join(self.options.output_dir, 'test-results.json')) as f:
- json_data = json.load(f)
- foo_artifacts = json_data['tests']['TestBenchmark']['foo']['artifacts']
- foo_artifact_log_path = os.path.join(
- self.options.output_dir, foo_artifacts['logs.txt'][0])
- with open(foo_artifact_log_path) as f:
- foo_log = f.read()
-
- self.assertIn('Handleable error', foo_log)
-
- # Ensure that foo_log contains the warning log message.
- self.assertIn('This will fail gracefully', foo_log)
-
- # Also the python crash stack.
- self.assertIn("raise exceptions.TimeoutException('karma!')", foo_log)
+ # Ensure that the log contains warning messages and python stack.
+ self.assertIn('Handleable error', test_log)
+ self.assertIn('This will fail gracefully', test_log)
+ self.assertIn("raise exceptions.TimeoutException('karma!')", test_log)
- def testArtifactLogsContainUnhandleableException(self):
- class UnhandledFailureSharedState(TestSharedState):
- def RunStory(self, results):
- logging.warning('This will fail badly')
- raise MemoryError('this is a fatal exception')
-
- class TestBenchmark(benchmark.Benchmark):
- test = DummyTest
-
- @classmethod
- def Name(cls):
- return 'TestBenchmark'
-
- def CreateStorySet(self, options):
- story_set = story_module.StorySet()
- story_set.AddStory(page_module.Page(
- 'http://foo', name='foo',
- shared_page_state_class=UnhandledFailureSharedState))
- story_set.AddStory(page_module.Page(
- 'http://bar', name='bar',
- shared_page_state_class=UnhandledFailureSharedState))
- return story_set
-
- unhandled_failure_benchmark = TestBenchmark()
- return_code = story_runner.RunBenchmark(
- unhandled_failure_benchmark, self.options)
- self.assertEquals(2, return_code)
+ # Second story ran fine.
+ self.assertEqual(test_results[1]['testPath'], 'fake_benchmark/story2')
+ self.assertEqual(test_results[1]['status'], 'PASS')
- json_data = {}
- with open(os.path.join(self.options.output_dir, 'test-results.json')) as f:
- json_data = json.load(f)
+ def testArtifactLogsContainUnhandleableException(self):
+ def failed_run():
+ logging.warning('This will fail badly')
+ raise MemoryError('this is a fatal exception')
- foo_artifacts = json_data['tests']['TestBenchmark']['foo']['artifacts']
- foo_artifact_log_path = os.path.join(
- self.options.output_dir, foo_artifacts['logs.txt'][0])
- with open(foo_artifact_log_path) as f:
- foo_log = f.read()
+ fake_benchmark = FakeBenchmark(stories=[
+ test_stories.DummyStory('story1', run_side_effect=failed_run),
+ test_stories.DummyStory('story2')
+ ])
- self.assertIn('Unhandleable error', foo_log)
+ options = self.GetFakeBrowserOptions()
+ return_code = story_runner.RunBenchmark(fake_benchmark, options)
+ self.assertEqual(return_code, 2)
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results), 2)
- # Ensure that foo_log contains the warning log message.
- self.assertIn('This will fail badly', foo_log)
+ # First story failed.
+ self.assertEqual(test_results[0]['testPath'], 'fake_benchmark/story1')
+ self.assertEqual(test_results[0]['status'], 'FAIL')
+ self.assertIn('logs.txt', test_results[0]['outputArtifacts'])
- # Also the python crash stack.
- self.assertIn('MemoryError: this is a fatal exception', foo_log)
- self.assertIn("raise MemoryError('this is a fatal exception')",
- foo_log)
+ with open(test_results[0]['outputArtifacts']['logs.txt']['filePath']) as f:
+ test_log = f.read()
- # Assert that the first test got marked as a failure.
- foo_result = json_data['tests']['TestBenchmark']['foo']
- self.assertEquals(foo_result['expected'], 'PASS')
- self.assertEquals(foo_result['actual'], 'FAIL')
+ # Ensure that the log contains warning messages and python stack.
+ self.assertIn('Unhandleable error', test_log)
+ self.assertIn('This will fail badly', test_log)
+ self.assertIn("raise MemoryError('this is a fatal exception')", test_log)
- # Assert that the second story got written as a SKIP as it failed
- # to run because of the exception.
- bar_result = json_data['tests']['TestBenchmark']['bar']
- self.assertEquals(bar_result['expected'], 'PASS')
- self.assertEquals(bar_result['actual'], 'SKIP')
+ # Second story was skipped.
+ self.assertEqual(test_results[1]['testPath'], 'fake_benchmark/story2')
+ self.assertEqual(test_results[1]['status'], 'SKIP')
def testUnexpectedSkipsWithFiltering(self):
- class UnhandledFailureSharedState(TestSharedState):
- def RunStory(self, results):
- if results.current_story.name in stories_to_crash:
- raise MemoryError('this is an unexpected exception')
-
- class TestBenchmark(benchmark.Benchmark):
- test = DummyTest
-
- @classmethod
- def Name(cls):
- return 'TestBenchmark'
-
- def CreateStorySet(self, options):
- story_set = story_module.StorySet()
- for i in range(50):
- story_set.AddStory(page_module.Page(
- 'http://foo_%s' % i, name='story_%s' % i,
- shared_page_state_class=UnhandledFailureSharedState))
- return story_set
-
- # Set up the test so that it throws unexpected crashes from any story
- # between story 30 to story 50.
- # Also set the filtering to only run from story 10 --> story 40
- stories_to_crash = set('story_%s' % i for i in range(30, 50))
+ # We prepare side effects for 50 stories, the first 30 run fine, the
+ # remaining 20 fail with a fatal error.
+ fatal_error = MemoryError('this is an unexpected exception')
+ side_effects = [None] * 30 + [fatal_error] * 20
+
+ fake_benchmark = FakeBenchmark(stories=(
+ test_stories.DummyStory('story_%i' % i, run_side_effect=effect)
+ for i, effect in enumerate(side_effects)))
+ # Set the filtering to only run from story_10 --> story_40
options = self.GetFakeBrowserOptions({
'story_shard_begin_index': 10,
'story_shard_end_index': 41})
- options.output_formats = ['json-test-results']
-
- unhandled_failure_benchmark = TestBenchmark()
- return_code = story_runner.RunBenchmark(
- unhandled_failure_benchmark, self.options)
+ return_code = story_runner.RunBenchmark(fake_benchmark, options)
self.assertEquals(2, return_code)
# The results should contain entries of story 10 --> story 40. Of those
# entries, story 31's actual result is 'FAIL' and
# stories from 31 to 40 will shows 'SKIP'.
- json_data = {}
- with open(os.path.join(options.output_dir, 'test-results.json')) as f:
- json_data = json.load(f)
- stories = json_data['tests']['TestBenchmark']
- self.assertEquals(len(stories.keys()), 31)
-
- for i in range(10, 30):
- self.assertEquals(stories['story_%s' % i]['actual'], 'PASS')
-
- self.assertEquals(stories['story_30']['actual'], 'FAIL')
-
- for i in range(31, 41):
- self.assertEquals(stories['story_%s' % i]['actual'], 'SKIP')
-
-
-class BenchmarkArtifactPathsTest(unittest.TestCase):
- """Test to validate the use of symlinks in output directory.
+ test_results = self.ReadTestResults()
+ self.assertEqual(len(test_results), 31)
- TODO(crbug.com/1008852): Should be merged into BenchmarkJsonResultsTest.
- """
- def setUp(self):
- self._temp_dir = tempfile.mkdtemp()
-
- self._real_output_dir = os.path.join(self._temp_dir, 'real', 'output')
- os.makedirs(self._real_output_dir)
+ expected = []
+ expected.extend(('story_%i' % i, 'PASS') for i in xrange(10, 30))
+ expected.append(('story_30', 'FAIL'))
+ expected.extend(('story_%i' % i, 'SKIP') for i in xrange(31, 41))
- self._symlinked_output_dir = os.path.join(self._temp_dir, 'output')
- os.symlink(self._real_output_dir, self._symlinked_output_dir)
-
- self._options = options_for_unittests.GetRunOptions(
- output_dir=self._symlinked_output_dir)
- self._options.suppress_gtest_report = True
- self._options.output_formats = ['json-test-results']
- self._options.output_dir = self._symlinked_output_dir
-
- def tearDown(self):
- shutil.rmtree(self._temp_dir)
-
- @decorators.Enabled('linux', 'mac')
- def testArtifactLogsHaveProperPathWithSymlinkedTmp(self):
- class TestBenchmark(benchmark.Benchmark):
- test = DummyTest
-
- @classmethod
- def Name(cls):
- return 'TestBenchmark'
-
- def CreateStorySet(self, options):
- story_set = story_module.StorySet()
- story_set.AddStory(page_module.Page(
- 'http://foo', name='foo', shared_page_state_class=TestSharedState))
- return story_set
-
- self.assertTrue(os.path.exists(self._symlinked_output_dir))
-
- test_benchmark = TestBenchmark()
- return_code = story_runner.RunBenchmark(test_benchmark, self._options)
- self.assertEquals(0, return_code)
-
- json_data = {}
- with open(os.path.join(self._options.output_dir,
- 'test-results.json')) as f:
- json_data = json.load(f)
-
- foo_artifacts = json_data['tests']['TestBenchmark']['foo']['artifacts']
- foo_artifact_log_path = os.path.join(
- self._options.output_dir, foo_artifacts['logs.txt'][0])
- assert os.path.isfile(foo_artifact_log_path)
+ for (story, status), result in zip(expected, test_results):
+ self.assertEqual(result['testPath'], 'fake_benchmark/%s' % story)
+ self.assertEqual(result['status'], status)
diff --git a/telemetry/telemetry/page/legacy_page_test_unittest.py b/telemetry/telemetry/page/legacy_page_test_unittest.py
new file mode 100644
index 0000000000..afda8f8e7e
--- /dev/null
+++ b/telemetry/telemetry/page/legacy_page_test_unittest.py
@@ -0,0 +1,45 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import urlparse
+
+from telemetry.page import legacy_page_test
+from telemetry.testing import legacy_page_test_case
+
+
+class LegacyPageTestTests(legacy_page_test_case.LegacyPageTestCase):
+
+ def testPageWasLoaded(self):
+ class ExamplePageTest(legacy_page_test.LegacyPageTest):
+ def ValidateAndMeasurePage(self, page, tab, results):
+ del page, results # Unused.
+ contents = tab.EvaluateJavaScript('document.body.textContent')
+ if contents.strip() != 'Hello world':
+ raise legacy_page_test.MeasurementFailure(
+ 'Page contents were: %r' % contents)
+
+ page_test = ExamplePageTest()
+ measurements = self.RunPageTest(page_test, 'file://blank.html')
+ self.assertFalse(measurements) # No measurements are recorded
+
+ def testPageWithQueryParamsAsMeasurements(self):
+ class PageTestWithMeasurements(legacy_page_test.LegacyPageTest):
+ def ValidateAndMeasurePage(self, page, tab, results):
+ del page # Unused.
+ query = tab.EvaluateJavaScript('window.location.search').lstrip('?')
+ for name, value in urlparse.parse_qsl(query):
+ results.AddMeasurement(name, 'count', int(value))
+
+ page_test = PageTestWithMeasurements()
+ measurements = self.RunPageTest(page_test, 'file://blank.html?foo=42')
+ self.assertEqual(measurements['foo']['samples'], [42])
+
+ def testPageWithFailure(self):
+ class PageTestThatFails(legacy_page_test.LegacyPageTest):
+ def ValidateAndMeasurePage(self, page, tab, results):
+ del page, tab, results # Unused.
+ raise legacy_page_test.Failure
+
+ page_test = PageTestThatFails()
+ self.RunPageTest(page_test, 'file://blank.html', expect_status='FAIL')
diff --git a/telemetry/telemetry/page/page_run_end_to_end_unittest.py b/telemetry/telemetry/page/page_run_end_to_end_unittest.py
index aa214d5033..5190ea6ce8 100644
--- a/telemetry/telemetry/page/page_run_end_to_end_unittest.py
+++ b/telemetry/telemetry/page/page_run_end_to_end_unittest.py
@@ -460,68 +460,3 @@ def RunNavigateSteps(self, action_runner):
actual_screenshot = image_util.FromPngFile(screenshot_file_path)
self.assertEquals(image_util.Pixels(chrome_version_screen_shot[0]),
image_util.Pixels(actual_screenshot))
-
-
-class FakePageRunEndToEndTests(unittest.TestCase):
-
- def setUp(self):
- self.options = options_for_unittests.GetRunOptions(
- output_dir=tempfile.mkdtemp(), fake_browser=True)
-
- def tearDown(self):
- shutil.rmtree(self.options.output_dir)
-
- def testNoScreenShotTakenForFailedPageDueToNoSupport(self):
-
- class FailingTestPage(page_module.Page):
-
- def RunNavigateSteps(self, action_runner):
- raise exceptions.AppCrashException
-
- story_set = story.StorySet()
- story_set.AddStory(page_module.Page('file://blank.html', story_set,
- name='blank.html'))
- failing_page = FailingTestPage('chrome://version', story_set,
- name='failing')
- story_set.AddStory(failing_page)
-
- self.options.browser_options.take_screenshot_for_failed_page = True
- results = RunStorySet(DummyTest(), story_set, self.options, max_failures=2)
-
- self.assertTrue(results.had_failures)
-
- def testScreenShotTakenForFailedPageOnSupportedPlatform(self):
- fake_platform = self.options.fake_possible_browser.returned_browser.platform
- expected_png_base64 = """
- iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91
- JpzAAAAFklEQVR4Xg3EAQ0AAABAMP1LY3YI7l8l6A
- T8tgwbJAAAAABJRU5ErkJggg==
-"""
- fake_platform.screenshot_png_data = expected_png_base64
-
- class FailingTestPage(page_module.Page):
-
- def RunNavigateSteps(self, action_runner):
- raise exceptions.AppCrashException
-
- story_set = story.StorySet()
- story_set.AddStory(page_module.Page('file://blank.html', story_set,
- name='blank.html'))
- failing_page = FailingTestPage('chrome://version', story_set,
- name='failing')
- story_set.AddStory(failing_page)
-
- self.options.browser_options.take_screenshot_for_failed_page = True
- self.options.output_formats = ['json-test-results']
- results = RunStorySet(DummyTest(), story_set, self.options, max_failures=2)
-
- self.assertTrue(results.had_failures)
- failed_run = next(run for run in results.IterStoryRuns()
- if run.story.name == failing_page.name)
- screenshot_file_path = failed_run.GetArtifact('screenshot.png').local_path
-
- actual_screenshot_img = image_util.FromPngFile(screenshot_file_path)
- self.assertTrue(
- image_util.AreEqual(
- image_util.FromBase64Png(expected_png_base64),
- actual_screenshot_img))
diff --git a/telemetry/telemetry/page/page_test_unittest.py b/telemetry/telemetry/page/page_test_unittest.py
deleted file mode 100644
index 0bd3bd0f3e..0000000000
--- a/telemetry/telemetry/page/page_test_unittest.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import shutil
-import tempfile
-
-from telemetry.page import page as page_module
-from telemetry.page import legacy_page_test
-from telemetry.testing import options_for_unittests
-from telemetry.testing import page_test_test_case
-
-
-class PageTestThatFails(legacy_page_test.LegacyPageTest):
-
- def ValidateAndMeasurePage(self, page, tab, results):
- raise legacy_page_test.Failure
-
-
-class PageTestForBlank(legacy_page_test.LegacyPageTest):
-
- def ValidateAndMeasurePage(self, page, tab, results):
- contents = tab.EvaluateJavaScript('document.body.textContent')
- if contents.strip() != 'Hello world':
- raise legacy_page_test.MeasurementFailure(
- 'Page contents were: ' + contents)
-
-
-class PageTestQueryParams(legacy_page_test.LegacyPageTest):
-
- def ValidateAndMeasurePage(self, page, tab, results):
- query = tab.EvaluateJavaScript('window.location.search')
- expected = '?foo=1'
- if query.strip() != expected:
- raise legacy_page_test.MeasurementFailure(
- 'query was %s, not %s.' % (query, expected))
-
-
-class PageTestWithAction(legacy_page_test.LegacyPageTest):
-
- def ValidateAndMeasurePage(self, page, tab, results):
- pass
-
-
-class PageWithAction(page_module.Page):
-
- def __init__(self, url, story_set):
- super(PageWithAction, self).__init__(url, story_set, story_set.base_dir,
- name=url)
- self.run_test_action_called = False
-
- def RunPageInteractions(self, _):
- self.run_test_action_called = True
-
-
-class PageTestUnitTest(page_test_test_case.PageTestTestCase):
-
- def setUp(self):
- self.options = options_for_unittests.GetRunOptions(
- output_dir=tempfile.mkdtemp())
-
- def tearDown(self):
- shutil.rmtree(self.options.output_dir)
-
- def testGotToBlank(self):
- story_set = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
- measurement = PageTestForBlank()
- all_results = self.RunMeasurement(
- measurement, story_set, run_options=self.options)
- self.assertFalse(all_results.had_failures)
-
- def testGotQueryParams(self):
- story_set = self.CreateStorySetFromFileInUnittestDataDir(
- 'blank.html?foo=1')
- measurement = PageTestQueryParams()
- all_results = self.RunMeasurement(
- measurement, story_set, run_options=self.options)
- self.assertFalse(all_results.had_failures)
-
- def testFailure(self):
- story_set = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
- measurement = PageTestThatFails()
- all_results = self.RunMeasurement(
- measurement, story_set, run_options=self.options)
- self.assertTrue(all_results.had_failures)
-
- def testRunActions(self):
- story_set = self.CreateEmptyPageSet()
- page = PageWithAction('file://blank.html', story_set)
- story_set.AddStory(page)
- measurement = PageTestWithAction()
- self.RunMeasurement(measurement, story_set, run_options=self.options)
- self.assertTrue(page.run_test_action_called)
diff --git a/telemetry/telemetry/page/shared_page_state_unittest.py b/telemetry/telemetry/page/shared_page_state_unittest.py
index 4c9bd059d8..ebebd8732f 100644
--- a/telemetry/telemetry/page/shared_page_state_unittest.py
+++ b/telemetry/telemetry/page/shared_page_state_unittest.py
@@ -2,15 +2,23 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import shutil
+import tempfile
import unittest
+from telemetry.core import exceptions
from telemetry.core import platform as platform_module
from telemetry.internal.browser import browser_finder
+from telemetry.internal.results import results_options
+from telemetry.internal import story_runner
from telemetry.page import page
from telemetry.page import legacy_page_test
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.testing import fakes
+from telemetry.testing import options_for_unittests
+from telemetry.testing import test_stories
+from telemetry.util import image_util
from telemetry.util import wpr_modes
@@ -110,3 +118,64 @@ def testPageStatesUserAgentType(self):
shared_page_state.Shared10InchTabletPageState, 'tablet_10_inch')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedPageState, None)
+
+
+class FakeBrowserStorySetRunTests(unittest.TestCase):
+ """Tests that involve running story sets on a fake browser."""
+
+ def setUp(self):
+ self.options = options_for_unittests.GetRunOptions(
+ output_dir=tempfile.mkdtemp(), fake_browser=True)
+
+ def tearDown(self):
+ shutil.rmtree(self.options.output_dir)
+
+ @property
+ def fake_platform(self):
+ """The fake platform used by our fake browser."""
+ return self.options.fake_possible_browser.returned_browser.platform
+
+ def RunStorySetAndGetResults(self, story_set):
+ dummy_test = test_stories.DummyStoryTest()
+ with results_options.CreateResults(self.options) as results:
+ story_runner.RunStorySet(dummy_test, story_set, self.options, results)
+
+ test_results = results_options.ReadTestResults(
+ self.options.intermediate_dir)
+ self.assertEqual(len(test_results), 1)
+ return test_results[0]
+
+ def testNoScreenShotTakenForFailedPageDueToNoSupport(self):
+ # The default "FakePlatform" does not support taking screenshots.
+ self.assertFalse(self.fake_platform.CanTakeScreenshot())
+ self.options.browser_options.take_screenshot_for_failed_page = True
+
+ story_set = test_stories.SinglePageStorySet(
+ story_run_side_effect=exceptions.AppCrashException(msg='fake crash'))
+ results = self.RunStorySetAndGetResults(story_set)
+
+ self.assertEqual(results['status'], 'FAIL')
+ self.assertNotIn('screenshot.png', results['outputArtifacts'])
+
+ def testScreenShotTakenForFailedPageOnSupportedPlatform(self):
+ expected_png_base64 = ('iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91'
+ 'JpzAAAAFklEQVR4Xg3EAQ0AAABAMP1LY3YI7l8l6A'
+ 'T8tgwbJAAAAABJRU5ErkJggg==')
+ self.fake_platform.screenshot_png_data = expected_png_base64
+ # After setting up some fake data, now the platform supports screenshots.
+ self.assertTrue(self.fake_platform.CanTakeScreenshot())
+ self.options.browser_options.take_screenshot_for_failed_page = True
+
+ story_set = test_stories.SinglePageStorySet(
+ story_run_side_effect=exceptions.AppCrashException(msg='fake crash'))
+ results = self.RunStorySetAndGetResults(story_set)
+
+ self.assertEqual(results['status'], 'FAIL')
+ self.assertIn('screenshot.png', results['outputArtifacts'])
+
+ actual_screenshot_img = image_util.FromPngFile(
+ results['outputArtifacts']['screenshot.png']['filePath'])
+ self.assertTrue(
+ image_util.AreEqual(
+ image_util.FromBase64Png(expected_png_base64),
+ actual_screenshot_img))
diff --git a/telemetry/telemetry/story/story_filter.py b/telemetry/telemetry/story/story_filter.py
index 55b10f84df..5eaab45f26 100644
--- a/telemetry/telemetry/story/story_filter.py
+++ b/telemetry/telemetry/story/story_filter.py
@@ -3,20 +3,24 @@
# found in the LICENSE file.
import optparse
+import os
+import logging
import re
-from telemetry.internal.util import command_line
+from telemetry.story import typ_expectations
class _StoryMatcher(object):
def __init__(self, pattern):
self._regex = None
- self.has_compile_error = False
if pattern:
try:
self._regex = re.compile(pattern)
- except re.error:
- self.has_compile_error = True
+ except:
+ # Provide context since the error that re module provides
+ # is not user friendly.
+ logging.error('We failed to compile the regex "%s"', pattern)
+ raise
def __nonzero__(self):
return self._regex is not None
@@ -36,8 +40,31 @@ def HasLabelIn(self, story):
return self and bool(story.tags.intersection(self._tags))
-class StoryFilter(command_line.ArgumentHandlerMixIn):
- """Filters stories in the story set based on command-line flags."""
+class StoryFilterFactory(object):
+ """This factory reads static global configuration for a StoryFilter.
+
+ Static global configuration includes commandline flags and ProjectConfig.
+
+ It then provides a way to create a StoryFilter by only providing
+ the runtime configuration.
+ """
+
+ @classmethod
+ def BuildStoryFilter(cls, benchmark_name, platform_tags,
+ abridged_story_set_tag):
+ expectations = typ_expectations.StoryExpectations(benchmark_name)
+ expectations.SetTags(platform_tags or [])
+ if cls._expectations_file and os.path.exists(cls._expectations_file):
+ with open(cls._expectations_file) as fh:
+ expectations.GetBenchmarkExpectationsFromParser(fh.read())
+ if cls._run_full_story_set:
+ abridged_story_set_tag = None
+ return StoryFilter(
+ expectations, abridged_story_set_tag, cls._story_filter,
+ cls._story_filter_exclude,
+ cls._story_tag_filter, cls._story_tag_filter_exclude,
+ cls._shard_begin_index, cls._shard_end_index, cls._run_disabled_stories,
+ stories=cls._stories)
@classmethod
def AddCommandLineArgs(cls, parser):
@@ -68,38 +95,97 @@ def AddCommandLineArgs(cls, parser):
'rounded down to the number of stories. Negative values not'
'allowed. If this is ommited, the end index is the final story'
'of the benchmark. '+ common_story_shard_help))
-
+ # This should be renamed to --also-run-disabled-stories.
+ group.add_option('-d', '--also-run-disabled-tests',
+ dest='run_disabled_stories',
+ action='store_true', default=False,
+ help='Ignore expectations.config disabling.')
+ group.add_option(
+ '--run-full-story-set', action='store_true', default=False,
+ help='Whether to run the complete set of stories instead '
+ 'of an abridged version. Note that if the story set '
+ 'does not provide the information required to abridge it, '
+ 'then this argument will have no impact.')
+ group.add_option(
+ '--story', action='append', dest='stories',
+ help='An exact name of a story to run. These strings should be '
+ 'the exact values as stored in the name attribute of a story object. '
+ 'Passing in a story name this way will cause the story to run even '
+ 'if it is marked as "Skip" in the expectations config. '
+ 'This name does not include the benchmark name. This flag can be '
+ 'provided multiple times to choose to run multiple stories. '
+ 'The story flag overrides other story selection flags.')
parser.add_option_group(group)
@classmethod
- def ProcessCommandLineArgs(cls, parser, args):
- cls._include_regex = _StoryMatcher(args.story_filter)
- cls._exclude_regex = _StoryMatcher(args.story_filter_exclude)
-
- cls._include_tags = _StoryTagMatcher(args.story_tag_filter)
- cls._exclude_tags = _StoryTagMatcher(args.story_tag_filter_exclude)
-
- cls._begin_index = args.story_shard_begin_index or 0
- cls._end_index = args.story_shard_end_index
-
- if cls._end_index is not None:
- if cls._end_index < 0:
- raise parser.error(
- '--story-shard-end-index cannot be less than 0')
- if cls._begin_index is not None and cls._end_index <= cls._begin_index:
- raise parser.error(
- '--story-shard-end-index cannot be less than'
- ' or equal to --experimental-story-shard-begin-index')
-
- if cls._include_regex.has_compile_error:
- raise parser.error('--story-filter: Invalid regex.')
- if cls._exclude_regex.has_compile_error:
- raise parser.error('--story-filter-exclude: Invalid regex.')
+ def ProcessCommandLineArgs(cls, parser, args, environment=None):
+ del parser
+ cls._story_filter = args.story_filter
+ cls._story_filter_exclude = args.story_filter_exclude
+ cls._story_tag_filter = args.story_tag_filter
+ cls._story_tag_filter_exclude = args.story_tag_filter_exclude
+ cls._stories = args.stories
+ if cls._stories:
+ assert args.story_shard_begin_index is None, (
+ '--story and --story-shard-begin-index are mutually exclusive.')
+ assert args.story_shard_end_index is None, (
+ '--story and --story-shard-end-index are mutually exclusive.')
+ assert args.story_filter is None, (
+ '--story and --story-filter are mutually exclusive.')
+ assert args.story_filter_exclude is None, (
+ '--story and --story-filter-exclude are mutually exclusive.')
+ assert args.story_tag_filter is None, (
+ '--story and --story-tag-filter are mutually exclusive.')
+ assert args.story_tag_filter_exclude is None, (
+ '--story and --story-tag-filter-exclude are mutually exclusive.')
+ cls._shard_begin_index = args.story_shard_begin_index or 0
+ cls._shard_end_index = args.story_shard_end_index
+ if environment and environment.expectations_files:
+ assert len(environment.expectations_files) == 1
+ cls._expectations_file = environment.expectations_files[0]
+ else:
+ cls._expectations_file = None
+ cls._run_disabled_stories = args.run_disabled_stories
+ cls._run_full_story_set = args.run_full_story_set
- @classmethod
- def FilterStories(cls, stories):
+
+class StoryFilter(object):
+ """Logic to decide whether to run, skip, or ignore stories."""
+
+ def __init__(
+ self, expectations=None, abridged_story_set_tag=None, story_filter=None,
+ story_filter_exclude=None,
+ story_tag_filter=None, story_tag_filter_exclude=None,
+ shard_begin_index=0, shard_end_index=None, run_disabled_stories=False,
+ stories=None):
+ self._expectations = expectations
+ self._include_regex = _StoryMatcher(story_filter)
+ self._exclude_regex = _StoryMatcher(story_filter_exclude)
+ self._include_tags = _StoryTagMatcher(story_tag_filter)
+ self._exclude_tags = _StoryTagMatcher(story_tag_filter_exclude)
+ self._shard_begin_index = shard_begin_index
+ self._shard_end_index = shard_end_index
+ if self._shard_end_index is not None:
+ if self._shard_end_index < 0:
+ raise ValueError(
+ 'shard end index cannot be less than 0, since stories are indexed '
+ 'with positive numbers')
+ if (self._shard_begin_index is not None and
+ self._shard_end_index <= self._shard_begin_index):
+ raise ValueError(
+ 'shard end index cannot be less than or equal to shard begin index')
+ self._run_disabled_stories = run_disabled_stories
+ self._abridged_story_set_tag = abridged_story_set_tag
+ if stories:
+ assert isinstance(stories, list)
+ self._stories = stories
+
+ def FilterStories(self, stories):
"""Filters the given stories, using filters provided in the command line.
+ This filter causes stories to become completely ignored, and therefore
+ they will not show up in test results output.
+
Story sharding is done before exclusion and inclusion is done.
Args:
@@ -108,26 +194,65 @@ def FilterStories(cls, stories):
Returns:
A list of remaining stories.
"""
- if cls._begin_index < 0:
- cls._begin_index = 0
- if cls._end_index is None:
- cls._end_index = len(stories)
-
- stories = stories[cls._begin_index:cls._end_index]
+ if self._stories:
+ output_stories = []
+ output_stories_names = []
+ for story in stories:
+ if story.name in self._stories:
+ output_stories.append(story)
+ output_stories_names.append(story.name)
+ unmatched_stories = (
+ frozenset(self._stories) - frozenset(output_stories_names))
+ for story in unmatched_stories:
+ raise ValueError('story %s was asked for but does not exist.' % story)
+ return output_stories
+ if self._abridged_story_set_tag:
+ stories = [story for story in stories
+ if self._abridged_story_set_tag in story.tags]
+ if self._shard_begin_index < 0:
+ self._shard_begin_index = 0
+ if self._shard_end_index is None:
+ self._shard_end_index = len(stories)
+ stories = stories[self._shard_begin_index:self._shard_end_index]
final_stories = []
for story in stories:
# Exclude filters take priority.
- if cls._exclude_tags.HasLabelIn(story):
+ if self._exclude_tags.HasLabelIn(story):
continue
- if cls._exclude_regex.HasMatch(story):
+ if self._exclude_regex.HasMatch(story):
continue
-
- if cls._include_tags and not cls._include_tags.HasLabelIn(story):
+ if self._include_tags and not self._include_tags.HasLabelIn(story):
continue
- if cls._include_regex and not cls._include_regex.HasMatch(story):
+ if self._include_regex and not self._include_regex.HasMatch(story):
continue
-
final_stories.append(story)
-
return final_stories
+
+ def ShouldSkip(self, story):
+ """Decides whether a story should be marked skipped.
+
+ The difference between marking a story skipped and simply not running
+ it is important for tracking purposes. Officially skipped stories show
+ up in test results outputs.
+
+ Args:
+ story: A story.Story object.
+
+ Returns:
+ A skip reason string if the story should be skipped, otherwise an
+ empty string.
+ """
+ disabled = self._expectations.IsStoryDisabled(story)
+ if self._stories:
+ if story.name in self._stories:
+      logging.warning('Running story %s even though it is disabled because '
+                      'it was specifically asked for by name in the --story '
+                      'flag.', story.name)
+ return ''
+ if disabled and self._run_disabled_stories:
+      logging.warning(
+          'Force running a disabled story %s even though it was disabled with '
+          'the following reason: %s', story.name, disabled)
+ return ''
+ return disabled
diff --git a/telemetry/telemetry/story/story_filter_unittest.py b/telemetry/telemetry/story/story_filter_unittest.py
index 65420bdf88..cff2f12987 100644
--- a/telemetry/telemetry/story/story_filter_unittest.py
+++ b/telemetry/telemetry/story/story_filter_unittest.py
@@ -1,144 +1,281 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
+# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
+import re
-from telemetry import story
-from telemetry.page import page
from telemetry.story import story_filter as story_filter_module
+from telemetry.testing import fakes
-class FilterTest(unittest.TestCase):
+class StoryFilterInitUnittest(unittest.TestCase):
- def setUp(self):
- story_set = story.StorySet()
- self.p1 = page.Page(
- url='file://your/smile/widen.html', page_set=story_set,
- name='MayYour.smile_widen', tags=['tag1', 'tag2'])
- self.p2 = page.Page(
- url='file://share_a/smile/too.html', page_set=story_set,
- name='ShareA.smiles_too', tags=['tag1'])
- self.p3 = page.Page(
- url='file://share_a/smile/too.html', page_set=story_set,
- name='share_a/smile/too.html', tags=['tag2'])
- self.pages = [self.p1, self.p2, self.p3]
-
- @staticmethod
- def ProcessCommandLineArgs(parser=None, **kwargs):
- class Options(object):
- def __init__(
- self, story_filter=None, story_filter_exclude=None,
- story_tag_filter=None, story_tag_filter_exclude=None,
- story_shard_begin_index=None,
- story_shard_end_index=None):
- self.story_filter = story_filter
- self.story_filter_exclude = story_filter_exclude
- self.story_tag_filter = story_tag_filter
- self.story_tag_filter_exclude = story_tag_filter_exclude
- self.story_shard_begin_index = (
- story_shard_begin_index)
- self.story_shard_end_index = (
- story_shard_end_index)
- story_filter_module.StoryFilter.ProcessCommandLineArgs(
- parser, Options(**kwargs))
-
- def assertPagesSelected(self, expected):
- result = story_filter_module.StoryFilter.FilterStories(self.pages)
- self.assertEqual(expected, result)
-
- def testNoFilterMatchesAll(self):
- self.ProcessCommandLineArgs()
- self.assertPagesSelected(self.pages)
-
- def testBadRegexCallsParserError(self):
- class MockParserException(Exception):
- pass
- class MockParser(object):
- def error(self, _):
- raise MockParserException
- with self.assertRaises(MockParserException):
- self.ProcessCommandLineArgs(parser=MockParser(), story_filter='+')
+ def testBadStoryFilterRegexRaises(self):
+ with self.assertRaises(re.error):
+ story_filter_module.StoryFilter(story_filter='+')
+
+ def testBadStoryFilterExcludeRegexRaises(self):
+ with self.assertRaises(re.error):
+ story_filter_module.StoryFilter(story_filter_exclude='+')
def testBadStoryShardArgEnd(self):
- class MockParserException(Exception):
- pass
- class MockParser(object):
- def error(self, _):
- raise MockParserException
- with self.assertRaises(MockParserException):
- self.ProcessCommandLineArgs(
- parser=MockParser(), story_shard_end_index=-1)
-
- def testBadStoryShardArgEndAndBegin(self):
- class MockParserException(Exception):
- pass
- class MockParser(object):
- def error(self, _):
- raise MockParserException
- with self.assertRaises(MockParserException):
- self.ProcessCommandLineArgs(
- parser=MockParser(), story_shard_end_index=2,
- story_shard_begin_index=3)
-
- def testUniqueSubstring(self):
- self.ProcessCommandLineArgs(story_filter='smile_widen')
- self.assertPagesSelected([self.p1])
-
- def testSharedSubstring(self):
- self.ProcessCommandLineArgs(story_filter='smile')
- self.assertPagesSelected(self.pages)
+ with self.assertRaises(ValueError):
+ story_filter_module.StoryFilter(shard_end_index=-1)
+
+ def testMismatchedStoryShardArgEndAndBegin(self):
+ with self.assertRaises(ValueError):
+ story_filter_module.StoryFilter(
+ shard_end_index=2,
+ shard_begin_index=3)
+
+
+class ProcessCommandLineUnittest(unittest.TestCase):
+
+ def testStoryFlagExclusivity(self):
+ args = fakes.FakeParsedArgsForStoryFilter(
+ story_filter='blah', stories=['aa', 'bb'])
+ with self.assertRaises(AssertionError):
+ story_filter_module.StoryFilterFactory.ProcessCommandLineArgs(
+ parser=None, args=args)
+
+ def testStoryFlagSmoke(self):
+ args = fakes.FakeParsedArgsForStoryFilter(stories=['aa', 'bb'])
+ story_filter_module.StoryFilterFactory.ProcessCommandLineArgs(
+ parser=None, args=args)
+
+
+class FakeStory(object):
+ def __init__(self, name='fake_story_name', tags=None):
+ self.name = name
+ self.tags = tags or set()
+
+
+class FilterStoriesUnittest(unittest.TestCase):
+
+ def testStoryFlag(self):
+ a = FakeStory('a')
+ b = FakeStory('b')
+ c = FakeStory('c')
+ d = FakeStory('d')
+ stories = (a, b, c, d)
+ story_filter = story_filter_module.StoryFilter(stories=['a', 'c'])
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([a, c], output)
+
+ def testStoryFlag_InvalidStory(self):
+ a = FakeStory('a')
+ stories = (a,)
+ story_filter = story_filter_module.StoryFilter(stories=['a', 'c'])
+ with self.assertRaises(ValueError):
+ story_filter.FilterStories(stories)
+
+ def testNoFilter(self):
+ a = FakeStory('a')
+ b = FakeStory('b')
+ stories = (a, b)
+ story_filter = story_filter_module.StoryFilter()
+ output = story_filter.FilterStories(stories)
+ self.assertEqual(list(stories), output)
+
+ def testSimple(self):
+ a = FakeStory('a')
+ foo = FakeStory('foo') # pylint: disable=blacklisted-name
+ stories = (a, foo)
+ story_filter = story_filter_module.StoryFilter(
+ story_filter='foo')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([foo], output)
+
+ def testMultimatch(self):
+ a = FakeStory('a')
+ foo = FakeStory('foo') # pylint: disable=blacklisted-name
+ foobar = FakeStory('foobar')
+ stories = (a, foo, foobar)
+ story_filter = story_filter_module.StoryFilter(
+ story_filter='foo')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([foo, foobar], output)
def testNoMatch(self):
- self.ProcessCommandLineArgs(story_filter='frown')
- self.assertPagesSelected([])
+ a = FakeStory('a')
+ foo = FakeStory('foo') # pylint: disable=blacklisted-name
+ foobar = FakeStory('foobar')
+ stories = (a, foo, foobar)
+ story_filter = story_filter_module.StoryFilter(
+ story_filter='1234')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([], output)
def testExclude(self):
- self.ProcessCommandLineArgs(story_filter_exclude='ShareA')
- self.assertPagesSelected([self.p1, self.p3])
+ a = FakeStory('a')
+ foo = FakeStory('foo') # pylint: disable=blacklisted-name
+ foobar = FakeStory('foobar')
+ stories = (a, foo, foobar)
+ story_filter = story_filter_module.StoryFilter(
+ story_filter_exclude='a')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([foo], output)
def testExcludeTakesPriority(self):
- self.ProcessCommandLineArgs(
- story_filter='smile',
- story_filter_exclude='wide')
- self.assertPagesSelected([self.p2, self.p3])
-
- def testNoNameMatchesDisplayName(self):
- self.ProcessCommandLineArgs(story_filter='share_a/smile')
- self.assertPagesSelected([self.p3])
+ a = FakeStory('a')
+ foo = FakeStory('foo') # pylint: disable=blacklisted-name
+ foobar = FakeStory('foobar')
+ stories = (a, foo, foobar)
+ story_filter = story_filter_module.StoryFilter(
+ story_filter='foo',
+ story_filter_exclude='bar')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([foo], output)
- def testNotagMatch(self):
- self.ProcessCommandLineArgs(story_tag_filter='tagX')
- self.assertPagesSelected([])
+ def testNoTagMatch(self):
+ a = FakeStory('a')
+ foo = FakeStory('foo') # pylint: disable=blacklisted-name
+ stories = (a, foo)
+ story_filter = story_filter_module.StoryFilter(
+ story_tag_filter='x')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([], output)
- def testtagsAllMatch(self):
- self.ProcessCommandLineArgs(story_tag_filter='tag1,tag2')
- self.assertPagesSelected(self.pages)
+ def testTagsAllMatch(self):
+ a = FakeStory('a', {'1', '2'})
+ b = FakeStory('b', {'1', '2'})
+ stories = (a, b)
+ story_filter = story_filter_module.StoryFilter(
+ story_tag_filter='1,2')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual(list(stories), output)
def testExcludetagTakesPriority(self):
- self.ProcessCommandLineArgs(
- story_tag_filter='tag1',
- story_tag_filter_exclude='tag2')
- self.assertPagesSelected([self.p2])
+ x = FakeStory('x', {'1'})
+ y = FakeStory('y', {'1', '2'})
+ stories = (x, y)
+ story_filter = story_filter_module.StoryFilter(
+ story_tag_filter='1',
+ story_tag_filter_exclude='2')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([x], output)
+
+ def testAbridgedStorySetTag(self):
+ x = FakeStory('x', {'1'})
+ y = FakeStory('y', {'1', '2'})
+ stories = (x, y)
+ story_filter = story_filter_module.StoryFilter(
+ abridged_story_set_tag='2')
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([y], output)
+
+ def testAbridgeBeforeShardIndexing(self):
+ """Test that the abridged story set tag gets applied before indexing.
+
+ Shard maps on the chromium side allow us to distribute runtime evenly across
+ shards so that we minimize waterfall cycle time. If we abridge after we
+  select indexes then we cannot control how many stories are on each shard.
+ """
+ x = FakeStory('x', {'t'})
+ y = FakeStory('y')
+ z = FakeStory('z', {'t'})
+ stories = (x, y, z)
+ story_filter = story_filter_module.StoryFilter(
+ abridged_story_set_tag='t',
+ shard_end_index=2)
+ output = story_filter.FilterStories(stories)
+ self.assertEqual([x, z], output)
+
+
+class FilterStoriesShardIndexUnittest(unittest.TestCase):
+ def setUp(self):
+ self.s1 = FakeStory('1')
+ self.s2 = FakeStory('2')
+ self.s3 = FakeStory('3')
+ self.stories = (self.s1, self.s2, self.s3)
def testStoryShardBegin(self):
- self.ProcessCommandLineArgs(story_shard_begin_index=1)
- self.assertPagesSelected([self.p2, self.p3])
+ story_filter = story_filter_module.StoryFilter(
+ shard_begin_index=1)
+ output = story_filter.FilterStories(self.stories)
+ self.assertEqual([self.s2, self.s3], output)
def testStoryShardEnd(self):
- self.ProcessCommandLineArgs(story_shard_end_index=2)
- self.assertPagesSelected([self.p1, self.p2])
+ story_filter = story_filter_module.StoryFilter(
+ shard_end_index=2)
+ output = story_filter.FilterStories(self.stories)
+ self.assertEqual([self.s1, self.s2], output)
def testStoryShardBoth(self):
- self.ProcessCommandLineArgs(
- story_shard_begin_index=1,
- story_shard_end_index=2)
- self.assertPagesSelected([self.p2])
+ story_filter = story_filter_module.StoryFilter(
+ shard_begin_index=1,
+ shard_end_index=2)
+ output = story_filter.FilterStories(self.stories)
+ self.assertEqual([self.s2], output)
def testStoryShardBeginWraps(self):
- self.ProcessCommandLineArgs(story_shard_begin_index=-1)
- self.assertPagesSelected(self.pages)
+ story_filter = story_filter_module.StoryFilter(
+ shard_begin_index=-1)
+ output = story_filter.FilterStories(self.stories)
+ self.assertEqual(list(self.stories), output)
def testStoryShardEndWraps(self):
- self.ProcessCommandLineArgs(story_shard_end_index=5)
- self.assertPagesSelected(self.pages)
+ """This is needed since benchmarks may change size.
+
+ When they change size, we will not immediately write new
+ shard maps for them.
+ """
+ story_filter = story_filter_module.StoryFilter(
+ shard_end_index=5)
+ output = story_filter.FilterStories(self.stories)
+ self.assertEqual(list(self.stories), output)
+
+
+class FakeExpectations(object):
+ def __init__(self, stories_to_disable=None):
+ self._stories_to_disable = stories_to_disable or []
+
+ def IsStoryDisabled(self, story):
+ if story.name in self._stories_to_disable:
+ return 'fake reason'
+ return ''
+
+
+class ShouldSkipUnittest(unittest.TestCase):
+
+ def testRunDisabledStories_DisabledStory(self):
+ story = FakeStory()
+ expectations = FakeExpectations(stories_to_disable=[story.name])
+ story_filter = story_filter_module.StoryFilter(
+ expectations=expectations,
+ run_disabled_stories=True)
+ self.assertFalse(story_filter.ShouldSkip(story))
+
+ def testRunDisabledStories_EnabledStory(self):
+ story = FakeStory()
+ expectations = FakeExpectations(stories_to_disable=[])
+ story_filter = story_filter_module.StoryFilter(
+ expectations=expectations,
+ run_disabled_stories=True)
+ self.assertFalse(story_filter.ShouldSkip(story))
+
+ def testEnabledStory(self):
+ story = FakeStory()
+ expectations = FakeExpectations(stories_to_disable=[])
+ story_filter = story_filter_module.StoryFilter(
+ expectations=expectations,
+ run_disabled_stories=False)
+ self.assertFalse(story_filter.ShouldSkip(story))
+
+ def testDisabledStory(self):
+ story = FakeStory()
+ expectations = FakeExpectations(stories_to_disable=[story.name])
+ story_filter = story_filter_module.StoryFilter(
+ expectations=expectations,
+ run_disabled_stories=False)
+ self.assertEqual(story_filter.ShouldSkip(story), 'fake reason')
+
+ def testDisabledStory_StoryFlag(self):
+ story = FakeStory('a_name')
+ expectations = FakeExpectations(stories_to_disable=[story.name])
+ story_filter = story_filter_module.StoryFilter(
+ expectations=expectations,
+ run_disabled_stories=False,
+ stories=['a_name'])
+ self.assertFalse(story_filter.ShouldSkip(story))
diff --git a/telemetry/telemetry/story/typ_expectations.py b/telemetry/telemetry/story/typ_expectations.py
index ddefa2966c..8d6d347cf6 100644
--- a/telemetry/telemetry/story/typ_expectations.py
+++ b/telemetry/telemetry/story/typ_expectations.py
@@ -25,12 +25,13 @@
SYSTEM_CONDITION_TAGS = frozenset([
'android', 'android-go', 'android-low-end', 'android-nexus-5',
'android-nexus-5x', 'android-nexus-6', 'android-pixel-2',
- 'chromeos', 'desktop', 'linux', 'mac', 'mac-10.12', 'win',
- 'win10', 'win7', 'android-not-webview', 'android-webview',
- 'mobile', 'android-marshmallow', 'android-lollipop', 'android-nougat',
- 'android-oreo', 'android-pie', 'android-10', 'android-webview-google',
- 'reference', 'android-chromium', 'ubuntu', 'android-kitkat', 'highsierra',
- 'sierra', 'mac-10.11', 'release', 'exact', 'debug'
+ 'chromeos', 'chromeos-local', 'chromeos-remote', 'desktop', 'linux', 'mac',
+ 'mac-10.12', 'win', 'win10', 'win7', 'android-not-webview',
+ 'android-webview', 'mobile', 'android-marshmallow', 'android-lollipop',
+ 'android-nougat', 'android-oreo', 'android-pie', 'android-10',
+ 'android-webview-google', 'reference', 'android-chromium', 'ubuntu',
+ 'android-kitkat', 'highsierra', 'sierra', 'mac-10.11', 'release', 'exact',
+ 'debug'
])
diff --git a/telemetry/telemetry/story/typ_expectations_unittest.py b/telemetry/telemetry/story/typ_expectations_unittest.py
index 767ff8fa98..c9a47cc5bd 100644
--- a/telemetry/telemetry/story/typ_expectations_unittest.py
+++ b/telemetry/telemetry/story/typ_expectations_unittest.py
@@ -5,44 +5,49 @@
import unittest
import mock
-from telemetry import benchmark
-from telemetry import story as story_module
+from telemetry.story import typ_expectations
class TypStoryExpectationsTest(unittest.TestCase):
def testDisableBenchmark(self):
- expectations = (
+ expectations = typ_expectations.StoryExpectations('fake_benchmark_name')
+ raw_expectations = (
'# tags: [ all ]\n'
'# results: [ Skip ]\n'
- 'crbug.com/123 [ all ] fake/* [ Skip ]\n')
- with mock.patch.object(benchmark.Benchmark, 'Name', return_value='fake'):
- b = benchmark.Benchmark()
- b.AugmentExpectationsWithFile(expectations)
- b.expectations.SetTags(['All'])
- reason = b._expectations.IsBenchmarkDisabled()
- self.assertTrue(reason)
- self.assertEqual(reason, 'crbug.com/123')
+ 'crbug.com/123 [ all ] fake_benchmark_name/* [ Skip ]\n')
+ expectations.GetBenchmarkExpectationsFromParser(raw_expectations)
+ expectations.SetTags(['All'])
+ reason = expectations.IsBenchmarkDisabled()
+ self.assertTrue(reason)
+ self.assertEqual(reason, 'crbug.com/123')
- def testDisableStoryMultipleConditions(self):
- expectations = (
+ def testDisableStory_WithReason(self):
+ expectations = typ_expectations.StoryExpectations('fake_benchmark_name')
+ raw_expectations = (
'# tags: [ linux win ]\n'
'# results: [ Skip ]\n'
- '[ linux ] fake/one [ Skip ]\n'
- 'crbug.com/123 [ win ] fake/on* [ Skip ]\n')
- for os in ['linux', 'win']:
- with mock.patch.object(
- benchmark.Benchmark, 'Name', return_value='fake'):
- story = mock.MagicMock()
- story.name = 'one'
- story_set = story_module.StorySet()
- story_set._stories.append(story)
- b = benchmark.Benchmark()
- b.AugmentExpectationsWithFile(expectations)
- b.expectations.SetTags([os])
- reason = b._expectations.IsStoryDisabled(story)
- self.assertTrue(reason)
- if os == 'linux':
- self.assertEqual(reason, 'No reason given')
- else:
- self.assertEqual(reason, 'crbug.com/123')
+ '[ linux ] fake_benchmark_name/one [ Skip ]\n'
+ 'crbug.com/123 [ win ] fake_benchmark_name/on* [ Skip ]\n')
+ expectations.GetBenchmarkExpectationsFromParser(raw_expectations)
+ expectations.SetTags(['win'])
+ story = mock.MagicMock()
+ story.name = 'one'
+ reason = expectations.IsStoryDisabled(story)
+ self.assertTrue(reason)
+ self.assertEqual(reason, 'crbug.com/123')
+
+ def testDisableStory_NoReasonGiven(self):
+ expectations = typ_expectations.StoryExpectations('fake_benchmark_name')
+ raw_expectations = (
+ '# tags: [ linux win ]\n'
+ '# results: [ Skip ]\n'
+ '[ linux ] fake_benchmark_name/one [ Skip ]\n'
+ 'crbug.com/123 [ win ] fake_benchmark_name/on* [ Skip ]\n')
+ expectations.GetBenchmarkExpectationsFromParser(raw_expectations)
+ expectations.SetTags(['linux'])
+ story = mock.MagicMock()
+ story.name = 'one'
+ reason = expectations.IsStoryDisabled(story)
+ self.assertTrue(reason)
+ self.assertEqual(reason, 'No reason given')
diff --git a/telemetry/telemetry/testing/fakes/__init__.py b/telemetry/telemetry/testing/fakes/__init__.py
index 1ee6c0d376..6866c58b5b 100644
--- a/telemetry/telemetry/testing/fakes/__init__.py
+++ b/telemetry/telemetry/testing/fakes/__init__.py
@@ -36,9 +36,10 @@ def GetOSName(self):
class FakePlatform(object):
- def __init__(self, os_name='', os_version_name=''):
+ def __init__(self, os_name='', os_version_name='', arch_name=''):
self._network_controller = None
self._tracing_controller = None
+ self._arch_name = arch_name or 'FakeArchitecture'
self._os_name = os_name or 'FakeOS'
self._os_version_name = os_version_name or 'FakeVersion'
self._device_type_name = 'abc'
@@ -82,7 +83,7 @@ def HasBeenThermallyThrottled(self):
return False
def GetArchName(self):
- raise NotImplementedError
+ return self._arch_name
def SetOSName(self, name):
self._os_name = name
@@ -213,10 +214,10 @@ def Create(self, local_port, remote_port, reverse=False):
class FakePossibleBrowser(object):
def __init__(self, execute_on_startup=None,
execute_after_browser_creation=None,
- os_name='', os_version_name='', browser_type=''):
+ arch_name='', os_name='', os_version_name='', browser_type=''):
if os_name:
self._returned_browser = FakeBrowser(
- FakePlatform(os_name, os_version_name), browser_type)
+ FakePlatform(os_name, os_version_name, arch_name), browser_type)
else:
self._returned_browser = FakeBrowser(
FakeLinuxPlatform(), browser_type)
@@ -669,3 +670,24 @@ def Restore(self):
self._module.time = self._actual_time
self._module = None
self._actual_time = None
+
+
+class FakeParsedArgsForStoryFilter(object):
+ def __init__(
+ self, story_filter=None, story_filter_exclude=None,
+ story_tag_filter=None, story_tag_filter_exclude=None,
+ story_shard_begin_index=None,
+ story_shard_end_index=None,
+ run_full_story_set=None,
+ run_disabled_stories=False, stories=None):
+ self.story_filter = story_filter
+ self.story_filter_exclude = story_filter_exclude
+ self.story_tag_filter = story_tag_filter
+ self.story_tag_filter_exclude = story_tag_filter_exclude
+ self.story_shard_begin_index = (
+ story_shard_begin_index)
+ self.story_shard_end_index = (
+ story_shard_end_index)
+ self.run_disabled_stories = run_disabled_stories
+ self.run_full_story_set = run_full_story_set
+ self.stories = stories
diff --git a/telemetry/telemetry/testing/legacy_page_test_case.py b/telemetry/telemetry/testing/legacy_page_test_case.py
new file mode 100644
index 0000000000..809a1276e8
--- /dev/null
+++ b/telemetry/telemetry/testing/legacy_page_test_case.py
@@ -0,0 +1,58 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provide a TestCase to facilitate testing LegacyPageTest instances."""
+
+import shutil
+import tempfile
+import unittest
+
+from telemetry.internal.results import results_options
+from telemetry.internal import story_runner
+from telemetry.page import legacy_page_test
+from telemetry.testing import options_for_unittests
+from telemetry.testing import test_stories
+
+
+class LegacyPageTestCase(unittest.TestCase):
+ """A helper class to write tests for LegacyPageTest clients."""
+
+ def setUp(self):
+ self.options = options_for_unittests.GetRunOptions(
+ output_dir=tempfile.mkdtemp())
+ self.test_result = None
+
+ def tearDown(self):
+ shutil.rmtree(self.options.output_dir)
+
+ @staticmethod
+ def CreateStorySetForTest(url):
+ # Subclasses can override this method to customize the page used for tests.
+ return test_stories.SinglePageStorySet(url)
+
+ def RunPageTest(self, page_test, url, expect_status='PASS'):
+ """Run a legacy page_test on a test url and return its measurements.
+
+ Args:
+ page_test: A legacy_page_test.LegacyPageTest instance.
+ url: A URL for the test page to load, usually a local 'file://' URI to be
+ served from telemetry/internal/testing. Clients can override the
+          static method CreateStorySetForTest to change this behavior.
+ expect_status: A string with the expected status of the test run.
+
+ Returns:
+ A dictionary with measurements recorded by the legacy_page_test.
+ """
+ self.assertIsInstance(page_test, legacy_page_test.LegacyPageTest)
+ page_test.CustomizeBrowserOptions(self.options.browser_options)
+ story_set = self.CreateStorySetForTest(url)
+ self.assertEqual(len(story_set), 1)
+ with results_options.CreateResults(self.options) as results:
+ story_runner.RunStorySet(page_test, story_set, self.options, results)
+ test_results = results_options.ReadTestResults(
+ self.options.intermediate_dir)
+ self.assertEqual(len(test_results), 1)
+ self.test_result = test_results[0]
+ self.assertEqual(self.test_result['status'], expect_status)
+ return results_options.ReadMeasurements(self.test_result)
diff --git a/telemetry/telemetry/testing/options_for_unittests.py b/telemetry/telemetry/testing/options_for_unittests.py
index a63f6f5a52..db4fc30539 100644
--- a/telemetry/telemetry/testing/options_for_unittests.py
+++ b/telemetry/telemetry/testing/options_for_unittests.py
@@ -10,6 +10,8 @@
This allows multiple unit tests to use a specific
browser, in face of multiple options."""
+import os
+
from telemetry.internal import story_runner
from telemetry.testing import fakes
@@ -36,7 +38,7 @@ def AreSet():
def GetRunOptions(output_dir=None, fake_browser=False, benchmark_cls=None,
- overrides=None):
+ overrides=None, environment=None):
"""Get an options object filled in necessary defaults for the Run command.
Args:
@@ -55,6 +57,7 @@ def GetRunOptions(output_dir=None, fake_browser=False, benchmark_cls=None,
the returned object as they see fit. TODO(crbug.com/985712): This should
not be required, ideally the processing of options should not change the
internal state of Telemetry objects.
+ environment: The ProjectConfig to run within.
Returns:
An options object with default values for all command line arguments.
@@ -77,9 +80,11 @@ def GetRunOptions(output_dir=None, fake_browser=False, benchmark_cls=None,
setattr(options, name, value)
if benchmark_cls is not None:
benchmark_cls.ProcessCommandLineArgs(parser, options)
- story_runner.ProcessCommandLineArgs(parser, options)
+ story_runner.ProcessCommandLineArgs(parser, options, environment)
options.suppress_gtest_report = True
options.output_dir = output_dir
+ if output_dir is not None:
+ options.intermediate_dir = os.path.join(output_dir, 'artifacts')
# TODO(crbug.com/928275): Remove these when Telemetry tests no longer
# depend on any result processing options.
options.output_formats = ['none']
diff --git a/telemetry/telemetry/testing/page_test_test_case.py b/telemetry/telemetry/testing/page_test_test_case.py
deleted file mode 100644
index 54d94b0ede..0000000000
--- a/telemetry/telemetry/testing/page_test_test_case.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Provide a TestCase base class for PageTest subclasses' unittests."""
-
-import unittest
-
-from telemetry import story
-from telemetry.core import util
-from telemetry.internal.results import results_options
-from telemetry.internal import story_runner
-from telemetry.page import page as page_module
-from telemetry.page import legacy_page_test
-
-
-BENCHMARK_NAME = 'page_test_test_case.RunMeasurement'
-
-
-class BasicTestPage(page_module.Page):
- def __init__(self, url, story_set, base_dir, name=''):
- super(BasicTestPage, self).__init__(url, story_set, base_dir, name=name)
-
- def RunPageInteractions(self, action_runner):
- with action_runner.CreateGestureInteraction('ScrollAction'):
- action_runner.ScrollPage()
- # Undo the above action so that we can run BasicTestPage again if we need
- # to, without closing the browser. Otherwise, tests may see unexpected
- # behaviour on Chrome OS; see crbug.com/851523 for an example.
- action_runner.ScrollPage(direction='up')
-
-
-class PageTestTestCase(unittest.TestCase):
- """A base class to simplify writing unit tests for PageTest subclasses."""
-
- def CreateStorySetFromFileInUnittestDataDir(self, test_filename):
- ps = self.CreateEmptyPageSet()
- page = BasicTestPage('file://' + test_filename, ps, base_dir=ps.base_dir,
- name=test_filename)
- ps.AddStory(page)
- return ps
-
- def CreateEmptyPageSet(self):
- base_dir = util.GetUnittestDataDir()
- ps = story.StorySet(base_dir=base_dir)
- return ps
-
- def RunMeasurement(self, measurement, story_set, run_options):
- """Runs a measurement against a story set, returning a results object.
-
- Args:
- measurement: A test object: either a story_test.StoryTest or
- legacy_page_test.LegacyPageTest instance.
- story_set: A story set.
- run_options: An object with all options needed to run stories; can be
- created with the help of options_for_unittests.GetRunOptions().
- """
- if isinstance(measurement, legacy_page_test.LegacyPageTest):
- measurement.CustomizeBrowserOptions(run_options.browser_options)
- with results_options.CreateResults(
- run_options, benchmark_name=BENCHMARK_NAME) as results:
- story_runner.RunStorySet(measurement, story_set, run_options, results)
- return results
diff --git a/telemetry/telemetry/testing/run_tests.py b/telemetry/telemetry/testing/run_tests.py
index fefbd76373..4bef71d1f3 100644
--- a/telemetry/telemetry/testing/run_tests.py
+++ b/telemetry/telemetry/testing/run_tests.py
@@ -68,6 +68,8 @@ def AddCommandLineArgs(cls, parser, _):
action='append', default=[])
parser.add_option('--disable-logging-config', action='store_true',
default=False, help='Configure logging (default on)')
+ parser.add_option('-v', '--verbose', action='count', dest='verbosity',
+ help='Increase verbosity level (repeat as needed)')
typ.ArgumentParser.add_option_group(parser,
"Options for running the tests",
@@ -79,6 +81,13 @@ def AddCommandLineArgs(cls, parser, _):
@classmethod
def ProcessCommandLineArgs(cls, parser, args, _):
+ if args.verbosity >= 2:
+ logging.getLogger().setLevel(logging.DEBUG)
+ elif args.verbosity:
+ logging.getLogger().setLevel(logging.INFO)
+ else:
+ logging.getLogger().setLevel(logging.WARNING)
+
# We retry failures by default unless we're running a list of tests
# explicitly.
if not args.retry_limit and not args.positional_args:
diff --git a/telemetry/telemetry/testing/run_tests_unittest.py b/telemetry/telemetry/testing/run_tests_unittest.py
index b9e77f6f5b..a849f36025 100644
--- a/telemetry/telemetry/testing/run_tests_unittest.py
+++ b/telemetry/telemetry/testing/run_tests_unittest.py
@@ -30,6 +30,9 @@ def __init__(self, browser_type, os_name, os_version_name,
self.platform = MockPlatform(os_name, os_version_name)
self.supports_tab_control = supports_tab_control
+ def GetTypExpectationsTags(self):
+ return []
+
class MockPlatform(object):
def __init__(self, os_name, os_version_name):
diff --git a/telemetry/telemetry/testing/test_stories.py b/telemetry/telemetry/testing/test_stories.py
new file mode 100644
index 0000000000..d5580b42ff
--- /dev/null
+++ b/telemetry/telemetry/testing/test_stories.py
@@ -0,0 +1,213 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Simple customizable stories and story sets to use in tests.
+
+There are two main kinds of stories defined:
+- TestPage, a Page subclass using the default SharedPageState. Whether an
+ actual browser is involved in tests using these depends on the options object
+ built with options_for_unittests.GetRunOptions() and passed to the relevant
+ story running functions.
+- DummyStory, a Story using a TestSharedState and a mock platform. Tests using
+ these never involve a real browser.
+
+This module also provides helpers to easily create story sets and other related
+classes to work with these kinds of stories.
+"""
+
+import posixpath
+import urlparse
+
+import mock
+
+from telemetry.core import platform as platform_module
+from telemetry.core import util
+from telemetry import page
+from telemetry import story as story_module
+from telemetry.web_perf import story_test
+
+
+class DummyStoryTest(story_test.StoryTest):
+ """A dummy no-op StoryTest.
+
+  Does nothing beyond whatever the shared state, as determined by the
+  stories used in the tests, does.
+ """
+ def __init__(self, options=None):
+ del options # Unused.
+
+ def WillRunStory(self, platform):
+ del platform # Unused.
+
+ def Measure(self, platform, results):
+ del platform, results # Unused.
+
+ def DidRunStory(self, platform, results):
+ del platform, results # Unused.
+
+
+class TestPage(page.Page):
+ def __init__(self, story_set, url, name=None, run_side_effect=None):
+ """A simple customizable page.
+
+ Note that this uses the default shared_page_state.SharedPageState, as most
+ stories do, which includes method calls to interact with a browser and its
+ platform. Whether a real browser is actually used depends on the options
+ object built with the help of options_for_unittests.GetRunOptions().
+
+ Args:
+ story_set: An instance of the StorySet object this page belongs to.
+ url: A URL for the page to load, in tests usually a local 'file://' URI.
+      name: A name for the story. If not given, a reasonable default is
+          built from the url.
+ run_side_effect: Side effect of the story's RunPageInteractions method.
+ It should be a callable taking an action_runner, or an instance of
+ an exception to be raised.
+ """
+ if name is None:
+ name = _StoryNameFromUrl(url)
+ super(TestPage, self).__init__(
+ url, story_set, name=name, base_dir=story_set.base_dir)
+ self._run_side_effect = run_side_effect
+
+ def RunPageInteractions(self, action_runner):
+ if self._run_side_effect is not None:
+ if isinstance(self._run_side_effect, Exception):
+ raise self._run_side_effect # pylint: disable=raising-bad-type
+ else:
+ self._run_side_effect(action_runner)
+
+
+def SinglePageStorySet(url=None, name=None, base_dir=None,
+ story_run_side_effect=None):
+ """Create a simple StorySet with a single TestPage.
+
+ Args:
+ url: An optional URL for the page to load, in tests usually a local
+ 'file://' URI. Defaults to 'file://blank.html' which, if using the
+ default base_dir, points to a simple 'Hello World' html page.
+    name: An optional name for the story. If omitted, a reasonable default
+      is built from the url.
+ base_dir: A path on the local file system from which file URIs are served.
+ Defaults to serving pages from telemetry/internal/testing.
+ story_run_side_effect: Side effect of running the story. See TestPage
+ docstring for details.
+ """
+ if url is None:
+ url = 'file://blank.html'
+ if base_dir is None:
+ base_dir = util.GetUnittestDataDir()
+ story_set = story_module.StorySet(base_dir=base_dir)
+ story_set.AddStory(TestPage(story_set, url, name, story_run_side_effect))
+ return story_set
+
+
+class DummyStory(story_module.Story):
+ def __init__(self, name, tags=None, serving_dir=None, run_side_effect=None):
+ """A customizable dummy story.
+
+ It uses the TestSharedState, defined below with a mock platform, so tests
+ using these never actually involve a real browser.
+
+ Args:
+ name: A string with the name of the story.
+ tags: Optional sequence of tags for the story.
+ serving_dir: Optional path from which (in a real local story) contents
+ are served. Used in some tests but no local servers are actually set up.
+ run_side_effect: Optional side effect of the story's Run method.
+ It can be either an exception instance to raise, or a callable
+ with no arguments.
+ """
+ super(DummyStory, self).__init__(TestSharedState, name=name, tags=tags)
+ self._serving_dir = serving_dir
+ self._run_side_effect = run_side_effect
+
+ def Run(self, _):
+ if self._run_side_effect is not None:
+ if isinstance(self._run_side_effect, BaseException):
+ raise self._run_side_effect # pylint: disable=raising-bad-type
+ else:
+ self._run_side_effect()
+
+ @property
+ def serving_dir(self):
+ return self._serving_dir
+
+
+class DummyStorySet(story_module.StorySet):
+ def __init__(self, stories, cloud_bucket=None, abridging_tag=None, **kwargs):
+ """A customizable dummy story set.
+
+ Args:
+ stories: A list of either story names or objects to add to the set.
+ Instances of DummyStory are useful here.
+ cloud_bucket: Optional cloud storage bucket where (in a real story set)
+ data for WPR recordings is stored.
+ abridging_tag: Optional story tag used to define a subset of stories
+ to be run in abridged mode.
+ Additional kwargs are passed to the StorySet base class.
+ """
+ super(DummyStorySet, self).__init__(
+ cloud_storage_bucket=cloud_bucket, **kwargs)
+ self._abridging_tag = abridging_tag
+ assert stories, 'There should be at least one story.'
+ for story in stories:
+ if isinstance(story, basestring):
+ story = DummyStory(story)
+ self.AddStory(story)
+
+ def GetAbridgedStorySetTagFilter(self):
+ return self._abridging_tag
+
+
+def MockPlatform():
+ """Create a mock platform to be used by tests."""
+ mock_platform = mock.Mock(spec=platform_module.Platform)
+ mock_platform.CanMonitorThermalThrottling.return_value = False
+ mock_platform.GetArchName.return_value = None
+ mock_platform.GetOSName.return_value = None
+ mock_platform.GetOSVersionName.return_value = None
+ mock_platform.GetDeviceId.return_value = None
+ return mock_platform
+
+
+class TestSharedState(story_module.SharedState):
+ # Using a mock platform so there are no real actions done on the actual
+ # host platform; and allows callers to inspect or configure methods called.
+ mock_platform = MockPlatform()
+
+ def __init__(self, test, options, story_set, possible_browser):
+ super(TestSharedState, self).__init__(
+ test, options, story_set, possible_browser)
+ self._current_story = None
+
+ @property
+ def platform(self):
+ return self.mock_platform
+
+ def WillRunStory(self, story):
+ self._current_story = story
+
+ def CanRunStory(self, story):
+ return True
+
+ def RunStory(self, results):
+ self._current_story.Run(self)
+
+ def DidRunStory(self, results):
+ self._current_story = None
+
+ def TearDownState(self):
+ pass
+
+ def DumpStateUponStoryRunFailure(self, results):
+ pass
+
+
+def _StoryNameFromUrl(url):
+ """Turns e.g. 'file://path/to/name.html' into just 'name'."""
+ # Strip off URI scheme, params and query; keep only netloc and path.
+ uri = urlparse.urlparse(url)
+ filepath = posixpath.basename(uri.netloc + uri.path)
+ return posixpath.splitext(posixpath.basename(filepath))[0]
diff --git a/telemetry/telemetry/value/summary.py b/telemetry/telemetry/value/summary.py
deleted file mode 100644
index 73a68a297c..0000000000
--- a/telemetry/telemetry/value/summary.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from collections import defaultdict
-
-from telemetry.value import merge_values
-
-
-class Summary(object):
- """Computes summary values from the per-page-run values produced by a test.
-
- Some telemetry benchmark repeat a number of times in order to get a reliable
- measurement. The test does not have to handle merging of these runs:
- summarizer does it for you.
-
- For instance, if two pages run, 3 and 1 time respectively:
- ScalarValue(page1, 'foo', units='ms', 1)
- ScalarValue(page1, 'foo', units='ms', 1)
- ScalarValue(page1, 'foo', units='ms', 1)
- ScalarValue(page2, 'foo', units='ms', 2)
-
- Then summarizer will produce two sets of values. First,
- computed_per_page_values:
- [
- ListOfScalarValues(page1, 'foo', units='ms', [1,1,1])],
- ListOfScalarValues(page2, 'foo', units='ms', [2])]
- ]
-
- In addition, it will produce a summary value:
- [
- ListOfScalarValues(page=None, 'foo', units='ms', [1,1,1,2])]
- ]
-
- """
- def __init__(self, results, key_func=merge_values.DefaultKeyFunc):
- self._had_failures = results.had_failures
- self._computed_per_page_values = []
- self._computed_summary_values = []
- self._interleaved_computed_per_page_values_and_summaries = []
- self._key_func = key_func
- self._ComputePerPageValues(list(results.IterAllLegacyValues()))
-
- @property
- def computed_per_page_values(self):
- return self._computed_per_page_values
-
- @property
- def computed_summary_values(self):
- return self._computed_summary_values
-
- @property
- def interleaved_computed_per_page_values_and_summaries(self):
- """Returns the computed per page values and summary values interleaved.
-
- All the results for a given name are printed together. First per page
- values, then summary values.
-
- """
- return self._interleaved_computed_per_page_values_and_summaries
-
- def _ComputePerPageValues(self, all_values):
- # We will later need to determine how many values were originally created
- # for each value name, to apply a workaround meant to clean up the printf
- # output.
- num_successful_pages_for_key = defaultdict(int)
- for v in all_values:
- num_successful_pages_for_key[self._key_func(v)] += 1
-
- # By here, due to page repeat options, all_values_from_successful_pages
- # contains values of the same name not only from mulitple pages, but also
- # from the same name. So even if, for instance, only one page ran, it may
- # have run twice, producing two 'x' values.
- #
- # So, get rid of the repeated pages by merging.
- merged_page_values = merge_values.MergeLikeValuesFromSamePage(
- all_values, self._key_func)
-
- # Now we have a bunch of values, but there is only one value_name per page.
- # Suppose page1 and page2 ran, producing values x and y. We want to print
- # x for page1
- # x for page2
- # x for page1, page2 combined
- #
- # y for page1
- # y for page2
- # y for page1, page2 combined
- #
- # We already have the x values in the values array. But, we will need
- # them indexable by summary key.
- #
- # The following dict maps summary_key -> list of pages that have values of
- # that name.
- per_page_values_by_key = defaultdict(list)
- for value in merged_page_values:
- per_page_values_by_key[self._key_func(value)].append(value)
-
- # We already have the x values in the values array. But, we also need
- # the values merged across the pages. And, we will need them indexed by
- # summary key so that we can find them when printing out value names in
- # alphabetical order.
- merged_pages_value_by_key = {}
- if not self._had_failures:
- for value in merge_values.MergeLikeValuesFromDifferentPages(
- merged_page_values, self._key_func):
- value_key = self._key_func(value)
- assert value_key not in merged_pages_value_by_key
- merged_pages_value_by_key[value_key] = value
-
- keys = sorted(set([self._key_func(v) for v in merged_page_values]))
-
- # Time to walk through the values by key, printing first the page-specific
- # values and then the merged_site value.
- for key in keys:
- per_page_values = per_page_values_by_key.get(key, [])
-
- # Sort the values by their URL.
- sorted_per_page_values = list(per_page_values)
- sorted_per_page_values.sort(
- key=lambda per_page_values: per_page_values.page.name)
-
- # Output the page-specific results.
- num_successful_pages_for_this_key = (
- num_successful_pages_for_key[key])
- for per_page_value in sorted_per_page_values:
- self._ComputePerPageValue(per_page_value,
- num_successful_pages_for_this_key)
-
- # Output the combined values.
- merged_pages_value = merged_pages_value_by_key.get(key, None)
- if merged_pages_value:
- self._computed_summary_values.append(merged_pages_value)
- self._interleaved_computed_per_page_values_and_summaries.append(
- merged_pages_value)
-
- def _ComputePerPageValue(
- self, value, num_successful_pages_for_this_value_name):
- if num_successful_pages_for_this_value_name >= 1:
- # Save the result.
- self._computed_per_page_values.append(value)
- self._interleaved_computed_per_page_values_and_summaries.append(value)
diff --git a/telemetry/telemetry/value/summary_unittest.py b/telemetry/telemetry/value/summary_unittest.py
deleted file mode 100644
index e0a865d751..0000000000
--- a/telemetry/telemetry/value/summary_unittest.py
+++ /dev/null
@@ -1,399 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import math
-import os
-import unittest
-
-from telemetry import story
-from telemetry.internal.results import page_test_results
-from telemetry import page as page_module
-from telemetry.value import improvement_direction
-from telemetry.value import list_of_scalar_values
-from telemetry.value import scalar
-from telemetry.value import summary as summary_module
-
-
-class TestBase(unittest.TestCase):
- def setUp(self):
- story_set = story.StorySet(base_dir=os.path.dirname(__file__))
- story_set.AddStory(
- page_module.Page('http://www.bar.com/', story_set, story_set.base_dir,
- name='http://www.bar.com/'))
- story_set.AddStory(
- page_module.Page('http://www.baz.com/', story_set, story_set.base_dir,
- name='http://www.baz.com/'))
- story_set.AddStory(
- page_module.Page('http://www.foo.com/', story_set, story_set.base_dir,
- name='http://www.foo.com/'))
- self.story_set = story_set
-
- @property
- def pages(self):
- return self.story_set.stories
-
-
-class SummaryTest(TestBase):
- def testBasicSummary(self):
- page0 = self.pages[0]
- page1 = self.pages[1]
-
- results = page_test_results.PageTestResults()
-
- results.WillRunPage(page0)
- v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
- results.DidRunPage(page0)
-
- results.WillRunPage(page1)
- v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v1)
- results.DidRunPage(page1)
-
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- v0_list = list_of_scalar_values.ListOfScalarValues(
- page0, 'a', 'seconds', [3],
- improvement_direction=improvement_direction.UP)
- v1_list = list_of_scalar_values.ListOfScalarValues(
- page1, 'a', 'seconds', [7],
- improvement_direction=improvement_direction.UP)
- # Std is 0 because we only have one measurement per page.
- merged_value = list_of_scalar_values.ListOfScalarValues(
- None, 'a', 'seconds', [3, 7], std=0.0,
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(3, len(values))
- self.assertIn(v0_list, values)
- self.assertIn(v1_list, values)
- self.assertIn(merged_value, values)
-
- def testBasicSummaryWithOnlyOnePage(self):
- page0 = self.pages[0]
-
- results = page_test_results.PageTestResults()
-
- results.WillRunPage(page0)
- v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
- results.DidRunPage(page0)
-
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- v0_list = list_of_scalar_values.ListOfScalarValues(
- page0, 'a', 'seconds', [3],
- improvement_direction=improvement_direction.UP)
- merged_list = list_of_scalar_values.ListOfScalarValues(
- None, 'a', 'seconds', [3],
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(2, len(values))
- self.assertIn(v0_list, values)
- self.assertIn(merged_list, values)
-
- def testBasicSummaryNonuniformResults(self):
- page0 = self.pages[0]
- page1 = self.pages[1]
- page2 = self.pages[2]
-
- results = page_test_results.PageTestResults()
- results.WillRunPage(page0)
- v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
- v1 = scalar.ScalarValue(page0, 'b', 'seconds', 10,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v1)
- results.DidRunPage(page0)
-
- results.WillRunPage(page1)
- v2 = scalar.ScalarValue(page1, 'a', 'seconds', 3,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v2)
- v3 = scalar.ScalarValue(page1, 'b', 'seconds', 10,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v3)
- results.DidRunPage(page1)
-
- results.WillRunPage(page2)
- v4 = scalar.ScalarValue(page2, 'a', 'seconds', 7,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v4)
- # Note, page[2] does not report a 'b' metric.
- results.DidRunPage(page2)
-
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- v0_list = list_of_scalar_values.ListOfScalarValues(
- page0, 'a', 'seconds', [3],
- improvement_direction=improvement_direction.UP)
- v1_list = list_of_scalar_values.ListOfScalarValues(
- page0, 'b', 'seconds', [10],
- improvement_direction=improvement_direction.UP)
- v2_list = list_of_scalar_values.ListOfScalarValues(
- page1, 'a', 'seconds', [3],
- improvement_direction=improvement_direction.UP)
- v3_list = list_of_scalar_values.ListOfScalarValues(
- page1, 'b', 'seconds', [10],
- improvement_direction=improvement_direction.UP)
- v4_list = list_of_scalar_values.ListOfScalarValues(
- page2, 'a', 'seconds', [7],
- improvement_direction=improvement_direction.UP)
- # Std is 0 because we only have one measurement per page.
- a_summary = list_of_scalar_values.ListOfScalarValues(
- None, 'a', 'seconds', [3, 3, 7], std=0.0,
- improvement_direction=improvement_direction.UP)
- b_summary = list_of_scalar_values.ListOfScalarValues(
- None, 'b', 'seconds', [10, 10], std=0.0,
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(7, len(values))
- self.assertIn(v0_list, values)
- self.assertIn(v1_list, values)
- self.assertIn(v2_list, values)
- self.assertIn(v3_list, values)
- self.assertIn(v4_list, values)
- self.assertIn(a_summary, values)
- self.assertIn(b_summary, values)
-
- def testBasicSummaryPassAndFailPage(self):
- """If a page failed, only print summary for individual pages."""
- page0 = self.pages[0]
- page1 = self.pages[1]
-
- results = page_test_results.PageTestResults()
- results.WillRunPage(page0)
- v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
- results.Fail('message')
- results.DidRunPage(page0)
-
- results.WillRunPage(page1)
- v2 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v2)
- results.DidRunPage(page1)
-
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- v0_list = list_of_scalar_values.ListOfScalarValues(
- page0, 'a', 'seconds', [3],
- improvement_direction=improvement_direction.UP)
- v2_list = list_of_scalar_values.ListOfScalarValues(
- page1, 'a', 'seconds', [7],
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(2, len(values))
- self.assertIn(v0_list, values)
- self.assertIn(v2_list, values)
-
- def testRepeatedPagesetOneIterationOnePageFails(self):
- """Page fails on one iteration, no averaged results should print."""
- page0 = self.pages[0]
- page1 = self.pages[1]
-
- results = page_test_results.PageTestResults()
- results.WillRunPage(page0)
- v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
- results.DidRunPage(page0)
-
- results.WillRunPage(page1)
- v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v1)
- results.Fail('message')
- results.DidRunPage(page1)
-
- results.WillRunPage(page0)
- v3 = scalar.ScalarValue(page0, 'a', 'seconds', 4,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v3)
- results.DidRunPage(page0)
-
- results.WillRunPage(page1)
- v4 = scalar.ScalarValue(page1, 'a', 'seconds', 8,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v4)
- results.DidRunPage(page1)
-
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- page0_aggregated = list_of_scalar_values.ListOfScalarValues(
- page0, 'a', 'seconds', [3, 4],
- improvement_direction=improvement_direction.UP)
- page1_aggregated = list_of_scalar_values.ListOfScalarValues(
- page1, 'a', 'seconds', [7, 8],
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(2, len(values))
- self.assertIn(page0_aggregated, values)
- self.assertIn(page1_aggregated, values)
-
- def testRepeatedPages(self):
- page0 = self.pages[0]
- page1 = self.pages[1]
-
- results = page_test_results.PageTestResults()
- results.WillRunPage(page0)
- v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
- results.DidRunPage(page0)
-
- results.WillRunPage(page0)
- v2 = scalar.ScalarValue(page0, 'a', 'seconds', 4,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v2)
- results.DidRunPage(page0)
-
- results.WillRunPage(page1)
- v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v1)
- results.DidRunPage(page1)
-
- results.WillRunPage(page1)
- v3 = scalar.ScalarValue(page1, 'a', 'seconds', 8,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v3)
- results.DidRunPage(page1)
-
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- page0_aggregated = list_of_scalar_values.ListOfScalarValues(
- page0, 'a', 'seconds', [3, 4],
- improvement_direction=improvement_direction.UP)
- page1_aggregated = list_of_scalar_values.ListOfScalarValues(
- page1, 'a', 'seconds', [7, 8],
- improvement_direction=improvement_direction.UP)
- # Std is computed using pooled standard deviation.
- a_summary = list_of_scalar_values.ListOfScalarValues(
- None, 'a', 'seconds', [3, 4, 7, 8], std=math.sqrt(0.5),
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(3, len(values))
- self.assertIn(page0_aggregated, values)
- self.assertIn(page1_aggregated, values)
- self.assertIn(a_summary, values)
-
- def testPageRunsTwice(self):
- page0 = self.pages[0]
-
- results = page_test_results.PageTestResults()
-
- results.WillRunPage(page0)
- v0 = scalar.ScalarValue(page0, 'b', 'seconds', 2,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
- results.DidRunPage(page0)
-
- results.WillRunPage(page0)
- v1 = scalar.ScalarValue(page0, 'b', 'seconds', 3,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v1)
- results.DidRunPage(page0)
-
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- page0_aggregated = list_of_scalar_values.ListOfScalarValues(
- page0, 'b', 'seconds', [2, 3],
- improvement_direction=improvement_direction.UP)
- b_summary = list_of_scalar_values.ListOfScalarValues(
- None, 'b', 'seconds', [2, 3],
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(2, len(values))
- self.assertIn(page0_aggregated, values)
- self.assertIn(b_summary, values)
-
- def testListValue(self):
- page0 = self.pages[0]
- page1 = self.pages[1]
-
- results = page_test_results.PageTestResults()
-
- results.WillRunPage(page0)
- v0 = list_of_scalar_values.ListOfScalarValues(
- page0, 'b', 'seconds', [2, 2],
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
- results.DidRunPage(page0)
-
- results.WillRunPage(page1)
- v1 = list_of_scalar_values.ListOfScalarValues(
- page1, 'b', 'seconds', [3, 3],
- improvement_direction=improvement_direction.UP)
- results.AddValue(v1)
- results.DidRunPage(page1)
-
- summary = summary_module.Summary(results)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- b_summary = list_of_scalar_values.ListOfScalarValues(
- None, 'b', 'seconds', [2, 2, 3, 3], std=0.0,
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(3, len(values))
- self.assertIn(v0, values)
- self.assertIn(v1, values)
- self.assertIn(b_summary, values)
-
- def testSummaryUsesKeyFunc(self):
- page0 = self.pages[0]
- page1 = self.pages[1]
-
- results = page_test_results.PageTestResults()
-
- results.WillRunPage(page0)
- v0 = scalar.ScalarValue(page0, 'a', 'seconds', 20,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v0)
-
- v1 = scalar.ScalarValue(page0, 'b', 'seconds', 42,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v1)
- results.DidRunPage(page0)
-
- results.WillRunPage(page1)
- v2 = scalar.ScalarValue(page1, 'a', 'seconds', 20,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v2)
-
- v3 = scalar.ScalarValue(page1, 'b', 'seconds', 42,
- improvement_direction=improvement_direction.UP)
- results.AddValue(v3)
- results.DidRunPage(page1)
-
- summary = summary_module.Summary(
- results, key_func=lambda v: True)
- values = summary.interleaved_computed_per_page_values_and_summaries
-
- v0_list = list_of_scalar_values.ListOfScalarValues(
- page0, 'a', 'seconds', [20, 42],
- improvement_direction=improvement_direction.UP)
- v2_list = list_of_scalar_values.ListOfScalarValues(
- page1, 'a', 'seconds', [20, 42],
- improvement_direction=improvement_direction.UP)
- # Std is computed using pooled standard deviation.
- merged_value = list_of_scalar_values.ListOfScalarValues(
- None, 'a', 'seconds', [20, 42, 20, 42], std=math.sqrt(242.0),
- improvement_direction=improvement_direction.UP)
-
- self.assertEquals(3, len(values))
- self.assertIn(v0_list, values)
- self.assertIn(v2_list, values)
- self.assertIn(merged_value, values)
diff --git a/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py b/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py
index 9a04101899..949b16a13e 100644
--- a/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py
+++ b/telemetry/telemetry/web_perf/timeline_based_measurement_unittest.py
@@ -4,201 +4,69 @@
import shutil
import tempfile
+import unittest
+from telemetry import benchmark
from telemetry import decorators
-from telemetry.page import page as page_module
+from telemetry.internal.results import results_options
+from telemetry.internal import story_runner
from telemetry.testing import options_for_unittests
-from telemetry.testing import page_test_test_case
-from telemetry.timeline import chrome_trace_category_filter
-from telemetry.web_perf import timeline_based_measurement as tbm_module
-from tracing.value import histogram_set
-from tracing.value.diagnostics import date_range
-from tracing.value.diagnostics import generic_set
-from tracing.value.diagnostics import reserved_infos
+from telemetry.testing import test_stories
+from telemetry.web_perf import timeline_based_measurement
-class TestTimelinebasedMeasurementPage(page_module.Page):
- """A page used to test TBMv2 measurements."""
+class TestTimelineBenchmark(benchmark.Benchmark):
+ def __init__(self, story_run_side_effect=None):
+ super(TestTimelineBenchmark, self).__init__()
+ self._story_run_side_effect = story_run_side_effect
- def __init__(self, story_set, base_dir, trigger_animation=False,
- trigger_jank=False, trigger_slow=False,
- trigger_scroll_gesture=False, measure_memory=False,
- additional_metrics=None):
- super(TestTimelinebasedMeasurementPage, self).__init__(
- 'file://interaction_enabled_page.html', story_set, base_dir,
- name='interaction_enabled_page.html')
- self._trigger_animation = trigger_animation
- self._trigger_jank = trigger_jank
- self._trigger_slow = trigger_slow
- self._trigger_scroll_gesture = trigger_scroll_gesture
- self._measure_memory = measure_memory
- self._additional_metrics = additional_metrics
+ def CreateStorySet(self, _):
+ return test_stories.SinglePageStorySet(
+ story_run_side_effect=self._story_run_side_effect)
- def RunPageInteractions(self, action_runner):
- if self._measure_memory:
- action_runner.MeasureMemory()
- if self._trigger_animation:
- action_runner.TapElement('#animating-button')
- action_runner.WaitForJavaScriptCondition('window.animationDone')
- if self._trigger_jank:
- action_runner.TapElement('#jank-button')
- action_runner.WaitForJavaScriptCondition('window.jankScriptDone')
- if self._trigger_slow:
- action_runner.TapElement('#slow-button')
- action_runner.WaitForJavaScriptCondition('window.slowScriptDone')
- if self._trigger_scroll_gesture:
- with action_runner.CreateGestureInteraction('Scroll'):
- action_runner.ScrollPage()
-
- def GetExtraTracingMetrics(self):
- return self._additional_metrics or []
-
-class FailedTimelinebasedMeasurementPage(page_module.Page):
-
- def __init__(self, story_set, base_dir):
- super(FailedTimelinebasedMeasurementPage, self).__init__(
- 'file://interaction_enabled_page.html', story_set, base_dir,
- name='interaction_enabled_page.html')
+ def CreateCoreTimelineBasedMeasurementOptions(self):
+ options = timeline_based_measurement.Options()
+ options.config.enable_chrome_trace = True
+ options.SetTimelineBasedMetrics(['sampleMetric'])
+ return options
- def RunPageInteractions(self, action_runner):
- action_runner.TapElement('#does-not-exist')
+ @classmethod
+ def Name(cls):
+ return 'test_timeline_benchmark'
-class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase):
- """Tests for TimelineBasedMetrics (TBMv2), i.e. //tracing/tracing/metrics."""
+class TimelineBasedMeasurementTest(unittest.TestCase):
+ """Tests for TimelineBasedMeasurement which allows to record traces."""
def setUp(self):
- self._options = options_for_unittests.GetRunOptions(
+ self.options = options_for_unittests.GetRunOptions(
output_dir=tempfile.mkdtemp())
def tearDown(self):
- shutil.rmtree(self._options.output_dir)
-
- @decorators.Disabled('chromeos')
- @decorators.Disabled('win') # crbug.com/956812
- @decorators.Isolated
- def testTraceCaptureUponFailure(self):
- story_set = self.CreateEmptyPageSet()
- story_set.AddStory(
- FailedTimelinebasedMeasurementPage(story_set, story_set.base_dir))
-
- options = tbm_module.Options()
- options.config.enable_chrome_trace = True
- options.SetTimelineBasedMetrics(['sampleMetric'])
- tbm = tbm_module.TimelineBasedMeasurement(options)
+ shutil.rmtree(self.options.output_dir)
- results = self.RunMeasurement(tbm, story_set, run_options=self._options)
+ def RunBenchmarkAndReadResults(self, test_benchmark):
+ story_runner.RunBenchmark(test_benchmark, self.options)
+ test_results = results_options.ReadTestResults(
+ self.options.intermediate_dir)
+ self.assertEqual(len(test_results), 1)
+ return test_results[0]
- self.assertTrue(results.had_failures)
- runs = list(results.IterRunsWithTraces())
- self.assertEquals(1, len(runs))
-
- # Fails on chromeos: crbug.com/483212
- @decorators.Disabled('chromeos')
@decorators.Isolated
- def testTBM2ForSmoke(self):
- ps = self.CreateEmptyPageSet()
- ps.AddStory(TestTimelinebasedMeasurementPage(
- ps, ps.base_dir,
- additional_metrics=['sampleMetric']))
-
- options = tbm_module.Options()
- options.config.enable_chrome_trace = True
+ def testTraceCaptureUponSuccess(self):
+ test_benchmark = TestTimelineBenchmark()
+ results = self.RunBenchmarkAndReadResults(test_benchmark)
+ self.assertEqual(results['status'], 'PASS')
+ # Assert that we can find a Chrome trace.
+ self.assertTrue(any(
+ n.startswith('trace/traceEvents') for n in results['outputArtifacts']))
- tbm = tbm_module.TimelineBasedMeasurement(options)
- results = self.RunMeasurement(tbm, ps, self._options)
- self.assertFalse(results.had_failures)
- tbm_metrics = []
- for story_run in results.IterStoryRuns():
- tbm_metrics += story_run.tbm_metrics
- self.assertEqual(tbm_metrics, ['sampleMetric'])
- histogram_dicts = results.AsHistogramDicts()
- hs = histogram_set.HistogramSet()
- hs.ImportDicts(histogram_dicts)
- self.assertEquals(4, len(hs))
- hist = hs.GetFirstHistogram()
- benchmarks = hist.diagnostics.get(reserved_infos.BENCHMARKS.name)
- self.assertIsInstance(benchmarks, generic_set.GenericSet)
- self.assertEquals(1, len(benchmarks))
- self.assertEquals(page_test_test_case.BENCHMARK_NAME,
- list(benchmarks)[0])
- stories = hist.diagnostics.get(reserved_infos.STORIES.name)
- self.assertIsInstance(stories, generic_set.GenericSet)
- self.assertEquals(1, len(stories))
- self.assertEquals('interaction_enabled_page.html', list(stories)[0])
- repeats = hist.diagnostics.get(reserved_infos.STORYSET_REPEATS.name)
- self.assertIsInstance(repeats, generic_set.GenericSet)
- self.assertEquals(1, len(repeats))
- self.assertEquals(0, list(repeats)[0])
- hist = hs.GetFirstHistogram()
- trace_start = hist.diagnostics.get(reserved_infos.TRACE_START.name)
- self.assertIsInstance(trace_start, date_range.DateRange)
-
- v_foo = results.FindAllPageSpecificValuesNamed('foo_avg')
- self.assertEquals(len(v_foo), 1)
- self.assertEquals(v_foo[0].value, 50)
- self.assertIsNotNone(v_foo[0].page)
-
- @decorators.Disabled('reference')
- @decorators.Disabled('win', 'chromeos') # https://crbug.com/765114
- @decorators.Disabled('mac', 'linux') # https://crbug.com/956812
@decorators.Isolated
- def testHeapProfilerForSmoke(self):
- story_set = self.CreateEmptyPageSet()
- story_set.AddStory(TestTimelinebasedMeasurementPage(
- story_set, story_set.base_dir, measure_memory=True, trigger_slow=True))
-
- cat_filter = chrome_trace_category_filter.ChromeTraceCategoryFilter(
- filter_string='-*,disabled-by-default-memory-infra')
- options = tbm_module.Options(overhead_level=cat_filter)
- options.config.enable_chrome_trace = True
- options.SetTimelineBasedMetrics(['memoryMetric'])
- tbm = tbm_module.TimelineBasedMeasurement(options)
-
- self._options.browser_options.AppendExtraBrowserArgs(
- ['--memlog=all', '--memlog-sampling', '--memlog-stack-mode=pseudo'])
- results = self.RunMeasurement(tbm, story_set, run_options=self._options)
-
- self.assertFalse(results.had_failures)
-
- DUMP_COUNT_METRIC = 'memory:chrome:all_processes:dump_count'
- dumps_detailed = results.FindAllPageSpecificValuesNamed(
- DUMP_COUNT_METRIC + ':detailed_avg')
- dumps_heap_profiler = results.FindAllPageSpecificValuesNamed(
- DUMP_COUNT_METRIC + ':heap_profiler_avg')
- self.assertEquals(1, len(dumps_detailed))
- self.assertEquals(1, len(dumps_heap_profiler))
- self.assertGreater(dumps_detailed[0].value, 0)
- self.assertEquals(dumps_detailed[0].value, dumps_heap_profiler[0].value)
-
- # TODO(ksakamoto): enable this in reference once the reference build of
- # telemetry is updated.
- # Disabled on all platforms due to flakiness: https://crbug.com/947269.
- @decorators.Disabled('reference')
- @decorators.Disabled('all')
- def testFirstPaintMetricSmoke(self):
- story_set = self.CreateEmptyPageSet()
- story_set.AddStory(
- TestTimelinebasedMeasurementPage(story_set, story_set.base_dir))
-
- cat_filter = chrome_trace_category_filter.ChromeTraceCategoryFilter(
- filter_string='*,blink.console,navigation,blink.user_timing,loading,' +
- 'devtools.timeline,disabled-by-default-blink.debug.layout')
-
- options = tbm_module.Options(overhead_level=cat_filter)
- options.SetTimelineBasedMetrics(['loadingMetric'])
-
- tbm = tbm_module.TimelineBasedMeasurement(options)
- results = self.RunMeasurement(tbm, story_set, run_options=self._options)
-
- self.assertFalse(results.had_failures)
- v_ttfcp_max = results.FindAllPageSpecificValuesNamed(
- 'timeToFirstContentfulPaint_max')
- self.assertEquals(len(v_ttfcp_max), 1)
- self.assertIsNotNone(v_ttfcp_max[0].page)
- self.assertGreater(v_ttfcp_max[0].value, 0)
-
- v_ttfmp_max = results.FindAllPageSpecificValuesNamed(
- 'timeToFirstMeaningfulPaint_max')
- self.assertEquals(len(v_ttfmp_max), 1)
- self.assertIsNotNone(v_ttfmp_max[0].page)
+ def testTraceCaptureUponFailure(self):
+ test_benchmark = TestTimelineBenchmark(
+ story_run_side_effect=lambda a: a.TapElement('#does-not-exist'))
+ results = self.RunBenchmarkAndReadResults(test_benchmark)
+ self.assertEqual(results['status'], 'FAIL')
+ # Assert that we can find a Chrome trace.
+ self.assertTrue(any(
+ n.startswith('trace/traceEvents') for n in results['outputArtifacts']))
diff --git a/third_party/snap-it/tests/tests.js b/third_party/snap-it/tests/tests.js
index ae80127100..fef6e8b3f2 100644
--- a/third_party/snap-it/tests/tests.js
+++ b/third_party/snap-it/tests/tests.js
@@ -211,7 +211,7 @@ QUnit.test('processTree: background-image with complex multiple images and local
'url("local/targetId.png"),' +
'url("data:image/gif;base64,R0lGODlhAQABAIAAAA"),' +
'url("local/targetId.png"),' +
- 'linear-gradient(to right top,red,rgb(240,109,6)),' +
+ 'linear-gradient(to right top,rgb(255,0,0),rgb(240,109,6)),' +
'url("local/targetId.png")');
});
diff --git a/third_party/typ/typ/artifacts.py b/third_party/typ/typ/artifacts.py
index c23b0041bf..ee36384b45 100644
--- a/third_party/typ/typ/artifacts.py
+++ b/third_party/typ/typ/artifacts.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import contextlib
import os
import sys
@@ -21,9 +20,10 @@
else:
import urllib.parse as urlparse
+from typ.host import Host
class Artifacts(object):
- def __init__(self, output_dir, iteration=0, test_name='',
+ def __init__(self, output_dir, host, iteration=0, test_name='',
intial_results_base_dir=False, repeat_tests=False):
"""Creates an artifact results object.
@@ -51,6 +51,16 @@ def __init__(self, output_dir, iteration=0, test_name='',
The original design doc for this class can be found at
https://docs.google.com/document/d/1gChmrnkHT8_MuSCKlGo-hGPmkEzg425E8DASX57ODB0/edit?usp=sharing,
open to all chromium.org accounts.
+
+ args:
+ output_dir: Output directory where artifacts will be saved.
+ iteration: Retry attempt number for test.
+ test_name: Name of test, which will be used to create a sub directory for test artifacts
+ intial_results_base_dir: Flag to create a sub directory for initial results
+ repeat_tests: Flag to signal that tests are repeated and therefore the verification to prevent
+ overwriting of artifacts should be skipped
+ file_manager: File manager object which is supplied by the test runner. The object needs to support
+ the exists, open, maybe_make_directory, dirname and join member functions.
"""
self._output_dir = output_dir
self._iteration = iteration
@@ -61,17 +71,28 @@ def __init__(self, output_dir, iteration=0, test_name='',
self._artifact_set = set()
self._intial_results_base_dir = intial_results_base_dir
self._repeat_tests = repeat_tests
+ self._host = host
- def artifacts_sub_directory(self):
+ def ArtifactsSubDirectory(self):
sub_dir = self._test_base_dir
if self._iteration:
- sub_dir = os.path.join(sub_dir, 'retry_%d' % self._iteration)
+ sub_dir = self._host.join(sub_dir, 'retry_%d' % self._iteration)
elif self._intial_results_base_dir:
- sub_dir = os.path.join(sub_dir, 'initial')
+ sub_dir = self._host.join(sub_dir, 'initial')
return sub_dir
- @contextlib.contextmanager
- def CreateArtifact(self, artifact_name, file_relative_path):
+ def AddArtifact(self, artifact_name, path, raise_exception_for_duplicates=True):
+ if path in self.artifacts.get(artifact_name, []):
+ if not self._repeat_tests and raise_exception_for_duplicates:
+ raise ValueError('%s already exists in artifacts list for %s.' % (
+ path, artifact_name))
+ else:
+ return
+ self.artifacts.setdefault(artifact_name, []).append(path)
+
+ def CreateArtifact(
+ self, artifact_name, file_relative_path, data, force_overwrite=False,
+ write_as_text=False):
"""Creates an artifact and yields a handle to its File object.
Args:
@@ -79,20 +100,22 @@ def CreateArtifact(self, artifact_name, file_relative_path):
"reftest_mismatch_actual" or "screenshot".
"""
self._AssertOutputDir()
- file_relative_path = os.path.join(
- self.artifacts_sub_directory(), file_relative_path)
- abs_artifact_path = os.path.join(self._output_dir, file_relative_path)
-
- if not os.path.exists(os.path.dirname(abs_artifact_path)):
- os.makedirs(os.path.dirname(abs_artifact_path))
-
- if os.path.exists(abs_artifact_path) and not self._repeat_tests:
- raise ValueError('%s already exists.' % (abs_artifact_path))
-
- self.artifacts.setdefault(artifact_name, []).append(file_relative_path)
-
- with open(abs_artifact_path, 'wb') as f:
- yield f
+ file_relative_path = self._host.join(
+ self.ArtifactsSubDirectory(), file_relative_path)
+ abs_artifact_path = self._host.join(self._output_dir, file_relative_path)
+
+ if (not self._repeat_tests and
+ not force_overwrite and self._host.exists(abs_artifact_path)):
+ raise ValueError('%s already exists.' % abs_artifact_path)
+
+ self._host.maybe_make_directory(self._host.dirname(abs_artifact_path))
+
+ if file_relative_path not in self.artifacts.get(artifact_name, []):
+ self.AddArtifact(artifact_name, file_relative_path)
+ if write_as_text:
+ self._host.write_text_file(abs_artifact_path, data)
+ else:
+ self._host.write_binary_file(abs_artifact_path, data)
def CreateLink(self, artifact_name, path):
"""Creates a special link/URL artifact.
diff --git a/third_party/typ/typ/fakes/host_fake.py b/third_party/typ/typ/fakes/host_fake.py
index a83e62bb83..a99d27c906 100644
--- a/third_party/typ/typ/fakes/host_fake.py
+++ b/third_party/typ/typ/fakes/host_fake.py
@@ -162,7 +162,7 @@ def join(self, *comps):
p = '/'.join(comps)
return p
- def maybe_mkdir(self, *comps):
+ def maybe_make_directory(self, *comps):
path = self.abspath(self.join(*comps))
if path not in self.dirs:
self.dirs.add(path)
@@ -239,7 +239,7 @@ def write_text_file(self, path, contents):
def _write(self, path, contents):
full_path = self.abspath(path)
- self.maybe_mkdir(self.dirname(full_path))
+ self.maybe_make_directory(self.dirname(full_path))
self.files[full_path] = contents
self.written_files[full_path] = contents
diff --git a/third_party/typ/typ/host.py b/third_party/typ/typ/host.py
index 92650e693a..a1d5953dde 100644
--- a/third_party/typ/typ/host.py
+++ b/third_party/typ/typ/host.py
@@ -129,7 +129,7 @@ def isfile(self, *comps):
def join(self, *comps):
return os.path.join(*comps)
- def maybe_mkdir(self, *comps):
+ def maybe_make_directory(self, *comps):
path = self.abspath(self.join(*comps))
if not self.exists(path):
os.makedirs(path)
diff --git a/third_party/typ/typ/json_results.py b/third_party/typ/typ/json_results.py
index c3a784115a..459451688b 100644
--- a/third_party/typ/typ/json_results.py
+++ b/third_party/typ/typ/json_results.py
@@ -196,11 +196,11 @@ def _results_for_test(test_name, results):
value['expected'] = ' '.join(sorted(r.expected))
# Handle artifacts
- if not r.artifacts or not r.artifacts.artifacts:
+ if not r.artifacts:
continue
if 'artifacts' not in value:
value['artifacts'] = {}
- for artifact_name, artifacts in r.artifacts.artifacts.items():
+ for artifact_name, artifacts in r.artifacts.items():
value['artifacts'].setdefault(artifact_name, []).extend(artifacts)
if not actuals: # pragma: untested
diff --git a/third_party/typ/typ/runner.py b/third_party/typ/typ/runner.py
index 0a34cb32fb..0111ea1cc6 100644
--- a/third_party/typ/typ/runner.py
+++ b/third_party/typ/typ/runner.py
@@ -1046,7 +1046,7 @@ def _run_one_test(child, test_input):
err=err, pid=pid), False)
art = artifacts.Artifacts(
- child.artifact_output_dir, test_input.iteration, test_name)
+ child.artifact_output_dir, h, test_input.iteration, test_name)
test_case = tests[0]
if isinstance(test_case, TypTestCase):
@@ -1071,7 +1071,7 @@ def _run_one_test(child, test_input):
return (_result_from_test_result(test_result, test_name, started, took, out,
err, child.worker_num, pid,
expected_results, child.has_expectations,
- art),
+ art.artifacts),
should_retry_on_failure)
diff --git a/third_party/typ/typ/test_case.py b/third_party/typ/typ/test_case.py
index 9610ace7ac..2c7f942879 100644
--- a/third_party/typ/typ/test_case.py
+++ b/third_party/typ/typ/test_case.py
@@ -39,7 +39,7 @@ def _write_files(self, host, files):
for path, contents in list(files.items()):
dirname = host.dirname(path)
if dirname:
- host.maybe_mkdir(dirname)
+ host.maybe_make_directory(dirname)
host.write_text_file(path, contents)
def _read_files(self, host, tmpdir):
diff --git a/third_party/typ/typ/tests/artifacts_test.py b/third_party/typ/typ/tests/artifacts_test.py
index e81e7c8661..18a4b8e2ea 100644
--- a/third_party/typ/typ/tests/artifacts_test.py
+++ b/third_party/typ/typ/tests/artifacts_test.py
@@ -18,112 +18,134 @@
import unittest
from typ import artifacts
-
+from typ.fakes.host_fake import FakeHost
class ArtifactsArtifactCreationTests(unittest.TestCase):
- def _VerifyPathAndContents(
- self, output_dir, file_rel_path, contents, iteration=0, test_base_dir='',
- intial_results_base_dir=False):
- path = output_dir
- if test_base_dir:
- path = os.path.join(path, test_base_dir)
- if iteration:
- path = os.path.join(path, 'retry_%d' % iteration)
- elif intial_results_base_dir:
- path = os.path.join(path, 'initial')
- path = os.path.join(path, file_rel_path)
- self.assertTrue(os.path.exists(path))
- with open(path, 'r') as f:
- self.assertEqual(f.read(), contents)
-
def test_create_artifact_writes_to_disk_iteration_0_no_test_dir(self):
- """Tests CreateArtifact will write to disk at the correct location."""
- tempdir = tempfile.mkdtemp()
- try:
- ar = artifacts.Artifacts(tempdir)
- file_rel_path = os.path.join('stdout', 'text.txt')
- with ar.CreateArtifact('artifact_name', file_rel_path) as f:
- f.write(b'contents')
- self._VerifyPathAndContents(tempdir, file_rel_path, b'contents')
- finally:
- shutil.rmtree(tempdir)
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(output_dir, host)
+ file_rel_path = host.join('stdout', 'test.jpg')
+ ar.CreateArtifact('artifact_name', file_rel_path, b'contents')
+ self.assertEqual(
+ host.read_binary_file(
+ host.join(output_dir, 'stdout', 'test.jpg')),
+ b'contents')
def test_create_artifact_writes_to_disk_iteration_1_no_test_dir(self):
- """Tests CreateArtifact will write to disk at the correct location."""
- tempdir = tempfile.mkdtemp()
- try:
- ar = artifacts.Artifacts(tempdir, iteration=1)
- file_rel_path = os.path.join('stdout', 'text.txt')
- with ar.CreateArtifact('artifact_name', file_rel_path) as f:
- f.write(b'contents')
- self._VerifyPathAndContents(tempdir, file_rel_path, b'contents', iteration=1)
- finally:
- shutil.rmtree(tempdir)
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(
+ output_dir, host, iteration=1)
+ file_rel_path = host.join('stdout', 'test.jpg')
+ ar.CreateArtifact('artifact_name', file_rel_path, b'contents')
+ self.assertEqual(
+ host.read_binary_file(
+ host.join(output_dir, 'retry_1', 'stdout', 'test.jpg')),
+ b'contents')
def test_create_artifact_writes_to_disk_iteration_1_test_dir(self):
- """Tests CreateArtifact will write to disk at the correct location."""
- tempdir = tempfile.mkdtemp()
- try:
- ar = artifacts.Artifacts(tempdir, iteration=1, test_name='a.b.c')
- file_rel_path = os.path.join('stdout', 'text.txt')
- with ar.CreateArtifact('artifact_name', file_rel_path) as f:
- f.write(b'contents')
- self._VerifyPathAndContents(
- tempdir, file_rel_path, b'contents', iteration=1, test_base_dir='a.b.c')
- finally:
- shutil.rmtree(tempdir)
-
- def test_create_artifact_overwriting_artifact_raises_value_error(self):
- """Tests CreateArtifact will write to disk at the correct location."""
- tempdir = tempfile.mkdtemp()
- try:
- ar = artifacts.Artifacts(tempdir, iteration=1, test_name='a.b.c')
- file_rel_path = os.path.join('stdout', 'text.txt')
- with ar.CreateArtifact('artifact_name', file_rel_path) as f:
- f.write(b'contents')
- ar = artifacts.Artifacts(tempdir, iteration=0, test_name='a.b.c')
- file_rel_path = os.path.join('retry_1', 'stdout', 'text.txt')
- with self.assertRaises(ValueError) as ve:
- with ar.CreateArtifact('artifact_name', file_rel_path) as f:
- f.write(b'contents')
- self.assertIn('already exists.', str(ve.exception))
- finally:
- shutil.rmtree(tempdir)
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(
+ output_dir, host, iteration=1, test_name='a.b.c')
+ file_rel_path = host.join('stdout', 'test.jpg')
+ ar.CreateArtifact('artifact_name', file_rel_path, b'contents')
+ self.assertEqual(
+ host.read_binary_file(
+ host.join(output_dir, 'a.b.c', 'retry_1', 'stdout', 'test.jpg')),
+ b'contents')
+
+ def test_overwriting_artifact_raises_value_error(self):
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(
+ output_dir, host, iteration=0, test_name='retry_1')
+ file_rel_path = host.join('stdout', 'test.jpg')
+ ar.CreateArtifact('artifact_name', file_rel_path, b'contents')
+ ar1 = artifacts.Artifacts(
+ output_dir, host, iteration=1)
+ with self.assertRaises(ValueError) as ve:
+ ar1.CreateArtifact('artifact_name', file_rel_path, b'overwritten contents')
+ self.assertIn('already exists', str(ve.exception))
+
+ def test_force_overwriting_artifact_does_not_raise_error(self):
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(
+ output_dir, host, iteration=0, test_name='a.b.c', intial_results_base_dir=True)
+ file_rel_path = host.join('stdout', 'test.txt')
+ ar.CreateArtifact('artifact_name', file_rel_path, 'contents',
+ write_as_text=True)
+ self.assertEqual(
+ host.read_text_file(
+ host.join(output_dir, 'a.b.c', 'initial', 'stdout', 'test.txt')),
+ 'contents')
+ ar.CreateArtifact('artifact_name', file_rel_path, 'overwritten contents',
+ force_overwrite=True, write_as_text=True)
+ self.assertEqual(
+ host.read_text_file(
+ host.join(output_dir, 'a.b.c', 'initial', 'stdout', 'test.txt')),
+ 'overwritten contents')
def test_create_artifact_writes_to_disk_initial_results_dir(self):
- """Tests CreateArtifact will write to disk at the correct location."""
- tempdir = tempfile.mkdtemp()
- try:
- ar = artifacts.Artifacts(
- tempdir, iteration=0, test_name='a.b.c', intial_results_base_dir=True)
- file_rel_path = os.path.join('stdout', 'text.txt')
- with ar.CreateArtifact('artifact_name', file_rel_path) as f:
- f.write(b'contents')
- self._VerifyPathAndContents(
- tempdir, file_rel_path, b'contents', iteration=0, test_base_dir='a.b.c',
- intial_results_base_dir=True)
- finally:
- shutil.rmtree(tempdir)
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(
+ output_dir, host, iteration=0, test_name='a.b.c', intial_results_base_dir=True)
+ file_rel_path = host.join('stdout', 'test.jpg')
+ ar.CreateArtifact('artifact_name', file_rel_path, b'contents')
+ self.assertEqual(
+ host.read_binary_file(host.join(output_dir, 'a.b.c', 'initial', 'stdout', 'test.jpg')),
+ b'contents')
+
+ def test_file_manager_writes_file(self):
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(output_dir, host, iteration=0)
+ file_path = host.join('failures', 'stderr.txt')
+ ar.CreateArtifact('artifact_name', file_path, 'exception raised',
+ write_as_text=True)
+ self.assertEqual(
+ host.read_text_file(file_path), 'exception raised')
+
+ def test_duplicate_artifact_raises_error_when_added_to_list(self):
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(output_dir, host, iteration=0)
+ ar.AddArtifact('artifact_name', 'foo.txt')
+ with self.assertRaises(ValueError) as ve:
+ ar.AddArtifact('artifact_name', 'foo.txt')
+ self.assertIn('already exists', str(ve.exception))
+
+ def test_dont_raise_value_error_for_dupl_in_add_artifacts(self):
+ host = FakeHost()
+ output_dir = '%stmp' % host.sep
+ ar = artifacts.Artifacts(output_dir, host, iteration=0)
+ ar.AddArtifact('artifact_name', 'foo.txt')
+ ar.AddArtifact('artifact_name', 'foo.txt',
+ raise_exception_for_duplicates=False)
+ self.assertEqual(ar.artifacts['artifact_name'], ['foo.txt'])
class ArtifactsLinkCreationTests(unittest.TestCase):
def test_create_link(self):
- ar = artifacts.Artifacts(None)
+ ar = artifacts.Artifacts('', FakeHost())
ar.CreateLink('link', 'https://testsite.com')
self.assertEqual(ar.artifacts, {'link': ['https://testsite.com']})
def test_create_link_invalid_url(self):
- ar = artifacts.Artifacts(None)
+ ar = artifacts.Artifacts('', FakeHost())
with self.assertRaises(ValueError):
ar.CreateLink('link', 'https:/malformedurl.com')
def test_create_link_non_https(self):
- ar = artifacts.Artifacts(None)
+ ar = artifacts.Artifacts('', FakeHost())
with self.assertRaises(ValueError):
ar.CreateLink('link', 'http://testsite.com')
def test_create_link_newlines(self):
- ar = artifacts.Artifacts(None)
+ ar = artifacts.Artifacts('', FakeHost())
with self.assertRaises(ValueError):
ar.CreateLink('link', 'https://some\nbadurl.com')
diff --git a/third_party/typ/typ/tests/host_test.py b/third_party/typ/typ/tests/host_test.py
index 8980c303c0..fead951c2e 100644
--- a/third_party/typ/typ/tests/host_test.py
+++ b/third_party/typ/typ/tests/host_test.py
@@ -70,7 +70,7 @@ def test_files(self):
h.chdir(dirpath)
self.assertIn(dirpath, h.getcwd())
- h.maybe_mkdir('bar')
+ h.maybe_make_directory('bar')
self.assertTrue(h.exists(dirpath, 'bar'))
self.assertTrue(h.isdir(dirpath, 'bar'))
self.assertFalse(h.isfile(dirpath, 'bar'))
diff --git a/third_party/typ/typ/tests/json_results_test.py b/third_party/typ/typ/tests/json_results_test.py
index 380d59011f..7a009a01b2 100644
--- a/third_party/typ/typ/tests/json_results_test.py
+++ b/third_party/typ/typ/tests/json_results_test.py
@@ -133,7 +133,7 @@ def test_artifacts_and_types_added(self):
result_set = json_results.ResultSet()
result_set.add(json_results.Result(
'foo_test.FooTest.foobar', json_results.ResultType.Pass,
- 0, 0.2, 0, artifacts=ar))
+ 0, 0.2, 0, artifacts=ar.artifacts))
full_results = json_results.make_full_results(
{'foo': 'bar'}, 0, test_names, result_set)
@@ -151,13 +151,13 @@ def test_artifacts_merged(self):
ar.artifacts = {'artifact_name': ['a/b/c.txt']}
result_set.add(json_results.Result(
'foo_test.FooTest.foobar', json_results.ResultType.Failure,
- 0, 0.2, 0, artifacts=ar))
+ 0, 0.2, 0, artifacts=ar.artifacts))
ar2 = FakeArtifacts()
ar2.artifacts = {'artifact_name': ['d/e/f.txt']}
result_set.add(json_results.Result(
'foo_test.FooTest.foobar', json_results.ResultType.Failure,
- 0, 0.2, 0, artifacts=ar2))
+ 0, 0.2, 0, artifacts=ar2.artifacts))
full_results = json_results.make_full_results(
{'foo': 'bar'}, 0, test_names, result_set)
diff --git a/third_party/typ/typ/tests/main_test.py b/third_party/typ/typ/tests/main_test.py
index 8441f90700..9a6ba16a39 100644
--- a/third_party/typ/typ/tests/main_test.py
+++ b/third_party/typ/typ/tests/main_test.py
@@ -42,8 +42,7 @@
class ArtifactTest(test_case.TestCase):
def test_produce_artifact_for_retries(self):
- with self.artifacts.CreateArtifact('artifact_name', 'test.txt') as f:
- f.write('content')
+ self.artifacts.CreateArtifact('artifact_name', 'test.txt', 'content')
self.fail()
"""
diff --git a/third_party/vinn/third_party/v8/mac/x86_64/d8 b/third_party/vinn/third_party/v8/mac/x86_64/d8
index 57f4a5259a..0e84496252 100755
Binary files a/third_party/vinn/third_party/v8/mac/x86_64/d8 and b/third_party/vinn/third_party/v8/mac/x86_64/d8 differ
diff --git a/third_party/vinn/third_party/v8/win/AMD64/d8.exe b/third_party/vinn/third_party/v8/win/AMD64/d8.exe
index fdf10e96ab..8584e8a1b4 100644
Binary files a/third_party/vinn/third_party/v8/win/AMD64/d8.exe and b/third_party/vinn/third_party/v8/win/AMD64/d8.exe differ
diff --git a/tracing/bin/merge_histograms b/tracing/bin/merge_histograms
index 1d3f1ccca1..ebf65d8615 100755
--- a/tracing/bin/merge_histograms
+++ b/tracing/bin/merge_histograms
@@ -21,7 +21,7 @@ def Main(argv):
help='Path to a HistogramSet JSON file. (output)')
parser.add_argument('groupby', nargs='+',
help='One or more grouping keys (name, benchmark, ' +
- 'time, storyset_repeat, story_repeat, story, tir, label)')
+ 'time, storyset_repeat, story_repeat, story, label)')
args = parser.parse_args(argv[1:])
merged = merge_histograms.MergeHistograms(args.input, args.groupby)
diff --git a/tracing/tracing/base/unittest/interactive_test_runner.html b/tracing/tracing/base/unittest/interactive_test_runner.html
index d83012033b..ce0b1c8f7a 100644
--- a/tracing/tracing/base/unittest/interactive_test_runner.html
+++ b/tracing/tracing/base/unittest/interactive_test_runner.html
@@ -614,6 +614,9 @@ Tests
// });
},
function(err) {
+ if (state.headless) {
+ tr.b.postAsync('/tracing/notify_test_error', err.stack);
+ }
hideLoadingOverlay();
tr.showPanic('Module loading failure', err);
throw err;
diff --git a/tracing/tracing/metrics/compare_samples_cmdline.html b/tracing/tracing/metrics/compare_samples_cmdline.html
index f774f03f1b..b43b82614b 100644
--- a/tracing/tracing/metrics/compare_samples_cmdline.html
+++ b/tracing/tracing/metrics/compare_samples_cmdline.html
@@ -47,7 +47,7 @@
return Math.exp(sumOfLogs / count);
}
-function guessFullTIRMetricName(metricName) {
+function guessFullMetricName(metricName) {
const parts = metricName.split('/');
if (parts.length === 2) {
return metricName + '/summary';
@@ -104,9 +104,9 @@
const allValues = valuesFromCharts(listOfCharts, metricName);
if (allValues.length > 0) return allValues;
- // If this had a tir_label, the "summary" part may have been stripped by
+ // If this had a grouping_label, the "summary" part may have been stripped by
// the dashboard during upload. We can re-add it here.
- const fullMetricName = guessFullTIRMetricName(metricName);
+ const fullMetricName = guessFullMetricName(metricName);
if (!fullMetricName) return [];
return valuesFromCharts(listOfCharts, fullMetricName);
diff --git a/tracing/tracing/metrics/compare_samples_unittest.py b/tracing/tracing/metrics/compare_samples_unittest.py
index eae8e3be56..2a766b2477 100644
--- a/tracing/tracing/metrics/compare_samples_unittest.py
+++ b/tracing/tracing/metrics/compare_samples_unittest.py
@@ -236,26 +236,26 @@ def testCompareUnlikelyRegressionWithMultipleRuns(self):
lower_values, higher_values, '/'.join(metric)).stdout)
self.assertEqual(result['result']['significance'], FAIL_TO_REJECT)
- def testCompareTIRLabel(self):
- tir_metric = ('some_chart', 'some_label', 'some_trace')
- tir_metric_name = ('%s@@%s' % (tir_metric[1], tir_metric[0]), tir_metric[2])
+ def testCompareGroupingLabel(self):
+ parts = ('some_chart', 'some_label', 'some_trace')
+ metric_name = ('%s@@%s' % (parts[1], parts[0]), parts[2])
lower_values = ','.join(self.MakeCharts(
- metric=tir_metric_name, seed='lower', mu=10, sigma=1, n=10))
+ metric=metric_name, seed='lower', mu=10, sigma=1, n=10))
higher_values = ','.join(self.MakeCharts(
- metric=tir_metric_name, seed='higher', mu=20, sigma=2, n=10))
+ metric=metric_name, seed='higher', mu=20, sigma=2, n=10))
result = json.loads(compare_samples.CompareSamples(
- lower_values, higher_values, '/'.join(tir_metric)).stdout)
+ lower_values, higher_values, '/'.join(parts)).stdout)
self.assertEqual(result['result']['significance'], REJECT)
- def testCompareTIRLabelMissingSummary(self):
- tir_metric = ('some_chart', 'some_label')
- tir_metric_name = ('%s@@%s' % (tir_metric[1], tir_metric[0]), 'summary')
+ def testCompareGroupingLabelMissingSummary(self):
+ parts = ('some_chart', 'some_label')
+ metric_name = ('%s@@%s' % (parts[1], parts[0]), 'summary')
lower_values = ','.join(self.MakeCharts(
- metric=tir_metric_name, seed='lower', mu=10, sigma=1, n=10))
+ metric=metric_name, seed='lower', mu=10, sigma=1, n=10))
higher_values = ','.join(self.MakeCharts(
- metric=tir_metric_name, seed='higher', mu=20, sigma=2, n=10))
+ metric=metric_name, seed='higher', mu=20, sigma=2, n=10))
result = json.loads(compare_samples.CompareSamples(
- lower_values, higher_values, '/'.join(tir_metric)).stdout)
+ lower_values, higher_values, '/'.join(parts)).stdout)
self.assertEqual(result['result']['significance'], REJECT)
def testCompareInsufficientData(self):
diff --git a/tracing/tracing/metrics/uma_metric.html b/tracing/tracing/metrics/uma_metric.html
index 912a4bdff8..8f741b9cf7 100644
--- a/tracing/tracing/metrics/uma_metric.html
+++ b/tracing/tracing/metrics/uma_metric.html
@@ -120,6 +120,10 @@
return tr.v.HistogramBinBoundaries.createLinear(0, 100, 101);
}
+ if (name.startsWith('Memory.Memory.GPU.PeakMemoryUsage')) {
+ return tr.v.HistogramBinBoundaries.createLinear(0, 1e6, 100);
+ }
+
return tr.v.HistogramBinBoundaries.createExponential(1e-3, 1e3, 50);
}
diff --git a/tracing/tracing/ui/base/heading.html b/tracing/tracing/ui/base/heading.html
index 9b625c61b0..052172e894 100644
--- a/tracing/tracing/ui/base/heading.html
+++ b/tracing/tracing/ui/base/heading.html
@@ -23,7 +23,6 @@
display: block;
overflow-x: hidden;
text-align: left;
- text-overflow: ellipsis;
white-space: nowrap;
}
diff --git a/tracing/tracing/ui/timeline_view.html b/tracing/tracing/ui/timeline_view.html
index 9a7a83f4a7..917c6cb452 100644
--- a/tracing/tracing/ui/timeline_view.html
+++ b/tracing/tracing/ui/timeline_view.html
@@ -5,6 +5,7 @@
found in the LICENSE file.
-->
+
@@ -63,6 +64,8 @@
padding-left: 8px;
padding-right: 8px;
flex: 1 1 auto;
+ overflow: hidden;
+ white-space: nowrap;
}
#control > #bar > #left_controls,
@@ -70,6 +73,7 @@
display: flex;
flex-direction: row;
align-items: stretch;
+ flex-shrink: 0;
}
#control > #bar > #left_controls > * { margin-right: 2px; }
@@ -97,7 +101,7 @@
tr-ui-b-drag-handle { flex: 0 0 auto; }
tr-ui-a-analysis-view { flex: 0 0 auto; }
- #view_options_dropdown, #process_filter_dropdown {
+ tr-ui-b-dropdown {
--dropdown-button: {
-webkit-appearance: none;
align-items: normal;
@@ -118,11 +122,12 @@
^_^
+
M
-
+
»
@@ -207,16 +212,13 @@
this.railScoreSpan_ = undefined;
}
+ this.flowEventFilter_ = this.$.flow_event_filter_dropdown;
this.processFilter_ = this.$.process_filter_dropdown;
this.optionsDropdown_ = this.$.view_options_dropdown;
Polymer.dom(this.optionsDropdown_.iconElement).textContent = 'View Options';
- this.showFlowEvents_ = false;
- Polymer.dom(this.optionsDropdown_).appendChild(tr.ui.b.createCheckBox(
- this, 'showFlowEvents',
- 'tr.ui.TimelineView.showFlowEvents', false,
- 'Flow events'));
+ this.selectedFlowEvents_ = new Set();
this.highlightVSync_ = false;
this.highlightVSyncCheckbox_ = tr.ui.b.createCheckBox(
this, 'highlightVSync',
@@ -288,15 +290,13 @@
link.href = faviconData;
},
- get showFlowEvents() {
- return this.showFlowEvents_;
- },
- set showFlowEvents(showFlowEvents) {
- this.showFlowEvents_ = showFlowEvents;
- if (!this.trackView_) return;
+ get selectedFlowEvents() {
+ return this.selectedFlowEvents_;
+ },
- this.trackView_.viewport.showFlowEvents = showFlowEvents;
+ set selectedFlowEvents(selectedFlowEvents) {
+ this.selectedFlowEvents_ = selectedFlowEvents;
},
get highlightVSync() {
@@ -367,6 +367,53 @@
(this.model && this.model.metadata.length) ? '' : 'none';
},
+ updateFlowEventList_() {
+ const dropdown = Polymer.dom(this.flowEventFilter_);
+ while (dropdown.firstChild) {
+ dropdown.removeChild(dropdown.firstChild);
+ }
+ if (!this.model) return;
+
+ const cboxes = [];
+ const updateAll = (checked) => {
+ for (const cbox of cboxes) {
+ cbox.checked = checked;
+ }
+ };
+
+ dropdown.appendChild(tr.ui.b.createButton('All', () => updateAll(true)));
+ dropdown.appendChild(tr.ui.b.createButton('None', () => updateAll(false)));
+
+ const categories = new Set();
+ for (const event of this.model.flowEvents) {
+ for (const category of tr.b.getCategoryParts(event.category)) {
+ categories.add(category);
+ }
+ }
+
+ const sortedCategories = [...categories].sort(
+ (a, b) => a.localeCompare(b, 'en', {sensitivity: 'base'}));
+ for (const category of sortedCategories) {
+ const cbox = tr.ui.b.createCheckBox(undefined, undefined,
+ 'tr.ui.TimelineView.selectedFlowEvents.' + category, false, category,
+ () => {
+ if (cbox.checked) {
+ this.selectedFlowEvents.add(category);
+ } else {
+ this.selectedFlowEvents.delete(category);
+ }
+ if (this.trackView_) {
+ this.trackView_.viewport.dispatchChangeEvent();
+ }
+ });
+ if (cbox.checked) {
+ this.selectedFlowEvents.add(category);
+ }
+ cboxes.push(cbox);
+ dropdown.appendChild(cbox);
+ }
+ },
+
updateProcessList_() {
const dropdown = Polymer.dom(this.processFilter_);
while (dropdown.firstChild) {
@@ -495,7 +542,7 @@
// Set the model.
if (modelValid) {
this.trackView_.model = model;
- this.trackView_.viewport.showFlowEvents = this.showFlowEvents;
+ this.trackView_.viewport.selectedFlowEvents = this.selectedFlowEvents;
this.trackView_.viewport.highlightVSync = this.highlightVSync;
if (this.railScoreSpan_) {
this.railScoreSpan_.model = model;
@@ -518,6 +565,7 @@
// Do things that are selection specific
if (modelInstanceChanged) {
+ this.updateFlowEventList_();
this.updateProcessList_();
this.updateMetadataButtonVisibility_();
this.brushingStateController_.modelDidChange();
diff --git a/tracing/tracing/ui/timeline_view_test.html b/tracing/tracing/ui/timeline_view_test.html
index 7591f82df7..ccef813f43 100644
--- a/tracing/tracing/ui/timeline_view_test.html
+++ b/tracing/tracing/ui/timeline_view_test.html
@@ -207,11 +207,13 @@
assert.strictEqual(countVisibleTracks(), 51);
assert.isFalse(trackView.processViews[0].visible);
assert.isTrue(trackView.processViews[1].visible);
+ assert.isAbove(trackView.processViews[1].tracks_.length, 0);
// Hide a track. Validate that the checkbox updated state correctly.
trackView.processViews[1].visible = false;
assert.isFalse(trackView.processViews[1].visible);
assert.isFalse(checkboxes[1].checked);
+ assert.strictEqual(trackView.processViews[1].tracks_.length, 0);
});
});
diff --git a/tracing/tracing/ui/timeline_viewport.html b/tracing/tracing/ui/timeline_viewport.html
index 58dafb55af..6b6b316d43 100644
--- a/tracing/tracing/ui/timeline_viewport.html
+++ b/tracing/tracing/ui/timeline_viewport.html
@@ -90,7 +90,7 @@
this.initAnimationController_();
// Flow events
- this.showFlowEvents_ = false;
+ this.selectedFlowEvents_ = new Set();
// Highlights.
this.highlightVSync_ = false;
@@ -232,12 +232,12 @@
this.onModelTrackControllerScroll_);
},
- get showFlowEvents() {
- return this.showFlowEvents_;
+ get selectedFlowEvents() {
+ return this.selectedFlowEvents_;
},
- set showFlowEvents(showFlowEvents) {
- this.showFlowEvents_ = showFlowEvents;
+ set selectedFlowEvents(selectedFlowEvents) {
+ this.selectedFlowEvents_ = selectedFlowEvents;
this.dispatchChangeEvent();
},
diff --git a/tracing/tracing/ui/tracks/model_track.html b/tracing/tracing/ui/tracks/model_track.html
index 01e6b76b98..74f99637df 100644
--- a/tracing/tracing/ui/tracks/model_track.html
+++ b/tracing/tracing/ui/tracks/model_track.html
@@ -5,6 +5,7 @@
found in the LICENSE file.
-->
+
@@ -340,10 +341,13 @@
const events =
this.model_.flowIntervalTree.findIntersection(viewLWorld, viewRWorld);
- // When not showing flow events, show only highlighted/selected ones.
- const onlyHighlighted = !this.viewport.showFlowEvents;
const canvasBounds = ctx.canvas.getBoundingClientRect();
for (let i = 0; i < events.length; ++i) {
+ // Check whether some category of |events[i]| is in
+ // |selectedFlowEvents|. If not, don't draw the flow arrow unless the
+ // event is selected or highlighted.
+ const onlyHighlighted = !tr.b.getCategoryParts(events[i].category).some(
+ (x) => this.viewport.selectedFlowEvents.has(x));
if (onlyHighlighted &&
events[i].selectionState !== SelectionState.SELECTED &&
events[i].selectionState !== SelectionState.HIGHLIGHTED) {
diff --git a/tracing/tracing/ui/tracks/process_track_base.html b/tracing/tracing/ui/tracks/process_track_base.html
index 89358b8411..9d65d9c4c7 100644
--- a/tracing/tracing/ui/tracks/process_track_base.html
+++ b/tracing/tracing/ui/tracks/process_track_base.html
@@ -146,6 +146,7 @@
this.clearTracks_();
if (!this.processBase_) return;
+ if (!this.visible) return;
Polymer.dom(this.processNameEl_).textContent =
this.processBase_.userFriendlyName;
diff --git a/tracing/tracing/value/chart_json_converter.html b/tracing/tracing/value/chart_json_converter.html
index fb30056f60..4f5aa7f167 100644
--- a/tracing/tracing/value/chart_json_converter.html
+++ b/tracing/tracing/value/chart_json_converter.html
@@ -88,8 +88,8 @@
}
const storyTags = [];
- if (value.tir_label) {
- storyTags.push(`tir_label:${value.tir_label}`);
+ if (value.grouping_label) {
+ storyTags.push(`grouping_label:${value.grouping_label}`);
}
if (value.story_tags) {
storyTags.push(...value.story_tags);
diff --git a/tracing/tracing/value/chart_json_converter_test.html b/tracing/tracing/value/chart_json_converter_test.html
index 6b0bb9e49e..5a3a1a3f5b 100644
--- a/tracing/tracing/value/chart_json_converter_test.html
+++ b/tracing/tracing/value/chart_json_converter_test.html
@@ -124,7 +124,7 @@
assert.isTrue(labels.hasGuid);
assert.strictEqual(labels, histograms.lookupDiagnostic(labels.guid));
});
- test('convertWithoutTIRLabel', function() {
+ test('convertWithoutGroupingLabel', function() {
const charts = {
charts: {
mean_frame_time: {
@@ -221,17 +221,17 @@
assert.strictEqual(stories, histograms.lookupDiagnostic(stories.guid));
});
- test('convertWithTIRLabel', function() {
+ test('convertWithGroupingLabel', function() {
const charts = {
charts: {
- 'TIR-A@@value-name': {
+ 'GROUP-A@@value-name': {
'story-name': {
name: 'value-name',
page_id: 7,
improvement_direction: 'down',
values: [42],
units: 'ms',
- tir_label: 'TIR-A',
+ grouping_label: 'GROUP-A',
type: 'list_of_scalar_values',
},
'summary': {
@@ -239,7 +239,7 @@
improvement_direction: 'down',
values: [42],
units: 'ms',
- tir_label: 'TIR-A',
+ grouping_label: 'GROUP-A',
type: 'list_of_scalar_values',
},
},
@@ -250,7 +250,7 @@
const hist = tr.b.getOnlyElement(histograms);
assert.strictEqual('value-name', hist.name);
assert.strictEqual(tr.b.getOnlyElement(hist.diagnostics.get(
- tr.v.d.RESERVED_NAMES.STORY_TAGS)), 'tir_label:TIR-A');
+ tr.v.d.RESERVED_NAMES.STORY_TAGS)), 'grouping_label:GROUP-A');
assert.strictEqual('story-name',
tr.v.HistogramGrouping.BY_KEY.get(
tr.v.d.RESERVED_NAMES.STORIES).callback(hist));
@@ -266,7 +266,7 @@
test('convertWithStoryTags', function() {
const charts = {
charts: {
- 'TIR-A@@value-name': {
+ 'GROUP-A@@value-name': {
'story-name': {
name: 'value-name',
page_id: 7,
diff --git a/tracing/tracing/value/diagnostics/add_reserved_diagnostics.py b/tracing/tracing/value/diagnostics/add_reserved_diagnostics.py
index d826e768e0..f81ae9336d 100644
--- a/tracing/tracing/value/diagnostics/add_reserved_diagnostics.py
+++ b/tracing/tracing/value/diagnostics/add_reserved_diagnostics.py
@@ -31,11 +31,9 @@ def TempFile():
os.unlink(temp.name)
-def GetTIRLabelFromHistogram(hist):
+def GetGroupingLabelFromHistogram(hist):
tags = hist.diagnostics.get(reserved_infos.STORY_TAGS.name) or []
-
tags_to_use = [t.split(':') for t in tags if ':' in t]
-
return '_'.join(v for _, v in sorted(tags_to_use))
@@ -49,10 +47,10 @@ def ComputeTestPath(hist):
is_summary = list(
hist.diagnostics.get(reserved_infos.SUMMARY_KEYS.name, []))
- tir_label = GetTIRLabelFromHistogram(hist)
- if tir_label and (
+ grouping_label = GetGroupingLabelFromHistogram(hist)
+ if grouping_label and (
not is_summary or reserved_infos.STORY_TAGS.name in is_summary):
- path += '/' + tir_label
+ path += '/' + grouping_label
is_ref = hist.diagnostics.get(reserved_infos.IS_REFERENCE_BUILD.name)
if is_ref and len(is_ref) == 1:
@@ -180,7 +178,7 @@ def AddReservedDiagnostics(histogram_dicts, names_to_values, max_bytes=0):
# This call creates summary metrics across each tag set of stories.
hs = histogram_set.HistogramSet()
hs.ImportDicts(hs_with_stories.AsDicts())
- hs.FilterHistograms(lambda h: not GetTIRLabelFromHistogram(h))
+ hs.FilterHistograms(lambda h: not GetGroupingLabelFromHistogram(h))
for h in hs:
h.diagnostics[reserved_infos.SUMMARY_KEYS.name] = (
diff --git a/tracing/tracing/value/diagnostics/reserved_infos.py b/tracing/tracing/value/diagnostics/reserved_infos.py
index ba515f5465..1a52a0f305 100644
--- a/tracing/tracing/value/diagnostics/reserved_infos.py
+++ b/tracing/tracing/value/diagnostics/reserved_infos.py
@@ -69,6 +69,7 @@ def entry_type(self):
V8_COMMIT_POSITIONS = _Info('v8CommitPositions', 'DateRange')
V8_REVISIONS = _Info('v8Revisions', 'GenericSet', str)
WEBRTC_REVISIONS = _Info('webrtcRevisions', 'GenericSet', str)
+WEBRTC_INTERNAL_REVISIONS = _Info('webrtcInternalRevisions', 'GenericSet', str)
def _CreateCachedInfoTypes():
diff --git a/tracing/tracing/value/diagnostics/reserved_names.html b/tracing/tracing/value/diagnostics/reserved_names.html
index 656b2f6286..77bb968101 100644
--- a/tracing/tracing/value/diagnostics/reserved_names.html
+++ b/tracing/tracing/value/diagnostics/reserved_names.html
@@ -65,6 +65,8 @@
V8_COMMIT_POSITIONS: {name: 'v8CommitPositions', type: tr.v.d.DateRange},
V8_REVISIONS: {name: 'v8Revisions', type: tr.v.d.GenericSet},
WEBRTC_REVISIONS: {name: 'webrtcRevisions', type: tr.v.d.GenericSet},
+ WEBRTC_INTERNAL_REVISIONS: {
+ name: 'webrtcInternalRevisions', type: tr.v.d.GenericSet},
};
const RESERVED_NAMES = {};
diff --git a/tracing/tracing/value/histogram.py b/tracing/tracing/value/histogram.py
index fff07dc40d..4e87432641 100644
--- a/tracing/tracing/value/histogram.py
+++ b/tracing/tracing/value/histogram.py
@@ -1245,6 +1245,7 @@ def FromDict(dct):
else:
raise ValueError('Unrecognized HistogramBinBoundaries slice type')
+ bin_boundaries._BuildBins()
HistogramBinBoundaries.CACHE[cache_key] = bin_boundaries
return bin_boundaries
@@ -1255,11 +1256,17 @@ def AsDict(self):
@staticmethod
def CreateExponential(lower, upper, num_bins):
- return HistogramBinBoundaries(lower).AddExponentialBins(upper, num_bins)
+ bin_boundaries = HistogramBinBoundaries(lower)
+ bin_boundaries.AddExponentialBins(upper, num_bins)
+ bin_boundaries._BuildBins()
+ return bin_boundaries
@staticmethod
def CreateLinear(lower, upper, num_bins):
- return HistogramBinBoundaries(lower).AddLinearBins(upper, num_bins)
+ bin_boundaries = HistogramBinBoundaries(lower)
+ bin_boundaries.AddLinearBins(upper, num_bins)
+ bin_boundaries._BuildBins()
+ return bin_boundaries
def _PushBuilderSlice(self, slic):
self._builder += [slic]
diff --git a/tracing/tracing/value/histogram_set.py b/tracing/tracing/value/histogram_set.py
index 80794481fb..1f2caff012 100644
--- a/tracing/tracing/value/histogram_set.py
+++ b/tracing/tracing/value/histogram_set.py
@@ -58,6 +58,11 @@ def AddSharedDiagnosticToAllHistograms(self, name, diag):
for hist in self:
hist.diagnostics[name] = diag
+ def Merge(self, other):
+ """Merge another HistogramSet's contents."""
+ self._shared_diagnostics_by_guid.update(other._shared_diagnostics_by_guid)
+ self._histograms.update(other._histograms)
+
def GetFirstHistogram(self):
for hist in self._histograms:
return hist
diff --git a/tracing/tracing/value/histogram_set_unittest.py b/tracing/tracing/value/histogram_set_unittest.py
index 60da4eb2d1..0ed84f3c9f 100644
--- a/tracing/tracing/value/histogram_set_unittest.py
+++ b/tracing/tracing/value/histogram_set_unittest.py
@@ -98,6 +98,23 @@ def testAddSharedDiagnostic(self):
self.assertEqual(val['diagnostics']['da'], da.guid)
seen_once = True
+ def testMerge(self):
+ hs1 = histogram_set.HistogramSet([histogram.Histogram('a', 'unitless')])
+ hs1.AddSharedDiagnosticToAllHistograms('name',
+ generic_set.GenericSet(['diag1']))
+
+ hs2 = histogram_set.HistogramSet([histogram.Histogram('b', 'unitless')])
+ hs2.AddSharedDiagnosticToAllHistograms('name',
+ generic_set.GenericSet(['diag2']))
+
+ hs1.Merge(hs2)
+
+ self.assertEqual(len(hs1), 2)
+ self.assertEqual(len(hs1.shared_diagnostics), 2)
+ self.assertEqual(hs1.GetHistogramNamed('a').diagnostics['name'],
+ generic_set.GenericSet(['diag1']))
+ self.assertEqual(hs1.GetHistogramNamed('b').diagnostics['name'],
+ generic_set.GenericSet(['diag2']))
def testSharedDiagnostic(self):
hist = histogram.Histogram('', 'unitless')
diff --git a/tracing/tracing/value/histogram_unittest.py b/tracing/tracing/value/histogram_unittest.py
index 8a58a5c3c8..30ed5c0761 100644
--- a/tracing/tracing/value/histogram_unittest.py
+++ b/tracing/tracing/value/histogram_unittest.py
@@ -9,6 +9,7 @@
import copy
import json
import math
+from multiprocessing.dummy import Pool as ThreadPool
import unittest
from six.moves import range # pylint: disable=redefined-builtin
@@ -625,6 +626,20 @@ def testSingularBin(self):
self.assertEqual(3, hist.GetApproximatePercentile(0.9))
self.assertEqual(4, hist.GetApproximatePercentile(1))
+ def testFromDictMultithreaded(self):
+ hdict = {
+ "allBins": {"23": [1]},
+ "binBoundaries": [0.001, [1, 100000, 30]],
+ "name": "foo",
+ "running": [1, 1, 1, 1, 1, 1, 0],
+ "sampleValues": [1],
+ "unit": "ms",
+ }
+ pool = ThreadPool(10)
+ histograms = pool.map(histogram.Histogram.FromDict, [hdict] * 10)
+ self.assertEqual(len(histograms), 10)
+ for h in histograms:
+ self.assertEqual(h.name, 'foo')
class DiagnosticMapUnittest(unittest.TestCase):
def testDisallowReservedNames(self):
diff --git a/tracing/tracing/value/legacy_unit_info.py b/tracing/tracing/value/legacy_unit_info.py
index 7e243688a9..b9dd39b14e 100644
--- a/tracing/tracing/value/legacy_unit_info.py
+++ b/tracing/tracing/value/legacy_unit_info.py
@@ -80,6 +80,7 @@ def AsTuple(self):
'mips': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'mpixels_sec': LegacyUnit('unitless',
IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
+ 'ms': LegacyUnit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'mtri_sec': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'mvtx_sec': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'objects (bigger is better)': LegacyUnit(
diff --git a/tracing/tracing/value/merge_histograms.py b/tracing/tracing/value/merge_histograms.py
index c7e4c9a95b..a56be5abd5 100644
--- a/tracing/tracing/value/merge_histograms.py
+++ b/tracing/tracing/value/merge_histograms.py
@@ -19,7 +19,7 @@ def MergeHistograms(json_path, groupby=()):
Args:
json_path: Path to a HistogramSet JSON file.
groupby: Array of grouping keys (name, benchmark, time, storyset_repeat,
- story_repeat, story, tir, label)
+ story_repeat, story, label)
Returns:
HistogramSet dicts of the merged Histograms.
"""