diff --git a/catapult_build/build_steps.py b/catapult_build/build_steps.py index d6755d93bc..dd14e7dbd5 100644 --- a/catapult_build/build_steps.py +++ b/catapult_build/build_steps.py @@ -37,7 +37,9 @@ 'additional_args': [ '--no-install-hooks', '--no-use-local-chrome', - '--channel=stable' + '--channel=stable', + '--timeout-sec=120', + '--timeout-retries=2' ], 'outputs_presentation_json': True, 'disabled': ['android'], @@ -48,7 +50,9 @@ 'additional_args': [ '--no-install-hooks', '--no-use-local-chrome', - '--channel=canary' + '--channel=canary', + '--timeout-sec=120', + '--timeout-retries=2' ], 'outputs_presentation_json': True, 'disabled': ['android'], @@ -112,7 +116,8 @@ 'path': 'catapult_build/fetch_telemetry_deps_and_run_tests', 'additional_args': [ '--browser=reference', - '--start-xvfb' + '--start-xvfb', + '-v', ], 'uses_sandbox_env': True, 'disabled': ['android'], @@ -123,7 +128,8 @@ 'additional_args': [ '--browser=reference', '--device=android', - '--jobs=1' + '--jobs=1', + '-v', ], 'uses_sandbox_env': True, 'disabled': ['win', 'mac', 'linux'] @@ -134,6 +140,7 @@ 'additional_args': [ 'BrowserTest', '--browser=reference', + '-v', ], 'uses_sandbox_env': True, 'disabled': ['android', 'linux'], # TODO(nedn): enable this on linux @@ -145,6 +152,7 @@ '--no-install-hooks', '--no-use-local-chrome', '--channel=stable', + '--timeout-sec=900', ], 'outputs_presentation_json': True, 'disabled': ['android'], @@ -156,6 +164,7 @@ '--no-install-hooks', '--no-use-local-chrome', '--channel=canary', + '--timeout-sec=900', ], 'outputs_presentation_json': True, 'disabled': ['android'], diff --git a/catapult_build/dev_server.py b/catapult_build/dev_server.py index d9145949ee..d109f2c8f8 100644 --- a/catapult_build/dev_server.py +++ b/catapult_build/dev_server.py @@ -2,6 +2,8 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
+from __future__ import print_function + import argparse import json import os @@ -82,6 +84,15 @@ def post(self, *args, **kwargs): # pylint: disable=unused-argument self.app.server.please_exit(exit_code) return self.response.write('') +class TestsErrorHandler(webapp2.RequestHandler): + def post(self, *args, **kwargs): + del args, kwargs + msg = self.request.body + sys.stderr.write(msg + '\n') + exit_code = 1 + if hasattr(self.app.server, 'please_exit'): + self.app.server.please_exit(exit_code) + return self.response.write('') class DirectoryListingHandler(webapp2.RequestHandler): def get(self, *args, **kwargs): # pylint: disable=unused-argument @@ -198,7 +209,9 @@ def _InitFromArgs(self, args): Route('/%s/notify_test_result' % pd.GetName(), TestResultHandler), Route('/%s/notify_tests_completed' % pd.GetName(), - TestsCompletedHandler) + TestsCompletedHandler), + Route('/%s/notify_test_error' % pd.GetName(), + TestsErrorHandler) ] for pd in self.pds: @@ -276,6 +289,7 @@ def ServeForever(): # allow CTRL+C to shutdown return 255 + print("Exiting dev server") if len(exit_code_attempt) == 1: return exit_code_attempt[0] # The serve_forever returned for some reason separate from diff --git a/catapult_build/run_dev_server_tests.py b/catapult_build/run_dev_server_tests.py index 871cd3d75c..e2afac96e8 100644 --- a/catapult_build/run_dev_server_tests.py +++ b/catapult_build/run_dev_server_tests.py @@ -3,6 +3,8 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. +from __future__ import print_function + import argparse import json import logging @@ -12,8 +14,11 @@ import subprocess import sys import tempfile +import threading import time +from collections import namedtuple + from hooks import install from py_utils import binary_manager @@ -31,6 +36,8 @@ # Default port to run on if not auto-assigning from OS DEFAULT_PORT = '8111' +_TIMEOUT_RETURNCODE = 124 + # Mapping of sys.platform -> platform-specific names and paths. 
PLATFORM_MAPPING = { 'linux2': { @@ -49,16 +56,20 @@ 'omaha': 'mac', 'prefix': 'Mac', 'zip_prefix': 'mac', - 'chromepath': ('chrome-mac/Chromium.app/Contents/MacOS/Chromium'), - 'version_path': 'chrome-mac/Chromium.app/Contents/Versions/', + 'chromepath': ('chrome-mac/Chrome.app/Contents/MacOS/Chrome'), + 'version_path': 'chrome-mac/Chrome.app/Contents/Versions/', 'additional_paths': [ - ('chrome-mac/Chromium.app/Contents/Versions/%VERSION%/' - 'Chromium Helper.app/Contents/MacOS/Chromium Helper'), + ('chrome-mac/Chrome.app/Contents/Versions/%VERSION%/' + 'Chrome Helper.app/Contents/MacOS/Chrome Helper'), ], }, } +class ChromeNotFound(Exception): + pass + + def IsDepotToolsPath(path): return os.path.isfile(os.path.join(path, 'gclient')) @@ -108,33 +119,59 @@ def GetLocalChromePath(path_from_command_line): return None -def Main(argv): - try: - parser = argparse.ArgumentParser( - description='Run dev_server tests for a project.') - parser.add_argument('--chrome_path', type=str, - help='Path to Chrome browser binary.') - parser.add_argument('--no-use-local-chrome', - dest='use_local_chrome', action='store_false') - parser.add_argument( - '--no-install-hooks', dest='install_hooks', action='store_false') - parser.add_argument('--tests', type=str, - help='Set of tests to run (tracing or perf_insights)') - parser.add_argument('--channel', type=str, default='stable', - help='Chrome channel to run (stable or canary)') - parser.add_argument('--presentation-json', type=str, - help='Recipe presentation-json output file path') - parser.set_defaults(install_hooks=True) - parser.set_defaults(use_local_chrome=True) - args = parser.parse_args(argv[1:]) - - if args.install_hooks: - install.InstallHooks() +ChromeInfo = namedtuple('ChromeInfo', 'path, version') - user_data_dir = tempfile.mkdtemp() - tmpdir = None - xvfb_process = None +def GetChromeInfo(args): + """Finds chrome either locally or remotely and returns path and version info. 
+ + Version is not reported if local chrome is used. + """ + if args.use_local_chrome: + chrome_path = GetLocalChromePath(args.chrome_path) + if not chrome_path: + raise ChromeNotFound('Could not find chrome locally. You can supply it ' + 'manually using --chrome_path') + return ChromeInfo(path=chrome_path, version=None) + else: + channel = args.channel + if sys.platform == 'linux2' and channel == 'canary': + channel = 'dev' + assert channel in ['stable', 'beta', 'dev', 'canary'] + + binary = 'chrome' + print('Fetching the', channel, binary, 'binary via the binary_manager.') + chrome_manager = binary_manager.BinaryManager([CHROME_BINARIES_CONFIG]) + arch, os_name = dependency_util.GetOSAndArchForCurrentDesktopPlatform() + chrome_path, version = chrome_manager.FetchPathWithVersion( + '%s_%s' % (binary, channel), arch, os_name) + print('Finished fetching the', binary, 'binary to', chrome_path) + return ChromeInfo(path=chrome_path, version=version) + + +def KillProcess(process): + """Kills process on all platform, including windows.""" + if sys.platform == 'win32': + # Use taskkill on Windows to make sure process and all its subprocesses are + # killed. + subprocess.call(['taskkill', '/F', '/T', '/PID', str(process.pid)]) + else: + process.kill() + + +def RunTests(args, chrome_path): + """Runs tests and returns dev server return code. + + Returns _TIMEOUT_RETURNCODE if tests exceed args.timeout_sec. + """ + user_data_dir = None + xvfb_process = None + chrome_process = None + server_process = None + timer = None + test_start_time = time.time() + try: + user_data_dir = tempfile.mkdtemp() server_path = os.path.join(os.path.dirname( os.path.abspath(__file__)), os.pardir, 'bin', 'run_dev_server') # TODO(anniesullie): Make OS selection of port work on Windows. See #1235. 
@@ -145,7 +182,7 @@ def Main(argv): server_command = [server_path, '--no-install-hooks', '--port', port] if sys.platform.startswith('win'): server_command = ['python.exe'] + server_command - print "Starting dev_server..." + print('Starting dev_server...') server_process = subprocess.Popen( server_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1) @@ -154,40 +191,9 @@ def Main(argv): output = server_process.stderr.readline() port = re.search( r'Now running on http://127.0.0.1:([\d]+)', output).group(1) - - chrome_info = None - if args.use_local_chrome: - chrome_path = GetLocalChromePath(args.chrome_path) - if not chrome_path: - logging.error('Could not find path to chrome.') - sys.exit(1) - chrome_info = 'with command `%s`' % chrome_path - else: - channel = args.channel - if sys.platform == 'linux2': - print ('Using chromium instead of chrome on linux due to ' + - 'https://crbug.com/998338.') - binary = 'chromium' - if channel == 'canary': - # Linux does not have canary. - channel = 'dev' - else: - binary = 'chrome' - - assert channel in ['stable', 'beta', 'dev', 'canary'] - - print ('Fetching the {0} {1}'.format(channel, binary) + - ' binary via the binary_manager.') - - chrome_manager = binary_manager.BinaryManager([CHROME_BINARIES_CONFIG]) - arch, os_name = dependency_util.GetOSAndArchForCurrentDesktopPlatform() - chrome_path, version = chrome_manager.FetchPathWithVersion( - '{0}_{1}'.format(binary, channel), arch, os_name) - print 'Finished fetching the chrome binary to %s' % chrome_path - if xvfb.ShouldStartXvfb(): - print 'Starting xvfb...' 
- xvfb_process = xvfb.StartXvfb() - chrome_info = 'version %s from channel %s' % (version, channel) + if xvfb.ShouldStartXvfb(): + print('Starting xvfb...') + xvfb_process = xvfb.StartXvfb() chrome_command = [ chrome_path, '--user-data-dir=%s' % user_data_dir, @@ -196,49 +202,132 @@ def Main(argv): '--no-first-run', '--noerrdialogs', '--window-size=1280,1024', + '--enable-logging', '--v=1', + '--enable-features=ForceWebRequestProxyForTest', + '--enable-blink-features=CustomElementsV0,' + 'HTMLImportsStyleApplication,ShadowDOMV0', ('http://localhost:%s/%s/tests.html?' % (port, args.tests)) + 'headless=true&testTypeToRun=all', ] - print "Starting Chrome %s..." % chrome_info + print('Starting Chrome at path %s...' % chrome_path) chrome_process = subprocess.Popen( chrome_command, stdout=sys.stdout, stderr=sys.stderr) - print 'chrome process command: %s' % ' '.join(chrome_command) - print "Waiting for tests to finish..." + print('Chrome process command:', ' '.join(chrome_command)) + print('Waiting for tests to finish...') + + def KillServer(): + print('Timeout reached. Killing dev server...') + KillProcess(server_process) + + timer = threading.Timer(args.timeout_sec, KillServer) + timer.start() server_out, server_err = server_process.communicate() - print "Killing Chrome..." - if sys.platform == 'win32': - # Use taskkill on Windows to make sure Chrome and all subprocesses are - # killed. - subprocess.call(['taskkill', '/F', '/T', '/PID', str(chrome_process.pid)]) - else: - chrome_process.kill() - if server_process.returncode != 0: - logging.error('Tests failed!') - logging.error('Server stdout:\n%s', server_out) - logging.error('Server stderr:\n%s', server_err) + timed_out = not timer.is_alive() + timer.cancel() + + # There is a very unlikely case where you see server saying "ALL_PASSED" + # but the test still saying "timed out". 
This usually happens because the + server takes about a second to exit after printing "ALL_PASSED", and it + can time out within that time. Looking at the server returncode can help + here. The timeout should be increased if we're hitting this case. + print("Server return code:", server_process.returncode) + + logging.error('Server stdout:\n%s', server_out) + logging.error('Server stderr:\n%s', server_err) + + if timed_out: + print('Tests did not finish before', args.timeout_sec, 'seconds') + return _TIMEOUT_RETURNCODE else: - print server_out - if args.presentation_json: - with open(args.presentation_json, 'w') as recipe_out: - # Add a link to the buildbot status for the step saying which version - # of Chrome the test ran on. The actual linking feature is not used, - # but there isn't a way to just add text. - link_name = 'Chrome Version %s' % version - presentation_info = {'links': {link_name: CHROME_CONFIG_URL}} - json.dump(presentation_info, recipe_out) + if server_process.returncode == 0: + print("Tests passed in %.2f seconds." % (time.time() - test_start_time)) + else: + logging.error('Tests failed!') + return server_process.returncode + finally: + if timer: + timer.cancel() + if server_process and server_process.poll() is None: + # Dev server is still running. Kill it. + print('Killing dev server...') + KillProcess(server_process) + if chrome_process: + print('Killing Chrome...') + KillProcess(chrome_process) # Wait for Chrome to be killed before deleting temp Chrome dir. Only have # this timing issue on Windows.
if sys.platform == 'win32': time.sleep(5) - if tmpdir: + if user_data_dir: + chrome_debug_logs = os.path.join(user_data_dir, 'chrome_debug.log') + if os.path.exists(chrome_debug_logs): + with open(chrome_debug_logs) as f: + print('-------- chrome_debug.log --------') + sys.stdout.write(f.read()) + print('-------- ---------------- --------') + print('Chrome debug logs printed from', chrome_debug_logs) try: - shutil.rmtree(tmpdir) shutil.rmtree(user_data_dir) except OSError as e: - logging.error('Error cleaning up temp dirs %s and %s: %s', - tmpdir, user_data_dir, e) + logging.error('Error cleaning up temp dirs %s: %s', user_data_dir, e) if xvfb_process: - xvfb_process.kill() + KillProcess(xvfb_process) + - sys.exit(server_process.returncode) +def Main(argv): + parser = argparse.ArgumentParser( + description='Run dev_server tests for a project.') + parser.add_argument('--chrome_path', type=str, + help='Path to Chrome browser binary.') + parser.add_argument('--no-use-local-chrome', + dest='use_local_chrome', action='store_false', + help='Use chrome binary fetched from cloud storage ' + 'instead of chrome available on the system.') + parser.add_argument( + '--no-install-hooks', dest='install_hooks', action='store_false') + parser.add_argument('--tests', type=str, + help='Set of tests to run (tracing or perf_insights)') + parser.add_argument('--channel', type=str, default='stable', + help='Chrome channel to run (stable or canary)') + parser.add_argument('--presentation-json', type=str, + help='Recipe presentation-json output file path') + parser.add_argument('--timeout-sec', type=float, default=float('inf'), + help='Timeout for running all tests, in seconds') + parser.add_argument('--timeout-retries', type=int, default=0, + help='Number of times to retry if tests time out.' 
+ 'Default 0 (no retries)') + parser.set_defaults(install_hooks=True) + parser.set_defaults(use_local_chrome=True) + args = parser.parse_args(argv[1:]) + + if args.install_hooks: + install.InstallHooks() + + chrome_info = GetChromeInfo(args) + print('Using chrome at path', chrome_info.path) + if not args.use_local_chrome: + print ('Chrome version', chrome_info.version, '| channel ', args.channel) + attempts_left = max(0, args.timeout_retries) + 1 + return_code = None + while attempts_left: + print(attempts_left, 'attempts left. Running tests...') + return_code = RunTests(args, chrome_info.path) + if return_code == _TIMEOUT_RETURNCODE: + attempts_left -= 1 + continue + else: + break + else: + logging.error('Tests timed out every time. Retried %d times.', + args.timeout_retries) + return_code = 1 + if args.presentation_json: + with open(args.presentation_json, 'w') as recipe_out: + # Add a link to the buildbot status for the step saying which version + # of Chrome the test ran on. The actual linking feature is not used, + # but there isn't a way to just add text. 
+ link_name = 'Chrome Version %s' % chrome_info.version + presentation_info = {'links': {link_name: CHROME_CONFIG_URL}} + json.dump(presentation_info, recipe_out) + sys.exit(return_code) diff --git a/common/bin/update_chrome_reference_binaries b/common/bin/update_chrome_reference_binaries.py similarity index 61% rename from common/bin/update_chrome_reference_binaries rename to common/bin/update_chrome_reference_binaries.py index c62ea78203..f38232678b 100755 --- a/common/bin/update_chrome_reference_binaries +++ b/common/bin/update_chrome_reference_binaries.py @@ -14,7 +14,6 @@ import argparse import collections -import csv import logging import os import shutil @@ -30,21 +29,28 @@ from dependency_manager import base_config -def BuildNotFoundError(error_string): - raise ValueError(error_string) - _CHROME_BINARIES_CONFIG = os.path.join( os.path.dirname(os.path.abspath(__file__)), '..', '..', 'common', 'py_utils', 'py_utils', 'chrome_binaries.json') -CHROME_GS_BUCKET = 'chrome-unsigned' +_CHROME_GS_BUCKET = 'chrome-unsigned' +_CHROMIUM_GS_BUCKET = 'chromium-browser-snapshots' + +# How many commit positions to search below and above omaha branch position to +# find closest chromium build snapshot. The value 10 is chosen because it looks +# more than sufficient from manual inspection of the bucket. +_CHROMIUM_SNAPSHOT_SEARCH_WINDOW = 10 # Remove a platform name from this list to disable updating it. # Add one to enable updating it. (Must also update _PLATFORM_MAP.) _PLATFORMS_TO_UPDATE = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64', 'android_k_armeabi-v7a', 'android_l_arm64-v8a', 'android_l_armeabi-v7a', 'android_n_armeabi-v7a', - 'android_n_arm64-v8a'] + 'android_n_arm64-v8a' ] + +# Add platforms here if you also want to update chromium binary for it. +# Must add chromium_info for it in _PLATFORM_MAP. +_CHROMIUM_PLATFORMS = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64'] # Remove a channel name from this list to disable updating it. 
# Add one to enable updating it. @@ -57,65 +63,78 @@ def BuildNotFoundError(error_string): 'dev': ['linux'], 'canary': ['mac', 'win']} -# Chromium binaries are only updated for linux builds. -# See https://crbug.com/973847 for context. -_CHROMIUM_PLATFORM = 'linux_x86_64' -_CHROMIUM_CHANNELS_TO_UPDATE = ['stable', 'dev'] -_CHROMIUM_GS_BUCKET = 'chromium-browser-snapshots' -_CHROMIUM_GS_BUILD_DIR = 'Linux_x64' -_CHROMIUM_ZIP_NAME = 'chrome-linux.zip' -# How many commit positions to search below and above omaha branch position to -# find closest chromium build snapshot. The value 10 is chosen because it looks -# more than sufficient from manual inspection of the bucket. -_CHROMIUM_SNAPSHOT_SEARCH_WINDOW = 10 - # All of the information we need to update each platform. # omaha: name omaha uses for the platforms. # zip_name: name of the zip file to be retrieved from cloud storage. # gs_build: name of the Chrome build platform used in cloud storage. +# chromium_info: information needed to update chromium (optional). # destination: Name of the folder to download the reference build to. UpdateInfo = collections.namedtuple('UpdateInfo', - 'omaha, gs_folder, gs_build, zip_name') + 'omaha, gs_folder, gs_build, chromium_info, zip_name') +# build_dir: name of the build directory in _CHROMIUM_GS_BUCKET. +# zip_name: name of the zip file to be retrieved from cloud storage. 
+ChromiumInfo = collections.namedtuple('ChromiumInfo', 'build_dir, zip_name') _PLATFORM_MAP = {'mac_x86_64': UpdateInfo(omaha='mac', gs_folder='desktop-*', gs_build='mac64', + chromium_info=ChromiumInfo( + build_dir='Mac', + zip_name='chrome-mac.zip'), zip_name='chrome-mac.zip'), 'win_x86': UpdateInfo(omaha='win', gs_folder='desktop-*', gs_build='win-clang', + chromium_info=ChromiumInfo( + build_dir='Win', + zip_name='chrome-win.zip'), zip_name='chrome-win-clang.zip'), 'win_AMD64': UpdateInfo(omaha='win', gs_folder='desktop-*', gs_build='win64-clang', + chromium_info=ChromiumInfo( + build_dir='Win_x64', + zip_name='chrome-win.zip'), zip_name='chrome-win64-clang.zip'), 'linux_x86_64': UpdateInfo(omaha='linux', gs_folder='desktop-*', gs_build='linux64', + chromium_info=ChromiumInfo( + build_dir='Linux_x64', + zip_name='chrome-linux.zip'), zip_name='chrome-linux64.zip'), 'android_k_armeabi-v7a': UpdateInfo(omaha='android', gs_folder='android-*', gs_build='arm', + chromium_info=None, zip_name='Chrome.apk'), 'android_l_arm64-v8a': UpdateInfo(omaha='android', gs_folder='android-*', gs_build='arm_64', + chromium_info=None, zip_name='ChromeModern.apk'), 'android_l_armeabi-v7a': UpdateInfo(omaha='android', gs_folder='android-*', gs_build='arm', + chromium_info=None, zip_name='Chrome.apk'), 'android_n_armeabi-v7a': UpdateInfo(omaha='android', gs_folder='android-*', gs_build='arm', + chromium_info=None, zip_name='Monochrome.apk'), 'android_n_arm64-v8a': UpdateInfo(omaha='android', gs_folder='android-*', gs_build='arm_64', + chromium_info=None, zip_name='Monochrome.apk'), } +VersionInfo = collections.namedtuple('VersionInfo', + 'version, branch_base_position') + + def _ChannelVersionsMap(channel): rows = _OmahaReportVersionInfo(channel) omaha_versions_map = _OmahaVersionsMap(rows, channel) @@ -136,7 +155,8 @@ def _OmahaReportVersionInfo(channel): def _OmahaVersionsMap(rows, channel): platforms = _OMAHA_PLATFORMS.get(channel, []) if (len(rows) < 1 or - not rows[0][0:3] == 
['os', 'channel', 'current_version']): + rows[0][0:3] != ['os', 'channel', 'current_version'] or + rows[0][7] != 'branch_base_position'): raise ValueError( 'Omaha report is not in the expected form: %s.' % rows) versions_map = {} @@ -145,78 +165,29 @@ def _OmahaVersionsMap(rows, channel): raise ValueError( 'Omaha report contains a line with the channel %s' % row[1]) if row[0] in platforms: - versions_map[row[0]] = row[2] + versions_map[row[0]] = VersionInfo(version=row[2], + branch_base_position=int(row[7])) logging.warn('versions map: %s' % versions_map) if not all(platform in versions_map for platform in platforms): raise ValueError( - 'Omaha report did not contain all desired platforms for channel %s' % channel) + 'Omaha report did not contain all desired platforms ' + 'for channel %s' % channel) return versions_map -def _QueuePlatformUpdate(platform, version, config, channel): - """ platform: the name of the platform for the browser to - be downloaded & updated from cloud storage. """ - platform_info = _PLATFORM_MAP[platform] - filename = platform_info.zip_name - # remote_path example: desktop-*/30.0.1595.0/precise32/chrome-precise32.zip - remote_path = '%s/%s/%s/%s' % ( - platform_info.gs_folder, version, platform_info.gs_build, filename) - if not cloud_storage.Exists(CHROME_GS_BUCKET, remote_path): - cloud_storage_path = 'gs://%s/%s' % (CHROME_GS_BUCKET, remote_path) - raise BuildNotFoundError( - 'Failed to find %s build for version %s at path %s.' 
% ( - platform, version, cloud_storage_path)) - reference_builds_folder = os.path.join( - os.path.dirname(os.path.abspath(__file__)), 'chrome_telemetry_build', - 'reference_builds', channel) - if not os.path.exists(reference_builds_folder): - os.makedirs(reference_builds_folder) - local_dest_path = os.path.join(reference_builds_folder, filename) - cloud_storage.Get(CHROME_GS_BUCKET, remote_path, local_dest_path) - _ModifyBuildIfNeeded(local_dest_path, platform) - config.AddCloudStorageDependencyUpdateJob( - 'chrome_%s' % channel, platform, local_dest_path, version=version, - execute_job=False) +RemotePath = collections.namedtuple('RemotePath', 'bucket, path') -def _ModifyBuildIfNeeded(location, platform): - """Hook to modify the build before saving it for Telemetry to use. - - This can be used to remove various utilities that cause noise in a - test environment. Right now, it is just used to remove Keystone, - which is a tool used to autoupdate Chrome. - """ - if platform == 'mac_x86_64': - _RemoveKeystoneFromBuild(location) - return - - if 'mac' in platform: - raise NotImplementedError( - 'Platform <%s> sounds like it is an OSX version. If so, we may need to ' - 'remove Keystone from it per crbug.com/932615. 
Please edit this script' - ' and teach it what needs to be done :).') +def _ResolveChromeRemotePath(platform_info, version_info): + # Path example: desktop-*/30.0.1595.0/precise32/chrome-precise32.zip + return RemotePath(bucket=_CHROME_GS_BUCKET, + path=('%s/%s/%s/%s' % (platform_info.gs_folder, + version_info.version, + platform_info.gs_build, + platform_info.zip_name))) -def _RemoveKeystoneFromBuild(location): - """Removes the Keystone autoupdate binary from the chrome mac zipfile.""" - logging.info('Removing keystone from mac build at %s' % location) - temp_folder = tempfile.mkdtemp(prefix='RemoveKeystoneFromBuild') - try: - subprocess.check_call(['unzip', '-q', location, '-d', temp_folder]) - keystone_folder = os.path.join( - temp_folder, 'chrome-mac', 'Google Chrome.app', 'Contents', - 'Frameworks', 'Google Chrome Framework.framework', 'Frameworks', - 'KeystoneRegistration.framework') - shutil.rmtree(keystone_folder) - os.remove(location) - subprocess.check_call(['zip', '--quiet', '--recurse-paths', '--symlinks', - location, 'chrome-mac'], - cwd=temp_folder) - finally: - shutil.rmtree(temp_folder) - - -def _FindClosestChromiumSnapshot(base_position): +def _FindClosestChromiumSnapshot(base_position, build_dir): """Returns the closest chromium snapshot available in cloud storage. Chromium snapshots are pulled from _CHROMIUM_BUILD_DIR in CHROMIUM_GS_BUCKET. @@ -239,7 +210,7 @@ def _FindClosestChromiumSnapshot(base_position): available_positions = [] for position_prefix in range(min_position_prefix, max_position_prefix + 1): - query = '%s/%d*' % (_CHROMIUM_GS_BUILD_DIR, position_prefix) + query = '%s/%d*' % (build_dir, position_prefix) try: ls_results = cloud_storage.ListDirs(_CHROMIUM_GS_BUCKET, query) except cloud_storage.NotFoundError: @@ -248,12 +219,12 @@ def _FindClosestChromiumSnapshot(base_position): continue for entry in ls_results: - # entry looks like "/Linux_x64/${commit_position}/". + # entry looks like '/Linux_x64/${commit_position}/'. 
position = int(entry.split('/')[2]) available_positions.append(position) if len(available_positions) == 0: - raise ValueError("No chromium build found +/-%d commit positions of %d" % + raise ValueError('No chromium build found +/-%d commit positions of %d' % (_CHROMIUM_SNAPSHOT_SEARCH_WINDOW, base_position)) distance_function = lambda position: abs(position - base_position) @@ -261,73 +232,125 @@ def _FindClosestChromiumSnapshot(base_position): return min_distance_snapshot -def _GetLinuxOmahaInfo(): - """Returns a dict of channel -> its omaha info on linux as a csv dict. """ - url = 'https://omahaproxy.appspot.com/all?os=linux' - reader = csv.DictReader(urllib2.urlopen(url)) - channel_to_info_map = {} - for row in reader: - channel_to_info_map[row['channel']] = row - return channel_to_info_map +def _ResolveChromiumRemotePath(channel, platform, version_info): + platform_info = _PLATFORM_MAP[platform] + branch_base_position = version_info.branch_base_position + omaha_version = version_info.version + build_dir = platform_info.chromium_info.build_dir + # Look through chromium-browser-snapshots for closest match. + closest_snapshot = _FindClosestChromiumSnapshot( + branch_base_position, build_dir) + if closest_snapshot != branch_base_position: + print ('Channel %s corresponds to commit position ' % channel + + '%d on %s, ' % (branch_base_position, platform) + + 'but closest chromium snapshot available on ' + + '%s is %d' % (_CHROMIUM_GS_BUCKET, closest_snapshot)) + return RemotePath(bucket=_CHROMIUM_GS_BUCKET, + path = ('%s/%s/%s' % (build_dir, closest_snapshot, + platform_info.chromium_info.zip_name))) + + +def _QueuePlatformUpdate(binary, platform, version_info, config, channel): + """ platform: the name of the platform for the browser to + be downloaded & updated from cloud storage. 
""" + platform_info = _PLATFORM_MAP[platform] + if binary == 'chrome': + remote_path = _ResolveChromeRemotePath(platform_info, version_info) + elif binary == 'chromium': + remote_path = _ResolveChromiumRemotePath(channel, platform, version_info) + else: + raise ValueError('binary must be \'chrome\' or \'chromium\'') + + if not cloud_storage.Exists(remote_path.bucket, remote_path.path): + cloud_storage_path = 'gs://%s/%s' % (remote_path.bucket, remote_path.path) + logging.warn('Failed to find %s build for version %s at path %s.' % ( + platform, version_info.version, cloud_storage_path)) + logging.warn('Skipping this update for this platform/channel.') + return + + reference_builds_folder = os.path.join( + os.path.dirname(os.path.abspath(__file__)), 'chrome_telemetry_build', + 'reference_builds', binary, channel) + if not os.path.exists(reference_builds_folder): + os.makedirs(reference_builds_folder) + local_dest_path = os.path.join(reference_builds_folder, + platform, + platform_info.zip_name) + cloud_storage.Get(remote_path.bucket, remote_path.path, local_dest_path) + _ModifyBuildIfNeeded(binary, local_dest_path, platform) + config.AddCloudStorageDependencyUpdateJob('%s_%s' % (binary, channel), + platform, local_dest_path, version=version_info.version, + execute_job=False) + + +def _ModifyBuildIfNeeded(binary, location, platform): + """Hook to modify the build before saving it for Telemetry to use. + + This can be used to remove various utilities that cause noise in a + test environment. Right now, it is just used to remove Keystone, + which is a tool used to autoupdate Chrome. + """ + if binary != 'chrome': + return + + if platform == 'mac_x86_64': + _RemoveKeystoneFromBuild(location) + return + + if 'mac' in platform: + raise NotImplementedError( + 'Platform <%s> sounds like it is an OSX version. If so, we may need to ' + 'remove Keystone from it per crbug.com/932615. 
Please edit this script' + ' and teach it what needs to be done :).') + + +def _RemoveKeystoneFromBuild(location): + """Removes the Keystone autoupdate binary from the chrome mac zipfile.""" + logging.info('Removing keystone from mac build at %s' % location) + temp_folder = tempfile.mkdtemp(prefix='RemoveKeystoneFromBuild') + try: + subprocess.check_call(['unzip', '-q', location, '-d', temp_folder]) + keystone_folder = os.path.join( + temp_folder, 'chrome-mac', 'Google Chrome.app', 'Contents', + 'Frameworks', 'Google Chrome Framework.framework', 'Frameworks', + 'KeystoneRegistration.framework') + shutil.rmtree(keystone_folder) + os.remove(location) + subprocess.check_call(['zip', '--quiet', '--recurse-paths', '--symlinks', + location, 'chrome-mac'], + cwd=temp_folder) + finally: + shutil.rmtree(temp_folder) -def _UpdateChromiumLinuxBuilds(config): - omaha_info = _GetLinuxOmahaInfo() - for channel in _CHROMIUM_CHANNELS_TO_UPDATE: - if channel not in omaha_info: - raise ValueError( - 'Omaha report did not contain linux information for channel %s' - % channel) - - branch_base_position = int(omaha_info[channel]['branch_base_position']) - current_version = config.GetVersion( - 'chromium_%s' % channel, _CHROMIUM_PLATFORM) - omaha_version = omaha_info[channel]['current_version'] - if current_version and current_version == omaha_version: - print 'Chromium %s channel is already up to date.' % channel - continue - # Look through chromium-browser-snapshots for closest match. 
- closest_snapshot = _FindClosestChromiumSnapshot(branch_base_position) - if closest_snapshot != branch_base_position: - print ('Channel %s corresponds to commit position ' % channel + - '%d on %s, ' % (branch_base_position, _CHROMIUM_PLATFORM) + - 'but closest chromium snapshot available on ' + - '%s is %d' % (_CHROMIUM_GS_BUCKET, closest_snapshot)) - - remote_path = '%s/%s/%s' % (_CHROMIUM_GS_BUILD_DIR, closest_snapshot, - _CHROMIUM_ZIP_NAME) - reference_builds_folder = os.path.join( - os.path.dirname(os.path.abspath(__file__)), 'chrome_telemetry_build', - 'reference_builds', 'chromium_' + channel) - if not os.path.exists(reference_builds_folder): - os.makedirs(reference_builds_folder) - local_path = os.path.join(reference_builds_folder, _CHROMIUM_ZIP_NAME) - - print 'Downloading Chromium at %d on linux_x86_64' % (closest_snapshot) - cloud_storage.Get(_CHROMIUM_GS_BUCKET, remote_path, local_path) - config.AddCloudStorageDependencyUpdateJob( - 'chromium_%s' % channel, 'linux_x86_64', local_path, - version=omaha_version, execute_job=False) +def _NeedsUpdate(config, binary, channel, platform, version_info): + channel_version = version_info.version + print 'Checking %s (%s channel) on %s' % (binary, channel, platform) + current_version = config.GetVersion('%s_%s' % (binary, channel), platform) + print 'current: %s, channel: %s' % (current_version, channel_version) + if current_version and current_version == channel_version: + print 'Already up to date.' 
+ return False + return True def UpdateBuilds(args): config = base_config.BaseConfig(_CHROME_BINARIES_CONFIG, writable=True) - if args.update_chrome: - logging.info("Preparing updates for chrome builds.") - for channel in _CHANNELS_TO_UPDATE: - channel_versions_map = _ChannelVersionsMap(channel) - for platform in channel_versions_map: - print 'Downloading Chrome (%s channel) on %s' % (channel, platform) - current_version = config.GetVersion('chrome_%s' % channel, platform) - channel_version = channel_versions_map.get(platform) - print 'current: %s, channel: %s' % (current_version, channel_version) - if current_version and current_version == channel_version: - continue - _QueuePlatformUpdate(platform, channel_version, config, channel) - if args.update_chromium: - logging.info("Preparing updates for chromium builds.") - _UpdateChromiumLinuxBuilds(config) + for channel in _CHANNELS_TO_UPDATE: + channel_versions_map = _ChannelVersionsMap(channel) + for platform in channel_versions_map: + version_info = channel_versions_map.get(platform) + if args.update_chrome: + if _NeedsUpdate(config, 'chrome', channel, platform, version_info): + _QueuePlatformUpdate('chrome', platform, version_info, config, + channel) + if args.update_chromium and platform in _CHROMIUM_PLATFORMS: + if _NeedsUpdate(config, 'chromium', channel, platform, version_info): + _QueuePlatformUpdate('chromium', platform, version_info, + config, channel) + + print 'Updating builds with downloaded binaries' config.ExecuteUpdateJobs(force=True) diff --git a/common/py_utils/py_utils/chrome_binaries.json b/common/py_utils/py_utils/chrome_binaries.json index 00e4f63a2e..66caf0c908 100644 --- a/common/py_utils/py_utils/chrome_binaries.json +++ b/common/py_utils/py_utils/chrome_binaries.json @@ -6,22 +6,22 @@ "cloud_storage_bucket": "chrome-telemetry", "file_info": { "mac_x86_64": { - "cloud_storage_hash": "381a491e14ab523b8db4cdf3c993713678237af8", + "cloud_storage_hash": "805bcd36abcadd252fc433b1b440edc9c1d1abaf", 
"download_path": "bin/reference_builds/chrome-mac64.zip", "path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome", - "version_in_cs": "77.0.3822.0" + "version_in_cs": "80.0.3951.6" }, "win_AMD64": { - "cloud_storage_hash": "600ee522c410efe1de2f593c0efc32ae113a7d99", + "cloud_storage_hash": "7ad4063151c506f73b350665ca8e189e18565a3a", "download_path": "bin\\reference_build\\chrome-win64-clang.zip", "path_within_archive": "chrome-win64-clang\\chrome.exe", - "version_in_cs": "77.0.3822.0" + "version_in_cs": "80.0.3951.4" }, "win_x86": { - "cloud_storage_hash": "5b79a181bfbd94d8288529b0da1defa3ef097197", + "cloud_storage_hash": "9ccaf1f26fc1f9d4e46258504a2de0f3808c1cf0", "download_path": "bin\\reference_build\\chrome-win32-clang.zip", "path_within_archive": "chrome-win32-clang\\chrome.exe", - "version_in_cs": "77.0.3822.0" + "version_in_cs": "80.0.3951.4" } } }, @@ -30,10 +30,10 @@ "cloud_storage_bucket": "chrome-telemetry", "file_info": { "linux_x86_64": { - "cloud_storage_hash": "61d68a6b00f25c964f5162f5251962468c886f3a", + "cloud_storage_hash": "0db52435a728bbb0343791e275efd52904d059d6", "download_path": "bin/reference_build/chrome-linux64.zip", "path_within_archive": "chrome-linux64/chrome", - "version_in_cs": "76.0.3809.21" + "version_in_cs": "79.0.3945.16" } } }, @@ -42,53 +42,77 @@ "cloud_storage_bucket": "chrome-telemetry", "file_info": { "android_k_armeabi-v7a": { - "cloud_storage_hash": "28b913c720d56a30c092625c7862f00175a316c7", + "cloud_storage_hash": "fcd18925f0929d38273c860e6fa4d1c3064b2037", "download_path": "bin/reference_build/android_k_armeabi-v7a/ChromeStable.apk", - "version_in_cs": "75.0.3770.67" + "version_in_cs": "78.0.3904.62" }, "android_l_arm64-v8a": { - "cloud_storage_hash": "4b953c33c61f94c2198e8001d0d8142c6504a875", + "cloud_storage_hash": "f2a8fd07fa7e082835a3c4ac228e66dc0dc89fee", "download_path": "bin/reference_build/android_l_arm64-v8a/ChromeStable.apk", - "version_in_cs": "75.0.3770.67" + "version_in_cs": 
"78.0.3904.62" }, "android_l_armeabi-v7a": { - "cloud_storage_hash": "28b913c720d56a30c092625c7862f00175a316c7", + "cloud_storage_hash": "fcd18925f0929d38273c860e6fa4d1c3064b2037", "download_path": "bin/reference_build/android_l_armeabi-v7a/ChromeStable.apk", - "version_in_cs": "75.0.3770.67" + "version_in_cs": "78.0.3904.62" }, "android_n_arm64-v8a": { - "cloud_storage_hash": "84152ba8f7a25cacc79d588ed827ea75f0e4ab94", + "cloud_storage_hash": "46943be19af7dd4dd70930d1838e7058a4a91235", "download_path": "bin/reference_build/android_n_arm64-v8a/Monochrome.apk", - "version_in_cs": "75.0.3770.67" + "version_in_cs": "78.0.3904.62" }, "android_n_armeabi-v7a": { - "cloud_storage_hash": "656bb9e3982d0d35decd5347ced2c320a7267f33", + "cloud_storage_hash": "628c0a492ac8c465b6da47909b3d1c92769da771", "download_path": "bin/reference_build/android_n_armeabi-v7a/Monochrome.apk", - "version_in_cs": "75.0.3770.67" + "version_in_cs": "78.0.3904.62" }, "linux_x86_64": { - "cloud_storage_hash": "dee8469e8dcd8453efd33f3a00d7ea302a126a4b", + "cloud_storage_hash": "6428da5968a0e69b84ee4525f8886517a45e4c92", "download_path": "bin/reference_build/chrome-linux64.zip", "path_within_archive": "chrome-linux64/chrome", - "version_in_cs": "75.0.3770.80" + "version_in_cs": "78.0.3904.70" }, "mac_x86_64": { - "cloud_storage_hash": "16a43a1e794bb99ec1ebcd40569084985b3c6626", + "cloud_storage_hash": "40096f095b8f8b3694219c23b3f7254a60ca35e0", "download_path": "bin/reference_builds/chrome-mac64.zip", "path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome", - "version_in_cs": "75.0.3770.80" + "version_in_cs": "78.0.3904.70" }, "win_AMD64": { - "cloud_storage_hash": "1ec52bd4164f2d93c53113a093dae9e041eb2d73", + "cloud_storage_hash": "7fcc267926ac55afe6fc28bc14eb252c98e20e08", "download_path": "bin\\reference_build\\chrome-win64-clang.zip", "path_within_archive": "chrome-win64-clang\\chrome.exe", - "version_in_cs": "75.0.3770.80" + "version_in_cs": "78.0.3904.70" }, 
"win_x86": { - "cloud_storage_hash": "0f9eb991ba618dc61f2063ea252f44be94c2252e", + "cloud_storage_hash": "d6fdf2a4858bf9ddcbdb97b29b68863dfa3574f7", "download_path": "bin\\reference_build\\chrome-win-clang.zip", "path_within_archive": "chrome-win-clang\\chrome.exe", - "version_in_cs": "75.0.3770.80" + "version_in_cs": "78.0.3904.70" + } + } + }, + "chromium_canary": { + "cloud_storage_base_folder": "binary_dependencies", + "cloud_storage_bucket": "chrome-telemetry", + "file_info": { + "mac_x86_64": { + "cloud_storage_hash": "6502438babd29256ae0407c818123d7d25b439c4", + "download_path": "bin/reference_builds/chrome-mac.zip", + "path_within_archive": "chrome-mac/Chromium.app/Contents/MacOS/Chromium", + "version_in_cs": "80.0.3951.6" + }, + "win_AMD64": { + "cloud_storage_hash": "e177a29aa1bc1d86dae31fc80bca293011e8ff51", + "download_path": "bin\\reference_build\\chrome-win.zip", + "path_within_archive": "chrome-win\\chrome.exe", + "version_in_cs": "80.0.3951.4" + }, + "win_x86": { + "cloud_storage_hash": "4f1bfd18c5cc386cb966ab48bf174d34ae9596ee", + "download_path": "bin\\reference_build\\chrome-win32-clang.zip", + "path_within_archive": "chrome-win32-clang\\chrome.exe", + "version_in_cs": "80.0.3951.4" } } }, @@ -97,10 +121,10 @@ "cloud_storage_bucket": "chrome-telemetry", "file_info": { "linux_x86_64": { - "cloud_storage_hash": "af3848b34d925ce9d13ea91ce416e0fd46a1a102", - "download_path": "bin/reference_build/chromium-linux.zip", + "cloud_storage_hash": "5821ab5c8693c87b5a02c2684bb45e08ba901960", + "download_path": "bin/reference_build/chrome-linux.zip", "path_within_archive": "chrome-linux/chrome", - "version_in_cs": "79.0.3921.0" + "version_in_cs": "79.0.3945.16" } } }, @@ -109,12 +133,30 @@ "cloud_storage_bucket": "chrome-telemetry", "file_info": { "linux_x86_64": { - "cloud_storage_hash": "c3d855e65c71eec418c6caf21184e73c90c86aa0", - "download_path": "bin/reference_build/chromium-linux.zip", + "cloud_storage_hash": "eb82b5d41759b6eeb2e61ef1a702be31aadf71c5", + 
"download_path": "bin/reference_build/chrome-linux.zip", "path_within_archive": "chrome-linux/chrome", - "version_in_cs": "77.0.3865.90" + "version_in_cs": "78.0.3904.70" + }, + "mac_x86_64": { + "cloud_storage_hash": "0eb8d99f6ea6e1ff5bd9607d5be3e0eb29a9a497", + "download_path": "bin/reference_builds/chrome-mac.zip", + "path_within_archive": "chrome-mac/Chromium.app/Contents/MacOS/Chromium", + "version_in_cs": "78.0.3904.70" + }, + "win_AMD64": { + "cloud_storage_hash": "c28d9e5bd2229164731fc7725293e361d9a850df", + "download_path": "bin\\reference_build\\chrome-win.zip", + "path_within_archive": "chrome-win\\chrome.exe", + "version_in_cs": "78.0.3904.70" + }, + "win_x86": { + "cloud_storage_hash": "5af50c744ace488341a79e5f8d208ddaee04c5e7", + "download_path": "bin\\reference_build\\chrome-win-clang.zip", + "path_within_archive": "chrome-win-clang\\chrome.exe", + "version_in_cs": "78.0.3904.70" } } } } -} +} \ No newline at end of file diff --git a/dashboard/cron.yaml b/dashboard/cron.yaml index 8aff23491e..d4f09963d7 100644 --- a/dashboard/cron.yaml +++ b/dashboard/cron.yaml @@ -6,6 +6,10 @@ cron: url: /internal/cron/ts_mon/send schedule: every 1 minutes +- description: Fix sheriff configs that get out of sync from tests + url: /cron/update_sheriff + schedule: every 48 hours + - description: Mark alerts as recovered. 
url: /mark_recovered_alerts schedule: every 6 hours diff --git a/dashboard/dashboard/add_histograms_queue.py b/dashboard/dashboard/add_histograms_queue.py index cc234da16e..a80263e6c6 100644 --- a/dashboard/dashboard/add_histograms_queue.py +++ b/dashboard/dashboard/add_histograms_queue.py @@ -40,6 +40,7 @@ reserved_infos.CATAPULT_REVISIONS.name: 'r_catapult_git', reserved_infos.ANGLE_REVISIONS.name: 'r_angle_git', reserved_infos.WEBRTC_REVISIONS.name: 'r_webrtc_git', + reserved_infos.WEBRTC_INTERNAL_REVISIONS.name: 'r_webrtc_internal_cl', reserved_infos.FUCHSIA_GARNET_REVISIONS.name: 'r_fuchsia_garnet_git', reserved_infos.FUCHSIA_PERIDOT_REVISIONS.name: 'r_fuchsia_peridot_git', reserved_infos.FUCHSIA_TOPAZ_REVISIONS.name: 'r_fuchsia_topaz_git', @@ -198,10 +199,6 @@ def _AddRowsFromData(params, revision, parent_test, legacy_parent_tests): data_dict = params['data'] test_key = parent_test.key - all_tests = [parent_test] + legacy_parent_tests.values() - yield [a.UpdateSheriffAsync() for a in all_tests] - yield ndb.put_multi_async(all_tests) - stat_names_to_test_keys = {k: v.key for k, v in legacy_parent_tests.items()} rows = CreateRowEntities( diff --git a/dashboard/dashboard/add_histograms_test.py b/dashboard/dashboard/add_histograms_test.py index f760653a83..a0d51f027e 100644 --- a/dashboard/dashboard/add_histograms_test.py +++ b/dashboard/dashboard/add_histograms_test.py @@ -244,41 +244,6 @@ def testPost_Succeeds(self, mock_process_test, mock_graph_revisions): mock_graph_revisions.assert_called_once_with(mock.ANY) self.assertEqual(len(mock_graph_revisions.mock_calls[0][1][0]), len(rows)) - @mock.patch.object( - add_histograms_queue.graph_revisions, 'AddRowsToCacheAsync', - mock.MagicMock()) - @mock.patch.object( - add_histograms_queue.find_anomalies, 'ProcessTestsAsync', - mock.MagicMock()) - def testPost_Succeeds_SheriffUpdated(self): - hs = _CreateHistogram( - master='master', bot='bot', benchmark='benchmark', commit_position=123, - 
benchmark_description='Benchmark description.', samples=[1, 2, 3]) - data = json.dumps(hs.AsDicts()) - - self.PostAddHistogram({'data': data}) - self.ExecuteTaskQueueTasks('/add_histograms_queue', - add_histograms.TASK_QUEUE_NAME) - - t = utils.TestKey('master/bot/benchmark/hist').get() - self.assertIsNone(t.sheriff) - - sheriff.Sheriff( - id='my_sheriff1', email='a@chromium.org', patterns=[ - '*/*/*/hist', '*/*/*/hist_avg']).put() - - hs = _CreateHistogram( - master='master', bot='bot', benchmark='benchmark', commit_position=124, - benchmark_description='Benchmark description.', samples=[1, 2, 3]) - data = json.dumps(hs.AsDicts()) - - self.PostAddHistogram({'data': data}) - self.ExecuteTaskQueueTasks('/add_histograms_queue', - add_histograms.TASK_QUEUE_NAME) - - t = utils.TestKey('master/bot/benchmark/hist').get() - self.assertIsNotNone(t.sheriff) - @mock.patch.object( add_histograms_queue.graph_revisions, 'AddRowsToCacheAsync') @mock.patch.object(add_histograms_queue.find_anomalies, 'ProcessTestsAsync') diff --git a/dashboard/dashboard/add_point.py b/dashboard/dashboard/add_point.py index cd85a0ba2a..7dd5034744 100644 --- a/dashboard/dashboard/add_point.py +++ b/dashboard/dashboard/add_point.py @@ -18,6 +18,7 @@ from google.appengine.ext import ndb from dashboard import post_data_handler +from dashboard.api import api_auth from dashboard.common import datastore_hooks from dashboard.common import histogram_helpers from dashboard.common import math_utils @@ -125,7 +126,14 @@ def post(self): """ datastore_hooks.SetPrivilegedRequest() if not self._CheckIpAgainstWhitelist(): - return + try: + api_auth.Authorize() + except api_auth.ApiAuthException as error: + logging.error('Auth error: %s', error) + self.ReportError( + 'IP address %s not in IP whitelist!' 
% self.request.remote_addr, + 403) + return data_str = self.request.get('data') if not data_str: @@ -378,8 +386,8 @@ def _FlattenTrace(test_suite_name, chart_name, trace_name, trace, BadRequestError: The data wasn't valid. """ if '@@' in chart_name: - tir_label, chart_name = chart_name.split('@@') - chart_name = chart_name + '/' + tir_label + grouping_label, chart_name = chart_name.split('@@') + chart_name = chart_name + '/' + grouping_label value, error = _ExtractValueAndError(trace) diff --git a/dashboard/dashboard/add_point_test.py b/dashboard/dashboard/add_point_test.py index 2146f73df9..891034cfd5 100644 --- a/dashboard/dashboard/add_point_test.py +++ b/dashboard/dashboard/add_point_test.py @@ -21,6 +21,7 @@ from dashboard import add_point from dashboard import add_point_queue from dashboard import units_to_direction +from dashboard.api import api_auth from dashboard.common import layered_cache from dashboard.common import testing_common from dashboard.common import utils @@ -186,6 +187,45 @@ def setUp(self): testing_common.SetIpWhitelist([_WHITELISTED_IP]) self.SetCurrentUser('foo@bar.com', is_admin=True) + @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTestsAsync') + def testPost_OauthUser_Authorized(self, mock_process_test): + sheriff.Sheriff( + id='my_sheriff1', email='a@chromium.org', patterns=['*/*/*/dom']).put() + data_param = json.dumps([ + { + 'master': 'ChromiumPerf', + 'bot': 'win7', + 'test': 'dromaeo/dom', + 'revision': 12345, + 'value': 22.4, + 'error': 1.23, + 'supplemental_columns': { + 'r_webkit': 1355, + 'a_extra': 'hello', + 'd_median': 22.2, + }, + }, + { + 'master': 'ChromiumPerf', + 'bot': 'win7', + 'test': 'dromaeo/jslib', + 'revision': 12345, + 'value': 44.3, + } + ]) + self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) + self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_WHITELIST[0]) + self.Post('/add_point', {'data': data_param}) + self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) + + # 
Verify everything was added to the database correctly + rows = graph_data.Row.query().fetch(limit=_FETCH_LIMIT) + self.assertEqual(2, len(rows)) + + # Verify that an anomaly processing was called. + tests = graph_data.TestMetadata.query().fetch(limit=_FETCH_LIMIT) + mock_process_test.assert_called_once_with([tests[1].key]) + @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTestsAsync') def testPost(self, mock_process_test): """Tests all basic functionality of a POST request.""" @@ -1536,13 +1576,13 @@ def testFlattenTrace_SanitizesTraceName(self): 'foo', 'bar', 'http://example.com', trace) self.assertEqual(row['test'], 'foo/bar/http___example.com') - def testFlattenTrace_FlattensInteractionRecordLabelToFivePartName(self): - """Tests whether a TIR label will appear between chart and trace name.""" + def testFlattenTrace_FlattensGroupingLabelToFivePartName(self): + """Tests whether a grouping label appears between chart and trace name.""" trace = self._SampleTrace() trace.update({ 'name': 'bar', 'page': 'https://abc.xyz/', - 'tir_label': 'baz' + 'grouping_label': 'baz' }) row = add_point._FlattenTrace('foo', 'baz@@bar', 'https://abc.xyz/', trace) self.assertEqual(row['test'], 'foo/bar/baz/https___abc.xyz_') diff --git a/dashboard/dashboard/common/histogram_helpers.py b/dashboard/dashboard/common/histogram_helpers.py index 74cfc23ed5..75009dca16 100644 --- a/dashboard/dashboard/common/histogram_helpers.py +++ b/dashboard/dashboard/common/histogram_helpers.py @@ -102,7 +102,7 @@ def ComputeTestPath(hist): is_summary = list( hist.diagnostics.get(reserved_infos.SUMMARY_KEYS.name, [])) - tir_label = GetTIRLabelFromHistogram(hist) + grouping_label = GetGroupingLabelFromHistogram(hist) is_ref = hist.diagnostics.get(reserved_infos.IS_REFERENCE_BUILD.name) if is_ref and len(is_ref) == 1: @@ -115,17 +115,18 @@ def ComputeTestPath(hist): story_name = None return ComputeTestPathFromComponents( - hist.name, tir_label=tir_label, story_name=story_name, + hist.name, 
grouping_label=grouping_label, story_name=story_name, is_summary=is_summary, is_ref=is_ref) def ComputeTestPathFromComponents( - hist_name, tir_label=None, story_name=None, is_summary=None, is_ref=False): + hist_name, grouping_label=None, story_name=None, is_summary=None, + is_ref=False): path = hist_name - if tir_label and ( + if grouping_label and ( not is_summary or reserved_infos.STORY_TAGS.name in is_summary): - path += '/' + tir_label + path += '/' + grouping_label if story_name and not is_summary: escaped_story_name = EscapeName(story_name) @@ -138,7 +139,7 @@ def ComputeTestPathFromComponents( return path -def GetTIRLabelFromHistogram(hist): +def GetGroupingLabelFromHistogram(hist): tags = hist.diagnostics.get(reserved_infos.STORY_TAGS.name) or [] tags_to_use = [t.split(':') for t in tags if ':' in t] diff --git a/dashboard/dashboard/common/histogram_helpers_test.py b/dashboard/dashboard/common/histogram_helpers_test.py index 458d3b9bc1..b90afad450 100644 --- a/dashboard/dashboard/common/histogram_helpers_test.py +++ b/dashboard/dashboard/common/histogram_helpers_test.py @@ -19,19 +19,19 @@ class HistogramHelpersTest(testing_common.TestCase): def setUp(self): super(HistogramHelpersTest, self).setUp() - def testGetTIRLabelFromHistogram_NoTags_ReturnsEmpty(self): + def testGetGroupingLabelFromHistogram_NoTags_ReturnsEmpty(self): hist = histogram_module.Histogram('hist', 'count') - self.assertEqual('', histogram_helpers.GetTIRLabelFromHistogram(hist)) + self.assertEqual('', histogram_helpers.GetGroupingLabelFromHistogram(hist)) - def testGetTIRLabelFromHistogram_NoValidTags_ReturnsEmpty(self): + def testGetGroupingLabelFromHistogram_NoValidTags_ReturnsEmpty(self): hist = histogram_module.Histogram('hist', 'count') histograms = histogram_set.HistogramSet([hist]) histograms.AddSharedDiagnosticToAllHistograms( reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['foo', 'bar'])) - self.assertEqual('', histogram_helpers.GetTIRLabelFromHistogram(hist)) + 
self.assertEqual('', histogram_helpers.GetGroupingLabelFromHistogram(hist)) - def testGetTIRLabelFromHistogram_ValidTags_SortsByKey(self): + def testGetGroupingLabelFromHistogram_ValidTags_SortsByKey(self): hist = histogram_module.Histogram('hist', 'count') histograms = histogram_set.HistogramSet([hist]) histograms.AddSharedDiagnosticToAllHistograms( @@ -39,7 +39,8 @@ def testGetTIRLabelFromHistogram_ValidTags_SortsByKey(self): generic_set.GenericSet( ['z:last', 'ignore', 'a:first', 'me', 'm:middle'])) self.assertEqual( - 'first_middle_last', histogram_helpers.GetTIRLabelFromHistogram(hist)) + 'first_middle_last', + histogram_helpers.GetGroupingLabelFromHistogram(hist)) def testComputeTestPathWithStory(self): hist = histogram_module.Histogram('hist', 'count') @@ -51,7 +52,7 @@ def testComputeTestPathWithStory(self): test_path = histogram_helpers.ComputeTestPath(hist) self.assertEqual('hist/http___story', test_path) - def testComputeTestPathWithTIRLabel(self): + def testComputeTestPathWithGroupingLabel(self): hist = histogram_module.Histogram('hist', 'count') histograms = histogram_set.HistogramSet([hist]) histograms.AddSharedDiagnosticToAllHistograms( diff --git a/dashboard/dashboard/common/utils.py b/dashboard/dashboard/common/utils.py index 9b9eaa5d19..ba823ed1cf 100644 --- a/dashboard/dashboard/common/utils.py +++ b/dashboard/dashboard/common/utils.py @@ -36,7 +36,7 @@ OAUTH_SCOPES = ( 'https://www.googleapis.com/auth/userinfo.email', ) -OAUTH_ENDPOINTS = ['/api/', '/add_histograms'] +OAUTH_ENDPOINTS = ['/api/', '/add_histograms', '/add_point'] _AUTOROLL_DOMAINS = ( 'chops-service-accounts.iam.gserviceaccount.com', @@ -293,14 +293,14 @@ class ParseTelemetryMetricFailed(Exception): def ParseTelemetryMetricParts(test_path): - """Parses a test path and returns the tir_label, measurement, and story. + """Parses a test path and returns the grouping_label, measurement, and story. Args: test_path_parts: A test path. 
Returns: - A tuple of (tir_label, measurement, story), or None if this doesn't appear - to be a telemetry test. + A tuple of (grouping_label, measurement, story), or None if this doesn't + appear to be a telemetry test. """ test_path_parts = test_path.split('/') metric_parts = test_path_parts[3:] @@ -312,7 +312,7 @@ def ParseTelemetryMetricParts(test_path): if len(metric_parts) == 2: return '', metric_parts[0], metric_parts[1] - # 3 part structure, so there's a TIR label in there. + # 3 part structure, so there's a grouping label in there. # ie. M/B/S/timeToFirstMeaningfulPaint_avg/load_tools/load_tools_weather if len(metric_parts) == 3: return metric_parts[1], metric_parts[0], metric_parts[2] diff --git a/dashboard/dashboard/cron_update_sheriff.py b/dashboard/dashboard/cron_update_sheriff.py new file mode 100644 index 0000000000..e4b159f03b --- /dev/null +++ b/dashboard/dashboard/cron_update_sheriff.py @@ -0,0 +1,63 @@ +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import + +from google.appengine.ext import deferred +from google.appengine.ext import ndb + +from dashboard.common import datastore_hooks +from dashboard.common import request_handler +from dashboard.models import anomaly_config +from dashboard.models import graph_data +from dashboard.models import sheriff as sheriff_module + + +_TASK_QUEUE_NAME = 'deprecate-tests-queue' +_TESTS_PER_QUERY = 100 + + +class CronUpdateSheriffHandler(request_handler.RequestHandler): + def get(self): + datastore_hooks.SetPrivilegedRequest() + _QueryTestsTask(start_cursor=None) + + def post(self): + datastore_hooks.SetPrivilegedRequest() + _QueryTestsTask(start_cursor=None) + + + +@ndb.synctasklet +def _QueryTestsTask(start_cursor=None, sheriffs=None, anomaly_configs=None): + if not sheriffs: + sheriffs = yield sheriff_module.Sheriff.query().fetch_async() + + if not anomaly_configs: + anomaly_configs = yield anomaly_config.AnomalyConfig.query().fetch_async() + + q = graph_data.TestMetadata.query() + q.filter(graph_data.TestMetadata.has_rows == True) + q.order(graph_data.TestMetadata.key) + keys, next_cursor, more = q.fetch_page( + _TESTS_PER_QUERY, start_cursor=start_cursor, keys_only=True) + + if more: + deferred.defer( + _QueryTestsTask, start_cursor=next_cursor, _queue=_TASK_QUEUE_NAME) + + yield [_DoTestUpdateSheriff(k, sheriffs, anomaly_configs) for k in keys] + + +@ndb.tasklet +def _DoTestUpdateSheriff(test_key, sheriffs, anomaly_configs): + test = yield test_key.get_async() + + changed = yield test.UpdateSheriffAsync( + sheriffs=sheriffs, anomaly_configs=anomaly_configs) + + if changed: + yield test.put_async() diff --git a/dashboard/dashboard/cron_update_sheriff_test.py b/dashboard/dashboard/cron_update_sheriff_test.py new file mode 100644 index 0000000000..bade0370a1 --- /dev/null +++ b/dashboard/dashboard/cron_update_sheriff_test.py @@ -0,0 +1,82 @@ +# Copyright 2019 The 
Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import + +import webapp2 +import webtest + +from dashboard import cron_update_sheriff +from dashboard.common import testing_common +from dashboard.common import utils +from dashboard.models import anomaly_config +from dashboard.models import sheriff as sheriff_module + + +_TESTS = [ + ['ChromiumPerf'], + ['mac'], + { + 'SunSpider': { + 'Total': { + 't': {}, + 't_ref': {}, + }, + }, + 'OtherTest': { + 'OtherMetric': { + 'foo1': {}, + 'foo2': {}, + }, + }, + } +] + + +class CronSheriffUpdateTest(testing_common.TestCase): + + def setUp(self): + super(CronSheriffUpdateTest, self).setUp() + app = webapp2.WSGIApplication([( + '/cron/update_sheriff', cron_update_sheriff.CronUpdateSheriffHandler)]) + self.testapp = webtest.TestApp(app) + cron_update_sheriff._TESTS_PER_QUERY = 1 + + def testPost_UpdatesSheriff(self): + testing_common.AddTests(*_TESTS) + + sheriff_module.Sheriff( + id='s1', email='a@chromium.org', patterns=[ + '*/*/SunSpider/Total']).put() + + t = utils.TestKey('ChromiumPerf/mac/SunSpider/Total').get() + self.assertIsNone(t.sheriff) + self.assertIsNone(t.overridden_anomaly_config) + + self.testapp.post('/cron/update_sheriff') + self.ExecuteDeferredTasks(cron_update_sheriff._TASK_QUEUE_NAME) + + t = utils.TestKey('ChromiumPerf/mac/SunSpider/Total').get() + self.assertIsNotNone(t.sheriff) + self.assertIsNone(t.overridden_anomaly_config) + + def testPost_UpdatesAnomalyConfig(self): + testing_common.AddTests(*_TESTS) + + anomaly_config.AnomalyConfig( + id='anomaly_config1', config='', + patterns=['ChromiumPerf/mac/SunSpider/Total']).put() + + t = utils.TestKey('ChromiumPerf/mac/SunSpider/Total').get() + self.assertIsNone(t.sheriff) + self.assertIsNone(t.overridden_anomaly_config) + + 
self.testapp.post('/cron/update_sheriff') + self.ExecuteDeferredTasks(cron_update_sheriff._TASK_QUEUE_NAME) + + t = utils.TestKey('ChromiumPerf/mac/SunSpider/Total').get() + self.assertIsNone(t.sheriff) + self.assertIsNotNone(t.overridden_anomaly_config) diff --git a/dashboard/dashboard/dispatcher.py b/dashboard/dashboard/dispatcher.py index e0a632fb4b..782574c87e 100644 --- a/dashboard/dashboard/dispatcher.py +++ b/dashboard/dashboard/dispatcher.py @@ -19,6 +19,7 @@ from dashboard import bug_details from dashboard import buildbucket_job_status from dashboard import create_health_report +from dashboard import cron_update_sheriff from dashboard import debug_alert from dashboard import delete_test_data from dashboard import deprecate_tests @@ -104,6 +105,7 @@ buildbucket_job_status.BuildbucketJobStatusHandler), ('/create_health_report', create_health_report.CreateHealthReportHandler), ('/configs/update', sheriff_config_poller.ConfigsUpdateHandler), + ('/cron/update_sheriff', cron_update_sheriff.CronUpdateSheriffHandler), ('/debug_alert', debug_alert.DebugAlertHandler), ('/delete_expired_entities', layered_cache_delete_expired.LayeredCacheDeleteExpiredHandler), diff --git a/dashboard/dashboard/models/graph_data.py b/dashboard/dashboard/models/graph_data.py index 00c81ff57f..0f74ad9cf9 100644 --- a/dashboard/dashboard/models/graph_data.py +++ b/dashboard/dashboard/models/graph_data.py @@ -279,10 +279,11 @@ def __init__(self, *args, **kwargs): @ndb.synctasklet def UpdateSheriff(self): - yield self.UpdateSheriffAsync() + r = yield self.UpdateSheriffAsync() + raise ndb.Return(r) @ndb.tasklet - def UpdateSheriffAsync(self): + def UpdateSheriffAsync(self, sheriffs=None, anomaly_configs=None): """This method is called before a TestMetadata is put into the datastore. 
Here, we check the key to make sure it is valid and check the sheriffs and @@ -297,7 +298,11 @@ def UpdateSheriffAsync(self): # Set the sheriff to the first sheriff (alphabetically by sheriff name) # that has a test pattern that matches this test. - sheriffs = yield sheriff_module.Sheriff.query().fetch_async() + old_sheriff = self.sheriff + old_anomaly_config = self.overridden_anomaly_config + + if not sheriffs: + sheriffs = yield sheriff_module.Sheriff.query().fetch_async() self.sheriff = None for sheriff_entity in sheriffs: for pattern in sheriff_entity.patterns: @@ -311,7 +316,8 @@ def UpdateSheriffAsync(self): # that more specifically matches the test are given higher priority. # ie. */*/*/foo is chosen over */*/*/* self.overridden_anomaly_config = None - anomaly_configs = yield anomaly_config.AnomalyConfig.query().fetch_async() + if not anomaly_configs: + anomaly_configs = yield anomaly_config.AnomalyConfig.query().fetch_async() anomaly_data_list = [] for e in anomaly_configs: for p in e.patterns: @@ -321,6 +327,10 @@ def UpdateSheriffAsync(self): if anomaly_config_to_use: self.overridden_anomaly_config = anomaly_config_to_use.key + raise ndb.Return( + self.sheriff != old_sheriff or + self.overridden_anomaly_config != old_anomaly_config) + def CreateCallback(self): """Called when the entity is first saved.""" if len(self.key.id().split('/')) > 3: diff --git a/dashboard/dashboard/pinpoint/elements/jobs-page/new-job-fab.html b/dashboard/dashboard/pinpoint/elements/jobs-page/new-job-fab.html index 8a0d9eb568..d55ca7c6e5 100644 --- a/dashboard/dashboard/pinpoint/elements/jobs-page/new-job-fab.html +++ b/dashboard/dashboard/pinpoint/elements/jobs-page/new-job-fab.html @@ -42,7 +42,44 @@ } paper-dialog { - width: 30em; + width: 50em; + } + + .divider { + width: 100%; + border-bottom: 1px dashed black; + line-height: 0.1em; + margin: 2em 0em; + text-align: center; + font-size: 1.5em; + font-weight: lighter; + } + + .divider span { + background: white; + padding: 0 
1em; + } + + .horizontal { + display: inline-flex; + width: 100%; + } + + .column { + width: 50%; + } + + .rightPad { + margin-right: 0.25em; + } + + #chartDropdown { + width: 75%; + margin-right: 0.5em; + } + + #statisticDropdown { + width: 25%; } .error { @@ -99,14 +136,18 @@

Run a try job

+
+ Benchmark Configuration +
+ - - - - - - Run a try job - - +
+
+ + +
+
+ + +
+
+ +
+
+
+ + +
+ +
+
+ + +
+
- - + - -