Devuan deployment of britney2
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 

1126 lines
50 KiB

  1. # -*- coding: utf-8 -*-
  2. # Copyright (C) 2013 - 2016 Canonical Ltd.
  3. # Authors:
  4. # Colin Watson <cjwatson@ubuntu.com>
  5. # Jean-Baptiste Lallement <jean-baptiste.lallement@canonical.com>
  6. # Martin Pitt <martin.pitt@ubuntu.com>
  7. # This program is free software; you can redistribute it and/or modify
  8. # it under the terms of the GNU General Public License as published by
  9. # the Free Software Foundation; either version 2 of the License, or
  10. # (at your option) any later version.
  11. # This program is distributed in the hope that it will be useful,
  12. # but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. # GNU General Public License for more details.
  15. import calendar
  16. import collections
  17. from copy import deepcopy
  18. from enum import Enum
  19. import os
  20. import json
  21. import tarfile
  22. import io
  23. import itertools
  24. import re
  25. import sys
  26. import time
  27. import urllib.parse
  28. from urllib.request import urlopen
  29. import apt_pkg
  30. import britney2.hints
  31. from britney2 import SuiteClass
  32. from britney2.policies.policy import BasePolicy, PolicyVerdict
  33. from britney2.utils import iter_except
  34. class Result(Enum):
  35. FAIL = 1
  36. PASS = 2
  37. NEUTRAL = 3
  38. NONE = 4
# HTML badges shown on the excuses page, keyed by the status strings this
# policy produces.  Values are ready-to-embed <span> snippets; the background
# colours group the statuses: green = pass, yellow = non-blocking,
# red = blocking, blue = still running.
EXCUSES_LABELS = {
    "PASS": '<span style="background:#87d96c">Pass</span>',
    "NEUTRAL": '<span style="background:#e5c545">No test results</span>',
    "FAIL": '<span style="background:#ff6666">Failed</span>',
    "ALWAYSFAIL": '<span style="background:#e5c545">Not a regression</span>',
    "REGRESSION": '<span style="background:#ff6666">Regression</span>',
    "IGNORE-FAIL": '<span style="background:#e5c545">Ignored failure</span>',
    "RUNNING": '<span style="background:#99ddff">Test in progress</span>',
    "RUNNING-REFERENCE": '<span style="background:#ff6666">Reference test in progress, but real test failed already</span>',
    "RUNNING-ALWAYSFAIL": '<span style="background:#99ddff">Test in progress (will not be considered a regression)</span>',
}

# Pseudo-trigger used for baseline ("reference") test runs.
REF_TRIG = 'migration-reference/0'

# Seconds per day.
SECPERDAY = 24 * 60 * 60
  52. def srchash(src):
  53. '''archive hash prefix for source package'''
  54. if src.startswith('lib'):
  55. return src[:4]
  56. else:
  57. return src[0]
  58. def added_pkgs_compared_to_target_suite(package_ids, target_suite, *, invert=False):
  59. if invert:
  60. pkgs_ids_to_ignore = package_ids - set(target_suite.which_of_these_are_in_the_suite(package_ids))
  61. names_ignored = {p.package_name for p in pkgs_ids_to_ignore}
  62. else:
  63. names_ignored = {p.package_name for p in target_suite.which_of_these_are_in_the_suite(package_ids)}
  64. yield from (p for p in package_ids if p.package_name not in names_ignored)
  65. def all_leaf_results(test_results):
  66. for trigger in test_results.values():
  67. for arch in trigger.values():
  68. yield from arch.values()
  69. class AutopkgtestPolicy(BasePolicy):
  70. """autopkgtest regression policy for source migrations
  71. Run autopkgtests for the excuse and all of its reverse dependencies, and
  72. reject the upload if any of those regress.
  73. """
  74. def __init__(self, options, suite_info):
  75. super().__init__('autopkgtest', options, suite_info, {SuiteClass.PRIMARY_SOURCE_SUITE})
  76. # tests requested in this and previous runs
  77. # trigger -> src -> [arch]
  78. self.pending_tests = None
  79. self.pending_tests_file = os.path.join(self.state_dir, 'autopkgtest-pending.json')
  80. self.testsuite_triggers = {}
  81. self.result_in_baseline_cache = collections.defaultdict(dict)
  82. # results map: trigger -> src -> arch -> [passed, version, run_id, seen]
  83. # - trigger is "source/version" of an unstable package that triggered
  84. # this test run.
  85. # - "passed" is a bool
  86. # - "version" is the package version of "src" of that test
  87. # - "run_id" is an opaque ID that identifies a particular test run for
  88. # a given src/arch.
  89. # - "seen" is an approximate time stamp of the test run. How this is
  90. # deduced depends on the interface used.
  91. self.test_results = {}
  92. if self.options.adt_shared_results_cache:
  93. self.results_cache_file = self.options.adt_shared_results_cache
  94. else:
  95. self.results_cache_file = os.path.join(self.state_dir, 'autopkgtest-results.cache')
  96. try:
  97. self.options.adt_ppas = self.options.adt_ppas.strip().split()
  98. except AttributeError:
  99. self.options.adt_ppas = []
  100. self.swift_container = 'autopkgtest-' + options.series
  101. if self.options.adt_ppas:
  102. self.swift_container += '-' + options.adt_ppas[-1].replace('/', '-')
  103. # restrict adt_arches to architectures we actually run for
  104. self.adt_arches = []
  105. for arch in self.options.adt_arches.split():
  106. if arch in self.options.architectures:
  107. self.adt_arches.append(arch)
  108. else:
  109. self.logger.info("Ignoring ADT_ARCHES %s as it is not in architectures list", arch)
  110. def register_hints(self, hint_parser):
  111. hint_parser.register_hint_type('force-badtest', britney2.hints.split_into_one_hint_per_package)
  112. hint_parser.register_hint_type('force-skiptest', britney2.hints.split_into_one_hint_per_package)
    def initialise(self, britney):
        """Load cached and new test results and set up the request channel.

        Reads the on-disk results cache (if present), ingests new results
        and pending tests from a debci JSON file when ADT_SWIFT_URL is a
        file:// URL, and finally opens either an AMQP connection or a
        file-based queue (ADT_AMQP) for submitting test requests; the
        request channel is skipped entirely in dry-run mode.
        """
        super().initialise(britney)
        # We want to use the "current" time stamp in multiple locations
        self._now = round(time.time())
        # compute inverse Testsuite-Triggers: map, unifying all series
        self.logger.info('Building inverse testsuite_triggers map')
        for suite in self.suite_info:
            for src, data in suite.sources.items():
                for trigger in data.testsuite_triggers:
                    self.testsuite_triggers.setdefault(trigger, set()).add(src)
        target_suite_name = self.suite_info.target_suite.name
        os.makedirs(self.state_dir, exist_ok=True)
        self.read_pending_tests()
        if not hasattr(self.options, 'adt_baseline'):
            # Make adt_baseline optional
            setattr(self.options, 'adt_baseline', None)
        # read the cached results that we collected so far
        if os.path.exists(self.results_cache_file):
            with open(self.results_cache_file) as f:
                test_results = json.load(f)
            self.test_results = self.check_and_upgrade_cache(test_results)
            self.logger.info('Read previous results from %s', self.results_cache_file)
            # The cache can contain results against versions of packages that
            # are not in any suite anymore. Strip those out, as we don't want
            # to use those results.
            if self.options.adt_baseline == 'reference':
                self.filter_results_for_old_versions()
        else:
            self.logger.info('%s does not exist, re-downloading all results from swift', self.results_cache_file)
        # read in the new results
        if self.options.adt_swift_url.startswith('file://'):
            debci_file = self.options.adt_swift_url[7:]
            if os.path.exists(debci_file):
                with open(debci_file) as f:
                    test_results = json.load(f)
                self.logger.info('Read new results from %s', debci_file)
                # With debci, pending tests are determined from the debci file
                self.pending_tests = {}
                for res in test_results['results']:
                    # Blacklisted tests don't get a version
                    if res['version'] is None:
                        res['version'] = 'blacklisted'
                    # 'updated_at' is sliced to drop the trailing sub-second/
                    # timezone suffix before parsing — assumes a fixed-width
                    # ISO-8601-ish format; TODO confirm against debci output
                    (test_suite, triggers, src, arch, ver, status, run_id, seen) = ([
                        res['suite'],
                        res['trigger'],
                        res['package'],
                        res['arch'],
                        res['version'],
                        res['status'],
                        str(res['run_id']),
                        round(calendar.timegm(time.strptime(res['updated_at'][0:-5], '%Y-%m-%dT%H:%M:%S')))])
                    if test_suite != target_suite_name:
                        # not requested for this target suite, so ignore
                        continue
                    if triggers is None:
                        # not requested for this policy, so ignore
                        continue
                    for trigger in triggers.split():
                        if status is None:
                            # still running => pending
                            arch_list = self.pending_tests.setdefault(trigger, {}).setdefault(src, [])
                            if arch not in arch_list:
                                self.logger.info('Pending autopkgtest %s on %s to verify %s', src, arch, trigger)
                                arch_list.append(arch)
                                arch_list.sort()
                        elif status == 'tmpfail':
                            # let's see if we still need it
                            continue
                        else:
                            self.logger.debug('Results %s %s %s added', src, trigger, status)
                            self.add_trigger_to_results(trigger, src, ver, arch, run_id, seen, Result[status.upper()])
            else:
                self.logger.info('%s does not exist, no new data will be processed', debci_file)
        # we need sources, binaries, and installability tester, so for now
        # remember the whole britney object
        self.britney = britney
        # Initialize AMQP connection
        self.amqp_channel = None
        self.amqp_file = None
        if self.options.dry_run:
            return
        amqp_url = self.options.adt_amqp
        if amqp_url.startswith('amqp://'):
            # imported lazily so the dependency is only needed when AMQP is used
            import amqplib.client_0_8 as amqp
            # depending on the setup we connect to a AMQP server
            creds = urllib.parse.urlsplit(amqp_url, allow_fragments=False)
            self.amqp_con = amqp.Connection(creds.hostname, userid=creds.username,
                                            password=creds.password)
            self.amqp_channel = self.amqp_con.channel()
            self.logger.info('Connected to AMQP server')
        elif amqp_url.startswith('file://'):
            # or in Debian and in testing mode, adt_amqp will be a file:// URL
            self.amqp_file = amqp_url[7:]
        else:
            raise RuntimeError('Unknown ADT_AMQP schema %s' % amqp_url.split(':', 1)[0])
  208. def check_and_upgrade_cache(self, test_results):
  209. for result in all_leaf_results(test_results):
  210. try:
  211. result[0] = Result[result[0]]
  212. except KeyError:
  213. # Legacy support
  214. if isinstance(result[0], type(True)):
  215. if result[0]:
  216. result[0] = Result.PASS
  217. else:
  218. result[0] = Result.FAIL
  219. else:
  220. raise
  221. # More legacy support
  222. try:
  223. dummy = result[3]
  224. except IndexError:
  225. result.append(self._now)
  226. return test_results
  227. def filter_results_for_old_versions(self):
  228. '''Remove results for old versions from the cache'''
  229. test_results = self.test_results
  230. test_results_new = deepcopy(test_results)
  231. for (trigger, trigger_data) in test_results.items():
  232. for (src, results) in trigger_data.items():
  233. for (arch, result) in results.items():
  234. if not self.test_version_in_any_suite(src, result[1]):
  235. del test_results_new[trigger][src][arch]
  236. if len(test_results_new[trigger][src]) == 0:
  237. del test_results_new[trigger][src]
  238. if len(test_results_new[trigger]) == 0:
  239. del test_results_new[trigger]
  240. self.test_results = test_results_new
  241. def test_version_in_any_suite(self, src, version):
  242. '''Check if the mentioned version of src is found in a suite
  243. To prevent regressions in the target suite, the result should be
  244. from a test with the version of the package in either the source
  245. suite or the target suite. The source suite is also valid,
  246. because due to versioned test dependencies and Breaks/Conflicts
  247. relations, regularly the version in the source suite is used
  248. during testing.
  249. '''
  250. versions = set()
  251. for suite in self.suite_info:
  252. try:
  253. srcinfo = suite.sources[src]
  254. except KeyError:
  255. continue
  256. versions.add(srcinfo.version)
  257. valid_version = False
  258. for ver in versions:
  259. if apt_pkg.version_compare(ver, version) == 0:
  260. valid_version = True
  261. break
  262. return valid_version
  263. def save_state(self, britney):
  264. super().save_state(britney)
  265. # update the results on-disk cache, unless we are using a r/o shared one
  266. if not self.options.adt_shared_results_cache:
  267. self.logger.info('Updating results cache')
  268. test_results = deepcopy(self.test_results)
  269. for result in all_leaf_results(test_results):
  270. result[0] = result[0].name
  271. with open(self.results_cache_file + '.new', 'w') as f:
  272. json.dump(test_results, f, indent=2)
  273. os.rename(self.results_cache_file + '.new', self.results_cache_file)
  274. # update the pending tests on-disk cache
  275. self.logger.info('Updating pending requested tests in %s', self.pending_tests_file)
  276. with open(self.pending_tests_file + '.new', 'w') as f:
  277. json.dump(self.pending_tests, f, indent=2)
  278. os.rename(self.pending_tests_file + '.new', self.pending_tests_file)
    def apply_src_policy_impl(self, tests_info, item, source_data_tdist, source_data_srcdist, excuse):
        """Compute the autopkgtest verdict for one source migration item.

        Delays (REJECTED_TEMPORARILY) until the package is built, requests
        tests per architecture, renders per-test HTML into the excuse, and
        honours force-skiptest hints as well as the optional bounty/penalty
        scoring.  Returns the final PolicyVerdict.
        """
        # initialize
        verdict = PolicyVerdict.PASS
        # NOTE(review): 'elegible' (sic) — kept to match the rest of the file
        elegible_for_bounty = False
        source_name = item.package
        results_info = []
        # skip/delay autopkgtests until new package is built somewhere
        if not source_data_srcdist.binaries:
            # NOTE(review): '' inside this single-quoted literal concatenates
            # to "hasnt" in the log output — probably meant \' ; confirm
            self.logger.info('%s hasn''t been built anywhere, skipping autopkgtest policy', excuse.name)
            verdict = PolicyVerdict.REJECTED_TEMPORARILY
            excuse.add_verdict_info(verdict, "nothing built yet, autopkgtest delayed")
        if 'all' in excuse.missing_builds:
            self.logger.info('%s hasn''t been built for arch:all, skipping autopkgtest policy', source_name)
            verdict = PolicyVerdict.REJECTED_TEMPORARILY
            excuse.add_verdict_info(verdict, "arch:all not built yet, autopkgtest delayed")
        if verdict == PolicyVerdict.PASS:
            self.logger.debug('Checking autopkgtests for %s', source_name)
            trigger = source_name + '/' + source_data_srcdist.version
            # build a (testsrc, testver) → arch → (status, log_url) map; we trigger/check test
            # results per architecture for technical/efficiency reasons, but we
            # want to evaluate and present the results by tested source package
            # first
            pkg_arch_result = collections.defaultdict(dict)
            for arch in self.adt_arches:
                if arch in excuse.missing_builds:
                    verdict = PolicyVerdict.REJECTED_TEMPORARILY
                    self.logger.info('%s hasn''t been built on arch %s, delay autopkgtest there', source_name, arch)
                    excuse.add_verdict_info(verdict, "arch:%s not built yet, autopkgtest delayed there" % arch)
                elif arch in excuse.unsatisfiable_on_archs:
                    verdict = PolicyVerdict.REJECTED_TEMPORARILY
                    self.logger.info('%s is uninstallable on arch %s, delay autopkgtest there', source_name, arch)
                    excuse.add_verdict_info(verdict, "uninstallable on arch %s, autopkgtest delayed there" % arch)
                else:
                    self.request_tests_for_source(item, arch, source_data_srcdist, pkg_arch_result)
            # add test result details to Excuse
            cloud_url = self.options.adt_ci_url + "packages/%(h)s/%(s)s/%(r)s/%(a)s"
            for (testsrc, testver) in sorted(pkg_arch_result):
                arch_results = pkg_arch_result[(testsrc, testver)]
                # r is the set of per-arch status strings for this test
                r = {v[0] for v in arch_results.values()}
                if 'REGRESSION' in r:
                    verdict = PolicyVerdict.REJECTED_PERMANENTLY
                elif ('RUNNING' in r or 'RUNNING-REFERENCE' in r) and verdict == PolicyVerdict.PASS:
                    verdict = PolicyVerdict.REJECTED_TEMPORARILY
                # skip version if still running on all arches
                if not r - {'RUNNING', 'RUNNING-ALWAYSFAIL'}:
                    testver = None
                # A source package is elegible for the bounty if it has tests
                # of its own that pass on all tested architectures.
                if testsrc == source_name and r == {'PASS'}:
                    elegible_for_bounty = True
                if testver:
                    testname = '%s/%s' % (testsrc, testver)
                else:
                    testname = testsrc
                html_archmsg = []
                for arch in sorted(arch_results):
                    (status, run_id, log_url) = arch_results[arch]
                    artifact_url = None
                    retry_url = None
                    history_url = None
                    if self.options.adt_ppas:
                        if log_url.endswith('log.gz'):
                            artifact_url = log_url.replace('log.gz', 'artifacts.tar.gz')
                    else:
                        history_url = cloud_url % {
                            'h': srchash(testsrc), 's': testsrc,
                            'r': self.options.series, 'a': arch}
                    if status == 'REGRESSION':
                        if self.options.adt_retry_url_mech == 'run_id':
                            retry_url = self.options.adt_ci_url + 'api/v1/retry/' + run_id
                        else:
                            retry_url = self.options.adt_ci_url + 'request.cgi?' + \
                                urllib.parse.urlencode([('release', self.options.series),
                                                        ('arch', arch),
                                                        ('package', testsrc),
                                                        ('trigger', trigger)] +
                                                       [('ppa', p) for p in self.options.adt_ppas])
                    tests_info.setdefault(testname, {})[arch] = \
                        [status, log_url, history_url, artifact_url, retry_url]
                    # render HTML snippet for testsrc entry for current arch
                    if history_url:
                        message = '<a href="%s">%s</a>' % (history_url, arch)
                    else:
                        message = arch
                    message += ': <a href="%s">%s</a>' % (log_url, EXCUSES_LABELS[status])
                    if retry_url:
                        message += ' <a href="%s" style="text-decoration: none;">♻ </a> ' % retry_url
                    if artifact_url:
                        message += ' <a href="%s">[artifacts]</a>' % artifact_url
                    html_archmsg.append(message)
                # render HTML line for testsrc entry, but only when action is
                # or may be required
                if r - {'PASS', 'NEUTRAL', 'RUNNING-ALWAYSFAIL', 'ALWAYSFAIL'}:
                    results_info.append("autopkgtest for %s: %s" % (testname, ', '.join(html_archmsg)))
        if verdict != PolicyVerdict.PASS:
            # check for force-skiptest hint
            hints = self.hints.search('force-skiptest', package=source_name, version=source_data_srcdist.version)
            if hints:
                excuse.addreason('skiptest')
                excuse.addinfo("Should wait for tests relating to %s %s, but forced by %s" %
                               (source_name, source_data_srcdist.version, hints[0].user))
                verdict = PolicyVerdict.PASS_HINTED
            else:
                excuse.addreason('autopkgtest')
        if self.options.adt_success_bounty and verdict == PolicyVerdict.PASS and elegible_for_bounty:
            excuse.add_bounty('autopkgtest', int(self.options.adt_success_bounty))
        if self.options.adt_regression_penalty and \
                verdict in {PolicyVerdict.REJECTED_PERMANENTLY, PolicyVerdict.REJECTED_TEMPORARILY}:
            excuse.add_penalty('autopkgtest', int(self.options.adt_regression_penalty))
            # In case we give penalties instead of blocking, we must always pass
            verdict = PolicyVerdict.PASS
        for i in results_info:
            if verdict.is_rejected:
                excuse.add_verdict_info(verdict, i)
            else:
                excuse.addinfo(i)
        return verdict
  396. #
  397. # helper functions
  398. #
  399. @classmethod
  400. def has_autodep8(kls, srcinfo):
  401. '''Check if package is covered by autodep8
  402. srcinfo is an item from self.britney.sources
  403. binaries is self.britney.binaries['unstable'][arch]
  404. '''
  405. # autodep8?
  406. for t in srcinfo.testsuite:
  407. if t.startswith('autopkgtest-pkg'):
  408. return True
  409. return False
    def request_tests_for_source(self, item, arch, source_data_srcdist, pkg_arch_result):
        """Request autopkgtests for one source/arch and collect their results.

        Builds the trigger string (the migrating source plus every other
        source whose binaries must come from the source suite for the tests
        to be installable), submits any outstanding test requests, and
        records per-arch results into pkg_arch_result keyed by
        (testsrc, version).
        """
        pkg_universe = self.britney.pkg_universe
        target_suite = self.suite_info.target_suite
        sources_s = item.suite.sources
        packages_s_a = item.suite.binaries[arch]
        source_name = item.package
        source_version = source_data_srcdist.version
        # request tests (unless they were already requested earlier or have a result)
        tests = self.tests_for_source(source_name, source_version, arch)
        is_huge = False
        try:
            is_huge = len(tests) > int(self.options.adt_huge)
        except AttributeError:
            # ADT_HUGE not configured: nothing is considered huge
            pass
        # Here we figure out what is required from the source suite
        # for the test to install successfully.
        #
        # Loop over all binary packages from trigger and
        # recursively look up which *versioned* dependencies are
        # only satisfied in the source suite.
        #
        # For all binaries found, look up which packages they
        # break/conflict with in the target suite, but not in the
        # source suite. The main reason to do this is to cover test
        # dependencies, so we will check Testsuite-Triggers as
        # well.
        #
        # OI: do we need to do the first check in a smart way
        # (i.e. only for the packages that are actully going to be
        # installed) for the breaks/conflicts set as well, i.e. do
        # we need to check if any of the packages that we now
        # enforce being from the source suite, actually have new
        # versioned depends and new breaks/conflicts.
        #
        # For all binaries found, add the set of unique source
        # packages to the list of triggers.
        bin_triggers = set()
        bin_new = set(source_data_srcdist.binaries)
        for binary in iter_except(bin_new.pop, KeyError):
            if binary in bin_triggers:
                continue
            bin_triggers.add(binary)
            # Check if there is a dependency that is not
            # available in the target suite.
            # We add slightly too much here, because new binaries
            # will also show up, but they are already properly
            # installed. Nevermind.
            depends = pkg_universe.dependencies_of(binary)
            # depends is a frozenset{frozenset{BinaryPackageId, ..}}
            for deps_of_bin in depends:
                # We'll figure out which version later
                bin_new.update(added_pkgs_compared_to_target_suite(deps_of_bin, target_suite))
        # Check if the package breaks/conflicts anything. We might
        # be adding slightly too many source packages due to the
        # check here as a binary package that is broken may be
        # coming from a different source package in the source
        # suite. Nevermind.
        bin_broken = set()
        for binary in bin_triggers:
            # broken is a frozenset{BinaryPackageId, ..}
            broken = pkg_universe.negative_dependencies_of(binary)
            # We'll figure out which version later
            bin_broken.update(added_pkgs_compared_to_target_suite(broken, target_suite, invert=True))
        bin_triggers.update(bin_broken)
        triggers = set()
        for binary in bin_triggers:
            if binary.architecture == arch:
                try:
                    source_of_bin = packages_s_a[binary.package_name].source
                    triggers.add(
                        source_of_bin + '/' +
                        sources_s[source_of_bin].version)
                except KeyError:
                    # Apparently the package was removed from
                    # unstable e.g. if packages are replaced
                    # (e.g. -dbg to -dbgsym)
                    pass
                if binary not in source_data_srcdist.binaries:
                    for tdep_src in self.testsuite_triggers.get(binary.package_name, set()):
                        try:
                            triggers.add(
                                tdep_src + '/' +
                                sources_s[tdep_src].version)
                        except KeyError:
                            # Apparently the source was removed from
                            # unstable (testsuite_triggers are unified
                            # over all suites)
                            pass
        trigger = source_name + '/' + source_version
        triggers.discard(trigger)
        trigger_str = trigger
        if triggers:
            # Make the order (minus the "real" trigger) deterministic
            trigger_str += ' ' + ' '.join(sorted(list(triggers)))
        for (testsrc, testver) in tests:
            self.pkg_test_request(testsrc, arch, trigger_str, huge=is_huge)
            (result, real_ver, run_id, url) = self.pkg_test_result(testsrc, testver, arch, trigger)
            pkg_arch_result[(testsrc, real_ver)][arch] = (result, run_id, url)
    def tests_for_source(self, src, ver, arch):
        '''Iterate over all tests that should be run for given source and arch

        Returns a list of (test source, version) pairs: the package's own
        test (if any), plus the tests of reverse dependencies and
        Testsuite-Triggers of its binaries, with special-casing for gcc-*
        and linux/linux-meta packages.
        '''
        source_suite = self.suite_info.primary_source_suite
        target_suite = self.suite_info.target_suite
        sources_info = target_suite.sources
        binaries_info = target_suite.binaries[arch]
        reported_pkgs = set()
        tests = []
        # gcc-N triggers tons of tests via libgcc1, but this is mostly in vain:
        # gcc already tests itself during build, and it is being used from
        # -proposed, so holding it back on a dozen unrelated test failures
        # serves no purpose. Just check some key packages which actually use
        # gcc during the test, and doxygen as an example for a libgcc user.
        if src.startswith('gcc-'):
            if re.match(r'gcc-\d$', src) or src == 'gcc-defaults':
                # add gcc's own tests, if it has any
                srcinfo = source_suite.sources[src]
                if 'autopkgtest' in srcinfo.testsuite:
                    tests.append((src, ver))
                for test in ['binutils', 'fglrx-installer', 'doxygen', 'linux']:
                    try:
                        tests.append((test, sources_info[test].version))
                    except KeyError:
                        # no package in that series? *shrug*, then not (mostly for testing)
                        pass
                return tests
            else:
                # for other compilers such as gcc-snapshot etc. we don't need
                # to trigger anything
                return []
        # Debian doesn't have linux-meta, but Ubuntu does
        # for linux themselves we don't want to trigger tests -- these should
        # all come from linux-meta*. A new kernel ABI without a corresponding
        # -meta won't be installed and thus we can't sensibly run tests against
        # it.
        if src.startswith('linux') and src.replace('linux', 'linux-meta') in sources_info:
            return []
        # we want to test the package itself, if it still has a test in unstable
        srcinfo = source_suite.sources[src]
        if 'autopkgtest' in srcinfo.testsuite or self.has_autodep8(srcinfo):
            reported_pkgs.add(src)
            tests.append((src, ver))
        extra_bins = []
        # Debian doesn't have linux-meta, but Ubuntu does
        # Hack: For new kernels trigger all DKMS packages by pretending that
        # linux-meta* builds a "dkms" binary as well. With that we ensure that we
        # don't regress DKMS drivers with new kernel versions.
        if src.startswith('linux-meta'):
            # does this have any image on this arch?
            for pkg_id in srcinfo.binaries:
                if pkg_id.architecture == arch and '-image' in pkg_id.package_name:
                    try:
                        extra_bins.append(binaries_info['dkms'].pkg_id)
                    except KeyError:
                        pass
        pkg_universe = self.britney.pkg_universe
        # plus all direct reverse dependencies and test triggers of its
        # binaries which have an autopkgtest
        for binary in itertools.chain(srcinfo.binaries, extra_bins):
            rdeps = pkg_universe.reverse_dependencies_of(binary)
            for rdep in rdeps:
                try:
                    rdep_src = binaries_info[rdep.package_name].source
                    # Don't re-trigger the package itself here; this should
                    # have been done above if the package still continues to
                    # have an autopkgtest in unstable.
                    if rdep_src == src:
                        continue
                except KeyError:
                    continue
                rdep_src_info = sources_info[rdep_src]
                if 'autopkgtest' in rdep_src_info.testsuite or self.has_autodep8(rdep_src_info):
                    if rdep_src not in reported_pkgs:
                        tests.append((rdep_src, rdep_src_info.version))
                        reported_pkgs.add(rdep_src)
            for tdep_src in self.testsuite_triggers.get(binary.package_name, set()):
                if tdep_src not in reported_pkgs:
                    try:
                        tdep_src_info = sources_info[tdep_src]
                    except KeyError:
                        continue
                    if 'autopkgtest' in tdep_src_info.testsuite or self.has_autodep8(tdep_src_info):
                        # only trigger if the triggering source ships a binary
                        # on this architecture
                        for pkg_id in tdep_src_info.binaries:
                            if pkg_id.architecture == arch:
                                tests.append((tdep_src, tdep_src_info.version))
                                reported_pkgs.add(tdep_src)
                                break
        tests.sort(key=lambda s_v: s_v[0])
        return tests
  597. def read_pending_tests(self):
  598. '''Read pending test requests from previous britney runs
  599. Initialize self.pending_tests with that data.
  600. '''
  601. assert self.pending_tests is None, 'already initialized'
  602. if not os.path.exists(self.pending_tests_file):
  603. self.logger.info('No %s, starting with no pending tests', self.pending_tests_file)
  604. self.pending_tests = {}
  605. return
  606. with open(self.pending_tests_file) as f:
  607. self.pending_tests = json.load(f)
  608. self.logger.info('Read pending requested tests from %s: %s', self.pending_tests_file, self.pending_tests)
  609. def latest_run_for_package(self, src, arch):
  610. '''Return latest run ID for src on arch'''
  611. # this requires iterating over all triggers and thus is expensive;
  612. # cache the results
  613. try:
  614. return self.latest_run_for_package._cache[src][arch]
  615. except KeyError:
  616. pass
  617. latest_run_id = ''
  618. for srcmap in self.test_results.values():
  619. try:
  620. run_id = srcmap[src][arch][2]
  621. except KeyError:
  622. continue
  623. if run_id > latest_run_id:
  624. latest_run_id = run_id
  625. self.latest_run_for_package._cache[arch] = latest_run_id
  626. return latest_run_id
  627. latest_run_for_package._cache = collections.defaultdict(dict)
    def fetch_swift_results(self, swift_url, src, arch):
        '''Download new results for source package/arch from swift

        Lists the container for runs newer than the latest cached run_id
        and hands each result.tar path to fetch_one_result().  Exits the
        whole program on transient fetch failures so the next run retries
        instead of silently re-requesting tests.
        '''
        # Download results for one particular src/arch at most once in every
        # run, as this is expensive
        done_entry = src + '/' + arch
        if done_entry in self.fetch_swift_results._done:
            return
        self.fetch_swift_results._done.add(done_entry)
        # prepare query: get all runs with a timestamp later than the latest
        # run_id for this package/arch; '@' is at the end of each run id, to
        # mark the end of a test run directory path
        # example: <autopkgtest-wily>wily/amd64/libp/libpng/20150630_054517@/result.tar
        query = {'delimiter': '@',
                 'prefix': '%s/%s/%s/%s/' % (self.options.series, arch, srchash(src), src)}
        # determine latest run_id from results
        if not self.options.adt_shared_results_cache:
            latest_run_id = self.latest_run_for_package(src, arch)
            if latest_run_id:
                query['marker'] = query['prefix'] + latest_run_id
        # request new results from swift
        url = os.path.join(swift_url, self.swift_container)
        url += '?' + urllib.parse.urlencode(query)
        f = None
        try:
            f = urlopen(url, timeout=30)
            if f.getcode() == 200:
                result_paths = f.read().decode().strip().splitlines()
            elif f.getcode() == 204:  # No content
                result_paths = []
            else:
                # we should not ever end up here as we expect a HTTPError in
                # other cases; e. g. 3XX is something that tells us to adjust
                # our URLS, so fail hard on those
                raise NotImplementedError('fetch_swift_results(%s): cannot handle HTTP code %i' %
                                          (url, f.getcode()))
        except IOError as e:
            # 401 "Unauthorized" is swift's way of saying "container does not exist"
            if hasattr(e, 'code') and e.code == 401:
                self.logger.info('fetch_swift_results: %s does not exist yet or is inaccessible', url)
                return
            # Other status codes are usually a transient
            # network/infrastructure failure. Ignoring this can lead to
            # re-requesting tests which we already have results for, so
            # fail hard on this and let the next run retry.
            self.logger.error('Failure to fetch swift results from %s: %s', url, str(e))
            sys.exit(1)
        finally:
            if f is not None:
                f.close()
        for p in result_paths:
            self.fetch_one_result(
                os.path.join(swift_url, self.swift_container, p, 'result.tar'), src, arch)
    # per-run memo of already-fetched src/arch pairs
    fetch_swift_results._done = set()
  681. def fetch_one_result(self, url, src, arch):
  682. '''Download one result URL for source/arch
  683. Remove matching pending_tests entries.
  684. '''
  685. f = None
  686. try:
  687. f = urlopen(url, timeout=30)
  688. if f.getcode() == 200:
  689. tar_bytes = io.BytesIO(f.read())
  690. else:
  691. raise NotImplementedError('fetch_one_result(%s): cannot handle HTTP code %i' %
  692. (url, f.getcode()))
  693. except IOError as e:
  694. self.logger.error('Failure to fetch %s: %s', url, str(e))
  695. # we tolerate "not found" (something went wrong on uploading the
  696. # result), but other things indicate infrastructure problems
  697. if hasattr(e, 'code') and e.code == 404:
  698. return
  699. sys.exit(1)
  700. finally:
  701. if f is not None:
  702. f.close()
  703. try:
  704. with tarfile.open(None, 'r', tar_bytes) as tar:
  705. exitcode = int(tar.extractfile('exitcode').read().strip())
  706. srcver = tar.extractfile('testpkg-version').read().decode().strip()
  707. (ressrc, ver) = srcver.split()
  708. testinfo = json.loads(tar.extractfile('testinfo.json').read().decode())
  709. except (KeyError, ValueError, tarfile.TarError) as e:
  710. self.logger.error('%s is damaged, ignoring: %s', url, str(e))
  711. # ignore this; this will leave an orphaned request in autopkgtest-pending.json
  712. # and thus require manual retries after fixing the tmpfail, but we
  713. # can't just blindly attribute it to some pending test.
  714. return
  715. if src != ressrc:
  716. self.logger.error('%s is a result for package %s, but expected package %s', url, ressrc, src)
  717. return
  718. # parse recorded triggers in test result
  719. for e in testinfo.get('custom_environment', []):
  720. if e.startswith('ADT_TEST_TRIGGERS='):
  721. result_triggers = [i for i in e.split('=', 1)[1].split() if '/' in i]
  722. break
  723. else:
  724. self.logger.error('%s result has no ADT_TEST_TRIGGERS, ignoring')
  725. return
  726. run_id = os.path.basename(os.path.dirname(url))
  727. seen = round(calendar.timegm(time.strptime(run_id, '%Y%m%d_%H%M%S@')))
  728. # allow some skipped tests, but nothing else
  729. if exitcode in [0, 2]:
  730. result = Result.PASS
  731. elif exitcode == 8:
  732. result = Result.NEUTRAL
  733. else:
  734. result = Result.FAIL
  735. self.logger.info(
  736. 'Fetched test result for %s/%s/%s %s (triggers: %s): %s',
  737. src, ver, arch, run_id, result_triggers, result.name.lower())
  738. # remove matching test requests
  739. for trigger in result_triggers:
  740. self.remove_from_pending(trigger, src, arch)
  741. # add this result
  742. for trigger in result_triggers:
  743. self.add_trigger_to_results(trigger, src, ver, arch, run_id, seen, result)
  744. def remove_from_pending(self, trigger, src, arch):
  745. try:
  746. arch_list = self.pending_tests[trigger][src]
  747. arch_list.remove(arch)
  748. if not arch_list:
  749. del self.pending_tests[trigger][src]
  750. if not self.pending_tests[trigger]:
  751. del self.pending_tests[trigger]
  752. self.logger.info('-> matches pending request %s/%s for trigger %s', src, arch, trigger)
  753. except (KeyError, ValueError):
  754. self.logger.info('-> does not match any pending request for %s/%s', src, arch)
  755. def add_trigger_to_results(self, trigger, src, ver, arch, run_id, seen, status):
  756. # Ensure that we got a new enough version
  757. try:
  758. (trigsrc, trigver) = trigger.split('/', 1)
  759. except ValueError:
  760. self.logger.info('Ignoring invalid test trigger %s', trigger)
  761. return
  762. if trigsrc == src and apt_pkg.version_compare(ver, trigver) < 0:
  763. self.logger.debug('test trigger %s, but run for older version %s, ignoring', trigger, ver)
  764. return
  765. if self.options.adt_baseline == 'reference' and \
  766. not self.test_version_in_any_suite(src, ver):
  767. self.logger.debug(
  768. "Ignoring result for source %s and trigger %s as the tested version %s isn't found in any suite",
  769. src, trigger, ver)
  770. return
  771. result = self.test_results.setdefault(trigger, {}).setdefault(
  772. src, {}).setdefault(arch, [Result.FAIL, None, '', 0])
  773. # don't clobber existing passed results with non-passing ones from
  774. # re-runs, except for reference updates
  775. if status == Result.PASS or result[0] != Result.PASS or \
  776. (self.options.adt_baseline == 'reference' and trigger == REF_TRIG):
  777. result[0] = status
  778. result[1] = ver
  779. result[2] = run_id
  780. result[3] = seen
  781. def send_test_request(self, src, arch, trigger, huge=False):
  782. '''Send out AMQP request for testing src/arch for trigger
  783. If huge is true, then the request will be put into the -huge instead of
  784. normal queue.
  785. '''
  786. if self.options.dry_run:
  787. return
  788. params = {'triggers': [trigger]}
  789. if self.options.adt_ppas:
  790. params['ppas'] = self.options.adt_ppas
  791. qname = 'debci-ppa-%s-%s' % (self.options.series, arch)
  792. elif huge:
  793. qname = 'debci-huge-%s-%s' % (self.options.series, arch)
  794. else:
  795. qname = 'debci-%s-%s' % (self.options.series, arch)
  796. params = json.dumps(params)
  797. if self.amqp_channel:
  798. self.amqp_channel.basic_publish(amqp.Message(src + '\n' + params), routing_key=qname)
  799. else:
  800. assert self.amqp_file
  801. with open(self.amqp_file, 'a') as f:
  802. f.write('%s:%s %s\n' % (qname, src, params))
  803. def pkg_test_request(self, src, arch, full_trigger, huge=False):
  804. '''Request one package test for one particular trigger
  805. trigger is "pkgname/version" of the package that triggers the testing
  806. of src. If huge is true, then the request will be put into the -huge
  807. instead of normal queue.
  808. This will only be done if that test wasn't already requested in
  809. a previous run (i. e. if it's not already in self.pending_tests)
  810. or if there is already a fresh or a positive result for it. This
  811. ensures to download current results for this package before
  812. requesting any test.
  813. '''
  814. trigger = full_trigger.split()[0]
  815. uses_swift = not self.options.adt_swift_url.startswith('file://')
  816. try:
  817. result = self.test_results[trigger][src][arch]
  818. has_result = True
  819. except KeyError:
  820. has_result = False
  821. if has_result:
  822. result_state = result[0]
  823. version = result[1]
  824. baseline = self.result_in_baseline(src, arch)
  825. if result_state == Result.FAIL and \
  826. baseline[0] in {Result.PASS, Result.NEUTRAL} and \
  827. self.options.adt_retry_older_than and \
  828. result[3] + int(self.options.adt_retry_older_than) * SECPERDAY < self._now:
  829. # We might want to retry this failure, so continue
  830. pass
  831. elif not uses_swift:
  832. # We're done if we don't retrigger and we're not using swift
  833. return
  834. elif result_state in {Result.PASS, Result.NEUTRAL}:
  835. self.logger.debug('%s/%s triggered by %s already known', src, arch, trigger)
  836. return
  837. # Without swift we don't expect new results
  838. if uses_swift:
  839. self.logger.info('Checking for new results for failed %s/%s for trigger %s', src, arch, trigger)
  840. self.fetch_swift_results(self.options.adt_swift_url, src, arch)
  841. # do we have one now?
  842. try:
  843. self.test_results[trigger][src][arch]
  844. return
  845. except KeyError:
  846. pass
  847. self.request_test_if_not_queued(src, arch, trigger, full_trigger, huge=huge)
  848. def request_test_if_not_queued(self, src, arch, trigger, full_trigger=None, huge=False):
  849. if full_trigger is None:
  850. full_trigger = trigger
  851. # Don't re-request if it's already pending
  852. arch_list = self.pending_tests.setdefault(trigger, {}).setdefault(src, [])
  853. if arch in arch_list:
  854. self.logger.info('Test %s/%s for %s is already pending, not queueing', src, arch, trigger)
  855. else:
  856. self.logger.info('Requesting %s autopkgtest on %s to verify %s', src, arch, trigger)
  857. arch_list.append(arch)
  858. arch_list.sort()
  859. self.send_test_request(src, arch, full_trigger, huge=huge)
  860. def result_in_baseline(self, src, arch):
  861. '''Get the result for src on arch in the baseline
  862. The baseline is optionally all data or a reference set)
  863. '''
  864. # this requires iterating over all cached results and thus is expensive;
  865. # cache the results
  866. try:
  867. return self.result_in_baseline_cache[src][arch]
  868. except KeyError:
  869. pass
  870. result_reference = [Result.NONE, None, '', 0]
  871. if self.options.adt_baseline == 'reference':
  872. try:
  873. result_reference = self.test_results[REF_TRIG][src][arch]
  874. self.logger.debug('Found result for src %s in reference: %s',
  875. src, result_reference[0].name)
  876. except KeyError:
  877. self.logger.debug('Found NO result for src %s in reference: %s',
  878. src, result_reference[0].name)
  879. pass
  880. self.result_in_baseline_cache[src][arch] = deepcopy(result_reference)
  881. return result_reference
  882. result_ever = [Result.FAIL, None, '', 0]
  883. for srcmap in self.test_results.values():
  884. try:
  885. if srcmap[src][arch][0] != Result.FAIL:
  886. result_ever = srcmap[src][arch]
  887. # If we are not looking at a reference run, We don't really
  888. # care about anything except the status, so we're done
  889. # once we find a PASS.
  890. if result_ever[0] == Result.PASS:
  891. break
  892. except KeyError:
  893. pass
  894. self.result_in_baseline_cache[src][arch] = deepcopy(result_ever)
  895. self.logger.debug('Result for src %s ever: %s', src, result_ever[0].name)
  896. return result_ever
    def pkg_test_result(self, src, ver, arch, trigger):
        '''Get current test status of a particular package

        Return (status, real_version, run_id, log_url) tuple; status is a key in
        EXCUSES_LABELS. run_id is None if the test is still running.

        Note that *ver* is replaced by the version that was actually tested
        when a recorded result exists.
        '''
        # determine current test result status
        baseline_result = self.result_in_baseline(src, arch)[0]
        url = None
        run_id = None
        # NOTE(review): a KeyError raised anywhere in this block (not just by
        # the test_results lookup) is treated as "no result yet" below.
        try:
            r = self.test_results[trigger][src][arch]
            ver = r[1]  # version that was actually tested
            run_id = r[2]
            if r[0] == Result.FAIL:
                # Special-case triggers from linux-meta*: we cannot compare
                # results against different kernels, as e. g. a DKMS module
                # might work against the default kernel but fail against a
                # different flavor; so for those, ignore the "ever
                # passed" check; FIXME: check against trigsrc only
                if self.options.adt_baseline != 'reference' and \
                   (trigger.startswith('linux-meta') or trigger.startswith('linux/')):
                    baseline_result = Result.FAIL
                # classify the failure relative to the baseline
                if baseline_result == Result.FAIL:
                    result = 'ALWAYSFAIL'
                elif self.has_force_badtest(src, ver, arch):
                    result = 'IGNORE-FAIL'
                elif baseline_result == Result.NONE:
                    # Check if the autopkgtest exists in the target suite and request it
                    test_in_target = False
                    try:
                        srcinfo = self.suite_info.target_suite.sources[src]
                        if 'autopkgtest' in srcinfo.testsuite or self.has_autodep8(srcinfo):
                            test_in_target = True
                    except KeyError:
                        pass
                    if test_in_target:
                        # baseline missing but testable: kick off a reference
                        # run and report the excuse as waiting for it
                        self.request_test_if_not_queued(src, arch, REF_TRIG)
                        result = 'RUNNING-REFERENCE'
                    else:
                        result = 'REGRESSION'
                else:
                    result = 'REGRESSION'
            else:
                # PASS / NEUTRAL map directly onto EXCUSES_LABELS keys
                result = r[0].name
            # build the log URL; local file:// results are served by the CI
            # web UI, swift results directly from the container
            if self.options.adt_swift_url.startswith('file://'):
                url = os.path.join(self.options.adt_ci_url,
                                   'data',
                                   'autopkgtest',
                                   self.options.series,
                                   arch,
                                   srchash(src),
                                   src,
                                   run_id,
                                   'log.gz')
            else:
                url = os.path.join(self.options.adt_swift_url,
                                   self.swift_container,
                                   self.options.series,
                                   arch,
                                   srchash(src),
                                   src,
                                   run_id,
                                   'log.gz')
        except KeyError:
            # no result for src/arch; still running?
            if arch in self.pending_tests.get(trigger, {}).get(src, []):
                if baseline_result != Result.FAIL and not self.has_force_badtest(src, ver, arch):
                    result = 'RUNNING'
                else:
                    # failure would not block anyway, so don't wait on it
                    result = 'RUNNING-ALWAYSFAIL'
                url = self.options.adt_ci_url + 'status/pending'
            else:
                raise RuntimeError('Result for %s/%s/%s (triggered by %s) is neither known nor pending!' %
                                   (src, ver, arch, trigger))
        return (result, ver, run_id, url)
  972. def has_force_badtest(self, src, ver, arch):
  973. '''Check if src/ver/arch has a force-badtest hint'''
  974. hints = self.hints.search('force-badtest', package=src)
  975. if hints:
  976. self.logger.info('Checking hints for %s/%s/%s: %s', src, ver, arch, [str(h) for h in hints])
  977. for hint in hints:
  978. if [mi for mi in hint.packages if mi.architecture in ['source', arch] and
  979. (mi.version == 'all' or apt_pkg.version_compare(ver, mi.version) <= 0)]:
  980. return True
  981. return False