Devuan deployment of britney2
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 

468 lines
21 KiB

  1. import apt_pkg
  2. import contextlib
  3. import copy
  4. from britney2.transaction import MigrationTransactionState
  5. from britney2.utils import (
  6. MigrationConstraintException, compute_reverse_tree, check_installability, clone_nuninst,
  7. find_smooth_updateable_binaries,
  8. )
  9. def compute_eqv_set(pkg_universe, updates, rms):
  10. eqv_set = set()
  11. # If we are removing *and* updating packages, then check for eqv. packages
  12. if rms and updates:
  13. eqv_table = {(x.package_name, x.architecture): x for x in rms}
  14. for new_pkg_id in updates:
  15. binary, _, parch = new_pkg_id
  16. key = (binary, parch)
  17. old_pkg_id = eqv_table.get(key)
  18. if old_pkg_id is not None:
  19. if pkg_universe.are_equivalent(new_pkg_id, old_pkg_id):
  20. eqv_set.add(key)
  21. return eqv_set
  22. def is_nuninst_worse(must_be_installable, nuninst_now_arch, nuninst_after_arch, allow_uninst):
  23. if len(nuninst_after_arch - allow_uninst) > \
  24. len(nuninst_now_arch - allow_uninst):
  25. return True
  26. regression = nuninst_after_arch - nuninst_now_arch
  27. if not regression.isdisjoint(must_be_installable):
  28. return True
  29. return False
class MigrationManager(object):
    """Applies migration items to the target suite and validates the result.

    Maintains a stack of transactions (see start_transaction) so that the
    changes made to the target suite by a migration can be rolled back.
    """

    def __init__(self, options, suite_info, all_binaries, pkg_universe, constraints, allow_uninst, migration_item_factory):
        self.options = options
        self.suite_info = suite_info
        self.all_binaries = all_binaries
        self.pkg_universe = pkg_universe
        self.constraints = constraints
        # per-architecture sets of packages allowed to be uninstallable
        self.allow_uninst = allow_uninst
        # stack of open MigrationTransactionState objects (innermost last)
        self._transactions = []
        self._all_architectures = frozenset(self.options.architectures)
        self._migration_item_factory = migration_item_factory

    @property
    def current_transaction(self):
        # The innermost open transaction, or None when no transaction is active.
        return self._transactions[-1] if self._transactions else None

    def compute_groups(self,
                       item,
                       allow_smooth_updates=True,
                       removals=frozenset()):
        """Compute the groups of binaries being migrated by item

        This method will compute the binaries that will be added to,
        replaced in or removed from the target suite and which of
        the removals are smooth updatable.

        Parameters:
        * "item" is a MigrationItem
        * "allow_smooth_updates" is a boolean determining whether smooth-
          updates are permitted in this migration.  When set to False,
          the "smoothbins" return value will always be the empty set.
          Any value that would have been there will now be in "rms"
          instead. (defaults: True)
        * "removals" is a set of binaries that is assumed to be
          removed at the same time as this migration (e.g. in the same
          "easy"-hint).  This may affect what if some binaries are
          smooth updated or not. (defaults: empty-set)
          - Binaries must be given as ("package-name", "version",
            "architecture") tuples.

        Returns a tuple (source_name, adds, rms, smoothbins).
        "source_name" is the name of the source package involved.
        "adds" is a set of binaries that will updated in or appear after
        the migration.  "rms" is a set of binaries that are not
        smooth-updatable (or binaries that could be, but there is no
        reason to let them be smooth updated).  "smoothbins" is set of
        binaries that are to be smooth-updated.

        Each "binary" in "adds", "rms" and "smoothbins" will be a
        tuple of ("package-name", "version", "architecture") and are
        thus tuples suitable for passing on to the
        InstallabilityTester.

        Unlike migrate_items_to_target_suite, this will not modify
        any data structure.
        """
        # local copies for better performances
        item_package = item.package
        target_suite = self.suite_info.target_suite
        binaries_t = target_suite.binaries

        adds = set()

        # remove all binary packages (if the source already exists)
        if item.architecture == 'source' or not item.is_removal:
            source_name = item_package
            if source_name in target_suite.sources:
                rms, smoothbins = self._compute_removals(item, allow_smooth_updates, removals)
            else:
                # source is not in the target suite yet, so nothing to remove
                rms = set()
                smoothbins = set()

        # single binary removal; used for clearing up after smooth
        # updates but not supported as a manual hint
        else:
            assert item_package in binaries_t[item.architecture]
            pkg_id = binaries_t[item.architecture][item_package].pkg_id
            binary, ver, parch = pkg_id
            if ver != item.version:
                raise MigrationConstraintException(
                    "trying cruft removal item %s, while %s has %s/%s on %s" % (
                        item, target_suite.name,
                        binary, ver, parch))
            source_name = binaries_t[item.architecture][item_package].source
            rms = {pkg_id}
            smoothbins = set()

        # add the new binary packages (if we are not removing)
        if not item.is_removal:
            source_suite = item.suite
            binaries_s = source_suite.binaries
            source_data = source_suite.sources[source_name]
            source_ver_new = source_data.version
            sources_t = target_suite.sources
            if source_name in sources_t:
                source_data_old = sources_t[source_name]
                source_ver_old = source_data_old.version
                # never migrate a source version older than what the target has
                if apt_pkg.version_compare(source_ver_old, source_ver_new) > 0:
                    raise MigrationConstraintException("trying src:%s %s, while %s has %s" % (
                        source_name, source_ver_new, target_suite.name, source_ver_old))

            for pkg_id in source_data.binaries:
                binary, ver, parch = pkg_id
                if item.architecture not in ['source', parch]:
                    continue

                if binaries_s[parch][binary].source != source_name:
                    # This binary package has been hijacked by some other source.
                    # So don't add it as part of this update.
                    #
                    # Also, if this isn't a source update, don't remove
                    # the package that's been hijacked if it's present.
                    if item.architecture != 'source':
                        for rm_b, rm_v, rm_p in list(rms):
                            if (rm_b, rm_p) == (binary, parch):
                                rms.remove((rm_b, rm_v, rm_p))
                    continue

                # Don't add the binary if it is cruft; smooth updates will keep it if possible
                if (parch not in self.options.outofsync_arches and
                        source_data.version != binaries_s[parch][binary].source_version):
                    continue

                if binary in binaries_t[parch]:
                    oldver = binaries_t[parch][binary].version
                    # never downgrade a binary already in the target suite
                    if apt_pkg.version_compare(oldver, ver) > 0:
                        raise MigrationConstraintException("trying %s %s from src:%s %s, while %s has %s" % (
                            binary, ver, source_name, source_ver_new, target_suite.name, oldver))

                adds.add(pkg_id)

        return (source_name, adds, rms, smoothbins)

    def _compute_removals(self, item, allow_smooth_updates, removals):
        """Compute which of item's binaries are removed vs. smooth-updated.

        Parameters mirror compute_groups.  Returns a tuple (rms, smoothbins)
        of sets of BinaryPackageIds; "rms" are the binaries to remove and
        "smoothbins" those kept for smooth updates (disjoint sets).
        """
        pkg_universe = self.pkg_universe
        source_suite = item.suite
        target_suite = self.suite_info.target_suite
        binaries_s = source_suite.binaries
        binaries_t = target_suite.binaries
        source_name = item.package
        source_data = target_suite.sources[source_name]

        bins = []
        # remove all the binaries

        # first, build a list of eligible binaries
        for pkg_id in source_data.binaries:
            binary, _, parch = pkg_id
            if item.architecture != 'source' and parch != item.architecture:
                continue

            # Work around #815995
            if item.architecture == 'source' and item.is_removal and binary not in binaries_t[parch]:
                continue

            bin_data = binaries_t[parch][binary]
            # Do not include hijacked binaries nor cruft (cruft is handled separately)
            if bin_data.source != source_name or bin_data.source_version != source_data.version:
                continue
            bins.append(pkg_id)

        if allow_smooth_updates and source_suite.suite_class.is_primary_source:
            smoothbins = find_smooth_updateable_binaries(bins,
                                                         source_suite.sources[source_name],
                                                         pkg_universe,
                                                         target_suite,
                                                         binaries_t,
                                                         binaries_s,
                                                         removals,
                                                         self.options.smooth_updates)
        else:
            smoothbins = set()

        # remove all the binaries which aren't being smooth updated
        if item.architecture != 'source' and source_suite.suite_class.is_additional_source:
            # Special-case for pu/tpu:
            # if this is a binary migration from *pu, only the arch:any
            # packages will be present. ideally dak would also populate
            # the arch-indep packages, but as that's not the case we
            # must keep them around; they will not be re-added by the
            # migration so will end up missing from testing
            all_binaries = self.all_binaries
            rms = {pkg_id for pkg_id in bins
                   if pkg_id not in smoothbins and all_binaries[pkg_id].architecture != 'all'}
        else:
            rms = {pkg_id for pkg_id in bins if pkg_id not in smoothbins}
        return rms, smoothbins

    def _apply_item_to_target_suite(self, item, removals=frozenset()):
        """Apply a change to the target suite as requested by `item`

        An optional set of binaries may be passed in "removals". Binaries listed
        in this set will be assumed to be removed at the same time as the "item"
        will migrate.  This may change what binaries will be smooth-updated.
        - Binaries in this set must be instances of BinaryPackageId.

        This method applies the changes required by the action `item` tracking
        them so it will be possible to revert them.

        The method returns a tuple containing a set of packages
        affected by the change (as (name, arch)-tuples) and the
        dictionary undo which can be used to rollback the changes.
        """
        undo = {'binaries': {}, 'sources': {}, 'virtual': {}}

        affected_all = set()
        updated_binaries = set()

        # local copies for better performance
        source_suite = item.suite
        target_suite = self.suite_info.target_suite
        packages_t = target_suite.binaries
        provides_t = target_suite.provides_table
        pkg_universe = self.pkg_universe
        transaction = self.current_transaction

        source_name, updates, rms, smooth_updates = self.compute_groups(item, removals=removals)
        sources_t = target_suite.sources
        # Handle the source package
        old_source = sources_t.get(source_name)

        # add/update the source package
        if item.is_removal and item.architecture == 'source':
            del sources_t[source_name]
        else:
            # always create a copy of the SourcePackage object
            sources_t[source_name] = copy.copy(source_suite.sources[source_name])
            if old_source is not None:
                # always create a new list of binaries
                sources_t[source_name].binaries = copy.copy(old_source.binaries)
            else:
                sources_t[source_name].binaries = set()
        # record the previous source object (or None) for rollback
        undo['sources'][source_name] = old_source

        eqv_set = compute_eqv_set(pkg_universe, updates, rms)

        # remove all the binaries which aren't being smooth updated
        for rm_pkg_id in rms:
            binary, version, parch = rm_pkg_id
            pkey = (binary, parch)
            binaries_t_a = packages_t[parch]
            provides_t_a = provides_t[parch]

            pkg_data = binaries_t_a[binary]
            # save the old binary for undo
            undo['binaries'][pkey] = rm_pkg_id
            if pkey not in eqv_set:
                # all the reverse dependencies are affected by
                # the change
                affected_all.update(pkg_universe.reverse_dependencies_of(rm_pkg_id))
                affected_all.update(pkg_universe.negative_dependencies_of(rm_pkg_id))

            # remove the provided virtual packages
            for provided_pkg, prov_version, _ in pkg_data.provides:
                key = (provided_pkg, parch)
                if key not in undo['virtual']:
                    undo['virtual'][key] = provides_t_a[provided_pkg].copy()
                provides_t_a[provided_pkg].remove((binary, prov_version))
                if not provides_t_a[provided_pkg]:
                    del provides_t_a[provided_pkg]
            # for source removal, the source is already gone
            if source_name in sources_t:
                sources_t[source_name].binaries.discard(rm_pkg_id)
            # finally, remove the binary package
            del binaries_t_a[binary]
            target_suite.remove_binary(rm_pkg_id)

        # Add/Update binary packages in testing
        if updates:
            packages_s = source_suite.binaries

            for updated_pkg_id in updates:
                binary, new_version, parch = updated_pkg_id
                key = (binary, parch)
                binaries_t_a = packages_t[parch]
                provides_t_a = provides_t[parch]
                equivalent_replacement = key in eqv_set

                # obviously, added/modified packages are affected
                if not equivalent_replacement:
                    affected_all.add(updated_pkg_id)
                # if the binary already exists in testing, it is currently
                # built by another source package. we therefore remove the
                # version built by the other source package, after marking
                # all of its reverse dependencies as affected
                if binary in binaries_t_a:
                    old_pkg_data = binaries_t_a[binary]
                    old_pkg_id = old_pkg_data.pkg_id
                    # save the old binary package
                    undo['binaries'][key] = old_pkg_id
                    if not equivalent_replacement:
                        # all the reverse conflicts
                        affected_all.update(pkg_universe.reverse_dependencies_of(old_pkg_id))
                    target_suite.remove_binary(old_pkg_id)
                elif transaction and transaction.parent_transaction:
                    # the binary isn't in the target suite, but it may have been at
                    # the start of the current hint and have been removed
                    # by an earlier migration. if that's the case then we
                    # will have a record of the older instance of the binary
                    # in the undo information. we can use that to ensure
                    # that the reverse dependencies of the older binary
                    # package are also checked.
                    # reverse dependencies built from this source can be
                    # ignored as their reverse trees are already handled
                    # by this function
                    for (tundo, tpkg) in transaction.parent_transaction.undo_items:
                        if key in tundo['binaries']:
                            tpkg_id = tundo['binaries'][key]
                            affected_all.update(pkg_universe.reverse_dependencies_of(tpkg_id))

                # add/update the binary package from the source suite
                new_pkg_data = packages_s[parch][binary]
                binaries_t_a[binary] = new_pkg_data
                target_suite.add_binary(updated_pkg_id)
                updated_binaries.add(updated_pkg_id)
                # add the binary to the source package
                sources_t[source_name].binaries.add(updated_pkg_id)
                # register new provided packages
                for provided_pkg, prov_version, _ in new_pkg_data.provides:
                    key = (provided_pkg, parch)
                    if key not in undo['virtual']:
                        # None marks "entry did not exist before" for rollback
                        restore_as = provides_t_a[provided_pkg].copy() if provided_pkg in provides_t_a else None
                        undo['virtual'][key] = restore_as
                    provides_t_a[provided_pkg].add((binary, prov_version))
                if not equivalent_replacement:
                    # all the reverse dependencies are affected by the change
                    affected_all.add(updated_pkg_id)
                    affected_all.update(pkg_universe.negative_dependencies_of(updated_pkg_id))

        # Also include the transitive rdeps of the packages found so far
        compute_reverse_tree(pkg_universe, affected_all)
        if transaction:
            transaction.add_undo_item(undo, updated_binaries)
        # return the affected packages (direct and than all)
        return (affected_all, smooth_updates)

    def _apply_multiple_items_to_target_suite(self, items):
        """Apply one or more migration items to the target suite.

        For multiple items, removals from all items are computed first
        (without smooth updates) and passed to each application so the
        items are treated as migrating together.

        Returns a tuple (is_source_migration, affected_architectures,
        affected_all, smooth_updates).
        """
        is_source_migration = False
        if len(items) == 1:
            item = items[0]
            # apply the changes
            affected_all, smooth_updates = self._apply_item_to_target_suite(item)
            if item.architecture == 'source':
                affected_architectures = self._all_architectures
                is_source_migration = True
            else:
                affected_architectures = {item.architecture}
        else:
            affected_architectures = set()
            removals = set()
            affected_all = set()
            smooth_updates = set()
            # first pass: collect the combined removals of all items
            for item in items:
                _, _, rms, _ = self.compute_groups(item, allow_smooth_updates=False)
                removals.update(rms)
                affected_architectures.add(item.architecture)

            if 'source' in affected_architectures:
                affected_architectures = self._all_architectures
                is_source_migration = True

            # second pass: apply each item knowing the full removal set
            for item in items:
                item_affected_all, item_smooth = self._apply_item_to_target_suite(item,
                                                                                  removals=removals)
                affected_all.update(item_affected_all)
                smooth_updates.update(item_smooth)

        return is_source_migration, affected_architectures, affected_all, smooth_updates

    def migrate_items_to_target_suite(self, items, nuninst_now, stop_on_first_regression=True):
        """Migrate items to the target suite and re-check installability.

        Parameters:
        * "items" - the migration items to apply together
        * "nuninst_now" - the per-architecture uninstallability counters
          before the migration
        * "stop_on_first_regression" - when True, stop checking further
          architectures as soon as one regresses

        Returns a tuple (is_accepted, nuninst_after, arch, new_cruft);
        "arch" is the architecture being checked when the loop ended
        (the regressing one if is_accepted is False) and "new_cruft" the
        removal items generated for smooth-updated binaries.

        Note: this mutates the target suite; callers use the surrounding
        transaction to roll back when the result is rejected.
        """
        is_accepted = True
        target_suite = self.suite_info.target_suite
        packages_t = target_suite.binaries

        nobreakall_arches = self.options.nobreakall_arches
        new_arches = self.options.new_arches
        break_arches = self.options.break_arches
        arch = None

        is_source_migration, affected_architectures, affected_all, smooth_updates = \
            self._apply_multiple_items_to_target_suite(items)

        # Copy nuninst_comp - we have to deep clone affected
        # architectures.

        # NB: We do this *after* updating testing as we have to filter out
        # removed binaries.  Otherwise, uninstallable binaries that were
        # removed by the item would still be counted.

        nuninst_after = clone_nuninst(nuninst_now, packages_s=packages_t, architectures=affected_architectures)
        must_be_installable = self.constraints['keep-installable']

        # check the affected packages on all the architectures
        for arch in affected_architectures:
            check_archall = arch in nobreakall_arches

            check_installability(target_suite, packages_t, arch, affected_all,
                                 check_archall, nuninst_after)

            # if the uninstallability counter is worse than before, break the loop
            if stop_on_first_regression:
                worse = is_nuninst_worse(must_be_installable, nuninst_now[arch], nuninst_after[arch], self.allow_uninst[arch])

                # ... except for a few special cases
                if worse and ((not is_source_migration and arch not in new_arches) or
                              (arch not in break_arches)):
                    is_accepted = False
                    break

        new_cruft = {self._migration_item_factory.generate_removal_for_cruft_item(x) for x in smooth_updates}

        return (is_accepted, nuninst_after, arch, new_cruft)

    @contextlib.contextmanager
    def start_transaction(self):
        """Open a new (possibly nested) migration transaction.

        Yields the MigrationTransactionState.  On an uncaught exception the
        transaction is rolled back (unless already committed or rolled back)
        and the exception re-raised; the caller is otherwise expected to have
        committed or rolled back before the context exits.
        """
        tmts = MigrationTransactionState(self.suite_info, self.all_binaries, self.current_transaction)
        self._transactions.append(tmts)
        try:
            yield tmts
        except Exception:
            if not tmts.is_committed and not tmts.is_rolled_back:
                tmts.rollback()
            raise
        finally:
            self._transactions.pop()
        assert tmts.is_rolled_back or tmts.is_committed