You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 

3689 lines
126 KiB

// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: acquire-item.cc,v 1.46.2.9 2004/01/16 18:51:11 mdz Exp $
/* ######################################################################
Acquire Item - Item to acquire
Each item can download to exactly one file at a time. This means you
cannot create an item that fetches two uri's to two files at the same
time. The pkgAcqIndex class creates a second class upon instantiation
to fetch the other index files because of this.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/acquire-item.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/error.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/tagfile.h>
#include <apt-pkg/metaindex.h>
#include <apt-pkg/acquire.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/indexfile.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/cacheiterators.h>
#include <apt-pkg/pkgrecords.h>
#include <apt-pkg/gpgv.h>
#include <algorithm>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <vector>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <string>
#include <stdio.h>
#include <ctime>
#include <sstream>
#include <numeric>
#include <apti18n.h>
/*}}}*/
using namespace std;
static void printHashSumComparison(std::string const &URI, HashStringList const &Expected, HashStringList const &Actual) /*{{{*/
{
if (_config->FindB("Debug::Acquire::HashSumMismatch", false) == false)
return;
std::cerr << std::endl << URI << ":" << std::endl << " Expected Hash: " << std::endl;
for (HashStringList::const_iterator hs = Expected.begin(); hs != Expected.end(); ++hs)
std::cerr << "\t- " << hs->toStr() << std::endl;
std::cerr << " Actual Hash: " << std::endl;
for (HashStringList::const_iterator hs = Actual.begin(); hs != Actual.end(); ++hs)
std::cerr << "\t- " << hs->toStr() << std::endl;
}
/*}}}*/
static std::string GetPartialFileName(std::string const &file) /*{{{*/
{
   // Files still in flight live in the "partial" subdirectory of the lists dir.
   return _config->FindDir("Dir::State::lists") + "partial/" + file;
}
/*}}}*/
static std::string GetPartialFileNameFromURI(std::string const &uri) /*{{{*/
{
   // Convenience wrapper: encode the URI into a filename first.
   std::string const encoded = URItoFileName(uri);
   return GetPartialFileName(encoded);
}
/*}}}*/
static std::string GetFinalFileNameFromURI(std::string const &uri) /*{{{*/
{
   // The final resting place of an index is directly in the lists dir.
   std::string name = _config->FindDir("Dir::State::lists");
   name += URItoFileName(uri);
   return name;
}
/*}}}*/
static std::string GetKeepCompressedFileName(std::string file, IndexTarget const &Target)/*{{{*/
{
   // Without the keep-compressed flag the name is used unmodified.
   if (Target.KeepCompressed == false)
      return file;
   // The option can list several extensions; only the first entry counts.
   std::string const KeepCompressedAs = Target.Option(IndexTarget::KEEPCOMPRESSEDAS);
   if (KeepCompressedAs.empty())
      return file;
   auto const ext = KeepCompressedAs.substr(0, KeepCompressedAs.find(' '));
   if (ext != "uncompressed")
      file.append(".").append(ext);
   return file;
}
/*}}}*/
static std::string GetMergeDiffsPatchFileName(std::string const &Final, std::string const &Patch)/*{{{*/
{
   // rred expects the patch as $FinalFile.ed.$patchname.gz
   std::string name = Final;
   name.append(".ed.").append(Patch).append(".gz");
   return name;
}
/*}}}*/
static std::string GetDiffsPatchFileName(std::string const &Final) /*{{{*/
{
   // rred expects the patch as $FinalFile.ed
   std::string name = Final;
   name += ".ed";
   return name;
}
/*}}}*/
static std::string GetExistingFilename(std::string const &File) /*{{{*/
{
   // Prefer the plain name; otherwise probe every known compressor extension.
   if (RealFileExists(File))
      return File;
   for (auto const &ext : APT::Configuration::getCompressorExtensions())
   {
      auto const candidate = File + ext;
      if (RealFileExists(candidate))
	 return candidate;
   }
   // nothing on disk matches this file in any compression
   return {};
}
/*}}}*/
static std::string GetDiffIndexFileName(std::string const &Name) /*{{{*/
{
   // The pdiff index lives in a ".diff" subdirectory next to the index.
   std::string name = Name;
   name += ".diff/Index";
   return name;
}
/*}}}*/
static std::string GetDiffIndexURI(IndexTarget const &Target) /*{{{*/
{
   // The pdiff index is published right beside the index it describes.
   std::string uri = Target.URI;
   uri += ".diff/Index";
   return uri;
}
/*}}}*/
static void ReportMirrorFailureToCentral(pkgAcquire::Item const &I, std::string const &FailCode, std::string const &Details)/*{{{*/
{
   // Report a download failure for an item to the configured external
   // mirror-failure script (if any). Fire-and-forget best effort: a
   // missing script is silently ignored, a failing one only warns.
   // we only act if a mirror was used at all
   if(I.UsedMirror.empty())
      return;
#if 0
   // FIX: access the members through the item reference so this debug
   // aid compiles again if it is ever enabled (this is a free function,
   // UsedMirror/DescURI are members of I)
   std::cerr << "\nReportMirrorFailure: "
	     << I.UsedMirror
	     << " Uri: " << I.DescURI()
	     << " FailCode: "
	     << FailCode << std::endl;
#endif
   string const report = _config->Find("Methods::Mirror::ProblemReporting",
				       "/usr/lib/apt/apt-report-mirror-failure");
   if(!FileExists(report))
      return;

   // argv for the reporter: script mirror uri failcode details
   std::vector<char const*> const Args = {
      report.c_str(),
      I.UsedMirror.c_str(),
      I.DescURI().c_str(),
      FailCode.c_str(),
      Details.c_str(),
      NULL
   };

   pid_t pid = ExecFork();
   if(pid < 0)
   {
      _error->Error("ReportMirrorFailure Fork failed");
      return;
   }
   else if(pid == 0)
   {
      execvp(Args[0], (char**)Args.data());
      // execvp only returns on failure
      std::cerr << "Could not exec " << Args[0] << std::endl;
      _exit(100);
   }
   if(!ExecWait(pid, "report-mirror-failure"))
      _error->Warning("Couldn't report problem to '%s'", report.c_str());
}
/*}}}*/
static APT_NONNULL(2) bool MessageInsecureRepository(bool const isError, char const * const msg, std::string const &repo)/*{{{*/
{
   // Emit the formatted repository message as error or warning and attach
   // the matching explanatory notices. Always returns false so callers can
   // use it as their own failure return value.
   std::string text;
   strprintf(text, msg, repo.c_str());
   if (isError)
   {
      _error->Error("%s", text.c_str());
      _error->Notice("%s", _("Updating from such a repository can't be done securely, and is therefore disabled by default."));
   }
   else
   {
      _error->Warning("%s", text.c_str());
      _error->Notice("%s", _("Data from such a repository can't be authenticated and is therefore potentially dangerous to use."));
   }
   _error->Notice("%s", _("See apt-secure(8) manpage for repository creation and user configuration details."));
   return false;
}
/*}}}*/
// AllowInsecureRepositories /*{{{*/
enum class InsecureType { UNSIGNED, WEAK, NORELEASE };
static bool TargetIsAllowedToBe(IndexTarget const &Target, InsecureType const type)
{
   // Global and per-target escape hatches whitelist every kind of insecurity.
   if (_config->FindB("Acquire::AllowInsecureRepositories") ||
       Target.OptionBool(IndexTarget::ALLOW_INSECURE))
      return true;
   // Weak (but present) security information has its own, milder overrides.
   if (type == InsecureType::WEAK)
   {
      if (_config->FindB("Acquire::AllowWeakRepositories") ||
	  Target.OptionBool(IndexTarget::ALLOW_WEAK))
	 return true;
   }
   return false;
}
static bool APT_NONNULL(3, 4, 5) AllowInsecureRepositories(InsecureType const msg, std::string const &repo,
   metaIndex const * const MetaIndexParser, pkgAcqMetaClearSig * const TransactionManager, pkgAcquire::Item * const I)
{
   // Decide whether a repository in the given insecure state may be used.
   // Returns true if it may (possibly with a warning); otherwise emits an
   // error, aborts the transaction, marks I as failed and returns false.

   // we skip weak downgrades as its unlikely that a repository gets really weaker –
   // its more realistic that apt got pickier in a newer version
   if (msg != InsecureType::WEAK)
   {
      std::string const FinalInRelease = TransactionManager->GetFinalFilename();
      std::string const FinalReleasegpg = FinalInRelease.substr(0, FinalInRelease.length() - strlen("InRelease")) + "Release.gpg";
      // if either signed file is on disk this repo used to be secure, so
      // using it now insecure would be a downgrade
      if (RealFileExists(FinalReleasegpg) || RealFileExists(FinalInRelease))
      {
	 char const * msgstr = nullptr;
	 switch (msg)
	 {
	    case InsecureType::UNSIGNED: msgstr = _("The repository '%s' is no longer signed."); break;
	    case InsecureType::NORELEASE: msgstr = _("The repository '%s' does no longer have a Release file."); break;
	    case InsecureType::WEAK: /* unreachable */ break;
	 }
	 if (_config->FindB("Acquire::AllowDowngradeToInsecureRepositories") ||
	     TransactionManager->Target.OptionBool(IndexTarget::ALLOW_DOWNGRADE_TO_INSECURE))
	 {
	    // meh, the users wants to take risks (we still mark the packages
	    // from this repository as unauthenticated)
	    _error->Warning(msgstr, repo.c_str());
	    _error->Warning(_("This is normally not allowed, but the option "
		     "Acquire::AllowDowngradeToInsecureRepositories was "
		     "given to override it."));
	 } else {
	    MessageInsecureRepository(true, msgstr, repo);
	    TransactionManager->AbortTransaction();
	    I->Status = pkgAcquire::Item::StatError;
	    return false;
	 }
      }
   }
   // explicitly trusted repositories (e.g. [trusted=yes]) always pass
   if(MetaIndexParser->GetTrusted() == metaIndex::TRI_YES)
      return true;
   char const * msgstr = nullptr;
   switch (msg)
   {
      case InsecureType::UNSIGNED: msgstr = _("The repository '%s' is not signed."); break;
      case InsecureType::NORELEASE: msgstr = _("The repository '%s' does not have a Release file."); break;
      case InsecureType::WEAK: msgstr = _("The repository '%s' provides only weak security information."); break;
   }
   // user options can downgrade the hard error to a warning
   if (TargetIsAllowedToBe(TransactionManager->Target, msg) == true)
   {
      MessageInsecureRepository(false, msgstr, repo);
      return true;
   }
   MessageInsecureRepository(true, msgstr, repo);
   TransactionManager->AbortTransaction();
   I->Status = pkgAcquire::Item::StatError;
   return false;
}
/*}}}*/
static HashStringList GetExpectedHashesFromFor(metaIndex * const Parser, std::string const &MetaKey)/*{{{*/
{
   // No parser or no entry for this key means we know of no hashes.
   if (Parser == NULL)
      return HashStringList();
   auto const * const R = Parser->Lookup(MetaKey);
   return R == NULL ? HashStringList() : R->Hashes;
}
/*}}}*/
// all ::HashesRequired and ::GetExpectedHashes implementations /*{{{*/
/* ::GetExpectedHashes is abstract and has to be implemented by all subclasses.
It is best to implement it as broadly as possible, while ::HashesRequired defaults
to true and should be as restrictive as possible for false cases. Note that if
a hash is returned by ::GetExpectedHashes it must match. Only if it doesn't
::HashesRequired is called to evaluate if its okay to have no hashes. */
APT_CONST bool pkgAcqTransactionItem::HashesRequired() const
{
   /* signed repositories obviously have a parser and good hashes.
      unsigned repositories, too, as even if we can't trust them for security,
      we can at least trust them for integrity of the download itself.
      Only repositories without a Release file can (obviously) not have
      hashes – and they are very uncommon and strongly discouraged */
   if (TransactionManager->MetaIndexParser->GetLoadedSuccessfully() != metaIndex::TRI_YES)
      return false;
   if (TargetIsAllowedToBe(Target, InsecureType::WEAK))
   {
      /* If we allow weak hashes, we check that we have some (weak) and then
	 declare hashes not needed. That will tip us in the right direction
	 as if hashes exist, they will be used, even if not required */
      auto const hsl = GetExpectedHashes();
      // usable (strong) hashes present: keep requiring them
      if (hsl.usable())
	 return true;
      // only weak hashes present: don't *require* hashes, but they'll still be used
      if (hsl.empty() == false)
	 return false;
   }
   return true;
}
HashStringList pkgAcqTransactionItem::GetExpectedHashes() const
{
   // The hashes for this item are recorded in the meta index under its key.
   auto const key = GetMetaKey();
   return GetExpectedHashesFor(key);
}
APT_CONST bool pkgAcqMetaBase::HashesRequired() const
{
   // Release and co have no hashes 'by design':
   // there is no file above them which could list their hashes.
   return false;
}
HashStringList pkgAcqMetaBase::GetExpectedHashes() const
{
   // Meta index files have no precomputed hashes (see HashesRequired).
   return {};
}
APT_CONST bool pkgAcqIndexDiffs::HashesRequired() const
{
   /* We can't check hashes of rred result as we don't know what the
      hash of the file will be. We just know the hash of the patch(es),
      the hash of the file they will apply on and the hash of the resulting
      file. So only the download of a patch itself is checkable. */
   return State == StateFetchDiff;
}
HashStringList pkgAcqIndexDiffs::GetExpectedHashes() const
{
   // Only the next patch to be fetched has hashes we can check against.
   if (State != StateFetchDiff)
      return {};
   return available_patches[0].download_hashes;
}
APT_CONST bool pkgAcqIndexMergeDiffs::HashesRequired() const
{
   /* @see #pkgAcqIndexDiffs::HashesRequired, with the difference that
      we can check the rred result after all patches are applied as
      we know the expected result rather than potentially apply more patches */
   return State == StateFetchDiff || State == StateApplyDiff;
}
HashStringList pkgAcqIndexMergeDiffs::GetExpectedHashes() const
{
   // A patch download is checked against its own hashes, the applied
   // result against the hashes of the final file; nothing otherwise.
   switch (State)
   {
      case StateFetchDiff: return patch.download_hashes;
      case StateApplyDiff: return GetExpectedHashesFor(Target.MetaKey);
      default: return HashStringList();
   }
}
APT_CONST bool pkgAcqArchive::HashesRequired() const
{
   // Only local sources are exempt from hash checking.
   return !LocalSource;
}
HashStringList pkgAcqArchive::GetExpectedHashes() const
{
   // figured out while parsing the records
   // (the constructor fills ExpectedHashes from the package records)
   return ExpectedHashes;
}
APT_CONST bool pkgAcqFile::HashesRequired() const
{
   // supplied as parameter at creation time, so the caller decides:
   // no usable hashes given means no hashes are enforced
   return ExpectedHashes.usable();
}
HashStringList pkgAcqFile::GetExpectedHashes() const
{
   // hashes were handed to the constructor by the caller
   return ExpectedHashes;
}
/*}}}*/
// Acquire::Item::QueueURI and specialisations from child classes /*{{{*/
bool pkgAcquire::Item::QueueURI(pkgAcquire::ItemDesc &Item)
{
   // Base implementation: hand the description to the downloader unconditionally.
   Owner->Enqueue(Item);
   return true;
}
/* The idea here is that an item isn't queued if it exists on disk and the
transition manager was a hit as this means that the files it contains
the checksums for can't be updated either (or they are and we are asking
for a hashsum mismatch to happen which helps nobody) */
bool pkgAcqTransactionItem::QueueURI(pkgAcquire::ItemDesc &Item)
{
   // refuse to enqueue if the transaction was already aborted/committed
   if (TransactionManager->State != TransactionStarted)
   {
      if (_config->FindB("Debug::Acquire::Transaction", false))
	 std::clog << "Skip " << Target.URI << " as transaction was already dealt with!" << std::endl;
      return false;
   }
   // on an IMS hit the on-disk file is still current, so reuse it
   // instead of downloading (returns false: nothing was queued)
   std::string const FinalFile = GetFinalFilename();
   if (TransactionManager->IMSHit == true && FileExists(FinalFile) == true)
   {
      PartialFile = DestFile = FinalFile;
      Status = StatDone;
      return false;
   }
   // If we got the InRelease file via a mirror, pick all indexes directly from this mirror, too
   if (TransactionManager->BaseURI.empty() == false &&
       URI::SiteOnly(Item.URI) != URI::SiteOnly(TransactionManager->BaseURI))
   {
      // this ensures we rewrite only once and only the first step
      auto const OldBaseURI = Target.Option(IndexTarget::BASE_URI);
      if (OldBaseURI.empty() == false && APT::String::Startswith(Item.URI, OldBaseURI))
      {
	 // keep the path relative to the old base and graft it onto the mirror
	 auto const ExtraPath = Item.URI.substr(OldBaseURI.length());
	 Item.URI = flCombine(TransactionManager->BaseURI, ExtraPath);
	 UsedMirror = TransactionManager->UsedMirror;
	 // patch the mirror name into the user-visible description, too
	 if (Item.Description.find(" ") != string::npos)
	    Item.Description.replace(0, Item.Description.find(" "), UsedMirror);
      }
   }
   return pkgAcquire::Item::QueueURI(Item);
}
/* The transition manager InRelease itself (or its older sisters-in-law
Release & Release.gpg) is always queued as this allows us to rerun gpgv
on it to verify that we aren't stalled with old files */
bool pkgAcqMetaBase::QueueURI(pkgAcquire::ItemDesc &Item)
{
   // Always queue (bypassing the transaction-item checks) so gpgv can be
   // rerun on the meta files even on an IMS hit.
   return pkgAcquire::Item::QueueURI(Item);
}
/* the Diff/Index needs to queue also the up-to-date complete index file
to ensure that the list cleaner isn't eating it */
bool pkgAcqDiffIndex::QueueURI(pkgAcquire::ItemDesc &Item)
{
   // If the item wasn't queued (e.g. IMS hit) we still have to protect the
   // complete on-disk index file from the list cleaner.
   if (pkgAcqTransactionItem::QueueURI(Item) == false)
   {
      QueueOnIMSHit();
      return false;
   }
   return true;
}
/*}}}*/
// Acquire::Item::GetFinalFilename and specialisations for child classes /*{{{*/
std::string pkgAcquire::Item::GetFinalFilename() const
{
   // Beware: Desc.URI is modified by redirections
   // Default: the final file is derived from the (current) download URI.
   return GetFinalFileNameFromURI(Desc.URI);
}
std::string pkgAcqDiffIndex::GetFinalFilename() const
{
   // The diff index is stored as <index>.diff/Index in the lists dir.
   return GetFinalFileNameFromURI(GetDiffIndexURI(Target));
}
std::string pkgAcqIndex::GetFinalFilename() const
{
   // The final name carries the compression extension when the index
   // is configured to be kept compressed on disk.
   return GetKeepCompressedFileName(GetFinalFileNameFromURI(Target.URI), Target);
}
std::string pkgAcqMetaSig::GetFinalFilename() const
{
   // Signatures are stored under the name derived from their target URI.
   return GetFinalFileNameFromURI(Target.URI);
}
std::string pkgAcqBaseIndex::GetFinalFilename() const
{
   // Indexes are stored under the name derived from their target URI.
   return GetFinalFileNameFromURI(Target.URI);
}
std::string pkgAcqMetaBase::GetFinalFilename() const
{
   // Meta files are stored under the name derived from their target URI.
   return GetFinalFileNameFromURI(Target.URI);
}
std::string pkgAcqArchive::GetFinalFilename() const
{
   // Downloaded package files end up in the archives cache directory.
   return _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename);
}
/*}}}*/
// pkgAcqTransactionItem::GetMetaKey and specialisations for child classes /*{{{*/
std::string pkgAcqTransactionItem::GetMetaKey() const
{
   // By default an item is listed in the Release file under its target key.
   return Target.MetaKey;
}
std::string pkgAcqIndex::GetMetaKey() const
{
   // After decompression (or if there never was compression involved)
   // the plain key applies; otherwise the key of the compressed file.
   bool const plain = (Stage == STAGE_DECOMPRESS_AND_VERIFY ||
	 CurrentCompressionExtension == "uncompressed");
   if (plain)
      return Target.MetaKey;
   return Target.MetaKey + "." + CurrentCompressionExtension;
}
std::string pkgAcqDiffIndex::GetMetaKey() const
{
   // The diff index is listed as <metakey>.diff/Index in the Release file.
   return GetDiffIndexFileName(Target.MetaKey);
}
/*}}}*/
//pkgAcqTransactionItem::TransactionState and specialisations for child classes /*{{{*/
bool pkgAcqTransactionItem::TransactionState(TransactionStates const state)
{
   // Perform this item's part of a transaction-wide state change:
   // on abort dequeue still-idle items, on commit move the partial file
   // into its final place (or remove the final file if staged for removal).
   bool const Debug = _config->FindB("Debug::Acquire::Transaction", false);
   switch(state)
   {
      case TransactionStarted: _error->Fatal("Item %s changed to invalid transaction start state!", Target.URI.c_str()); break;
      case TransactionAbort:
	 if(Debug == true)
	    std::clog << " Cancel: " << DestFile << std::endl;
	 if (Status == pkgAcquire::Item::StatIdle)
	 {
	    Status = pkgAcquire::Item::StatDone;
	    Dequeue();
	 }
	 break;
      case TransactionCommit:
	 if(PartialFile.empty() == false)
	 {
	    bool sameFile = (PartialFile == DestFile);
	    // we use symlinks on IMS-Hit to avoid copies
	    if (RealFileExists(DestFile))
	    {
	       struct stat Buf;
	       if (lstat(PartialFile.c_str(), &Buf) != -1)
	       {
		  if (S_ISLNK(Buf.st_mode) && Buf.st_size > 0)
		  {
		     // FIX: use a heap buffer instead of a variable-length
		     // array — VLAs are a compiler extension, not ISO C++,
		     // and st_size is not a compile-time constant
		     std::vector<char> partial(Buf.st_size + 1);
		     ssize_t const sp = readlink(PartialFile.c_str(), partial.data(), Buf.st_size);
		     if (sp == -1)
			_error->Errno("pkgAcqTransactionItem::TransactionState-sp", _("Failed to readlink %s"), PartialFile.c_str());
		     else
		     {
			// readlink does not NUL-terminate; do it ourselves
			partial[sp] = '\0';
			sameFile = (DestFile == partial.data());
		     }
		  }
	       }
	       else
		  _error->Errno("pkgAcqTransactionItem::TransactionState-stat", _("Failed to stat %s"), PartialFile.c_str());
	    }
	    if (sameFile == false)
	    {
	       // ensure that even without lists-cleanup all compressions are nuked
	       std::string FinalFile = GetFinalFileNameFromURI(Target.URI);
	       if (FileExists(FinalFile))
	       {
		  if(Debug == true)
		     std::clog << "rm " << FinalFile << " # " << DescURI() << std::endl;
		  if (RemoveFile("TransactionStates-Cleanup", FinalFile) == false)
		     return false;
	       }
	       for (auto const &ext: APT::Configuration::getCompressorExtensions())
	       {
		  auto const Final = FinalFile + ext;
		  if (FileExists(Final))
		  {
		     if(Debug == true)
			std::clog << "rm " << Final << " # " << DescURI() << std::endl;
		     if (RemoveFile("TransactionStates-Cleanup", Final) == false)
			return false;
		  }
	       }
	       if(Debug == true)
		  std::clog << "mv " << PartialFile << " -> "<< DestFile << " # " << DescURI() << std::endl;
	       if (Rename(PartialFile, DestFile) == false)
		  return false;
	    }
	    else if(Debug == true)
	       std::clog << "keep " << PartialFile << " # " << DescURI() << std::endl;
	 } else {
	    // an empty PartialFile means the file was staged for removal
	    if(Debug == true)
	       std::clog << "rm " << DestFile << " # " << DescURI() << std::endl;
	    if (RemoveFile("TransItem::TransactionCommit", DestFile) == false)
	       return false;
	 }
	 break;
   }
   return true;
}
bool pkgAcqMetaBase::TransactionState(TransactionStates const state)
{
   // Do not remove InRelease on IMSHit of Release.gpg [yes, this is very edgecasey]
   if (TransactionManager->IMSHit == true)
      return true;
   return pkgAcqTransactionItem::TransactionState(state);
}
bool pkgAcqIndex::TransactionState(TransactionStates const state)
{
   // Let the generic handling run first; bail out if it already failed.
   if (pkgAcqTransactionItem::TransactionState(state) == false)
      return false;
   switch (state)
   {
      case TransactionStarted:
	 _error->Fatal("AcqIndex %s changed to invalid transaction start state!", Target.URI.c_str());
	 break;
      case TransactionAbort:
	 if (Stage != STAGE_DECOMPRESS_AND_VERIFY)
	    break;
	 // keep the compressed file, but drop the decompressed
	 EraseFileName.clear();
	 if (PartialFile.empty() == false && flExtension(PartialFile) != CurrentCompressionExtension)
	    RemoveFile("TransactionAbort", PartialFile);
	 break;
      case TransactionCommit:
	 // the compressed intermediate isn't needed after a successful commit
	 if (EraseFileName.empty() == false)
	    RemoveFile("AcqIndex::TransactionCommit", EraseFileName);
	 break;
   }
   return true;
}
bool pkgAcqDiffIndex::TransactionState(TransactionStates const state)
{
   // Let the generic handling run first; bail out if it already failed.
   if (pkgAcqTransactionItem::TransactionState(state) == false)
      return false;
   switch (state)
   {
      case TransactionStarted:
	 _error->Fatal("Item %s changed to invalid transaction start state!", Target.URI.c_str());
	 break;
      case TransactionCommit:
	 break;
      case TransactionAbort:
	 // an aborted transaction must not leave a partial index around
	 RemoveFile("TransactionAbort", GetPartialFileNameFromURI(Target.URI));
	 break;
   }
   return true;
}
/*}}}*/
class APT_HIDDEN NoActionItem : public pkgAcquire::Item /*{{{*/
/* The sole purpose of this class is having an item which does nothing to
   reach its done state to prevent cleanup deleting the mentioned file.
   Handy in cases in which we know we have the file already, like IMS-Hits. */
{
   IndexTarget const Target;
   public:
   virtual std::string DescURI() const APT_OVERRIDE { return Target.URI; }
   virtual HashStringList GetExpectedHashes() const APT_OVERRIDE { return HashStringList(); }

   // protect the file the target would normally be downloaded to
   NoActionItem(pkgAcquire * const Owner, IndexTarget const &Target) :
      NoActionItem(Owner, Target, GetFinalFileNameFromURI(Target.URI))
   {
   }
   // protect an explicitly named file instead
   NoActionItem(pkgAcquire * const Owner, IndexTarget const &Target, std::string const &FinalFile) :
      pkgAcquire::Item(Owner), Target(Target)
   {
      Status = StatDone;
      DestFile = FinalFile;
   }
};
/*}}}*/
class APT_HIDDEN CleanupItem : public pkgAcqTransactionItem /*{{{*/
/* This class ensures that a file which was configured but isn't downloaded
   for various reasons isn't kept in an old version in the lists directory.
   In a way its the reverse of NoActionItem as it helps with removing files
   even if the lists-cleanup is deactivated. */
{
   public:
   virtual std::string DescURI() const APT_OVERRIDE { return Target.URI; }
   virtual HashStringList GetExpectedHashes() const APT_OVERRIDE { return HashStringList(); }

   CleanupItem(pkgAcquire * const Owner, pkgAcqMetaClearSig * const TransactionManager, IndexTarget const &Target) :
      pkgAcqTransactionItem(Owner, TransactionManager, Target)
   {
      Status = StatDone;
      DestFile = GetFinalFileNameFromURI(Target.URI);
   }
   // On commit the stale file is removed; abort and start are no-ops.
   bool TransactionState(TransactionStates const state) APT_OVERRIDE
   {
      if (state != TransactionCommit)
	 return true;
      if (_config->FindB("Debug::Acquire::Transaction", false) == true)
	 std::clog << "rm " << DestFile << " # " << DescURI() << std::endl;
      return RemoveFile("TransItem::TransactionCommit", DestFile);
   }
};
/*}}}*/
// Acquire::Item::Item - Constructor /*{{{*/
APT_IGNORE_DEPRECATED_PUSH
// the deprecation guard is needed as the initializer list touches members
// marked deprecated (e.g. Mode, cf. SetActiveSubprocess below)
pkgAcquire::Item::Item(pkgAcquire * const owner) :
   FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false), Local(false),
   QueueCounter(0), ExpectedAdditionalItems(0), Owner(owner), d(NULL)
{
   // register with the downloader; a fresh item starts out idle
   Owner->Add(this);
   Status = StatIdle;
}
APT_IGNORE_DEPRECATED_POP
/*}}}*/
// Acquire::Item::~Item - Destructor /*{{{*/
pkgAcquire::Item::~Item()
{
   // deregister from the downloader we joined in the constructor
   Owner->Remove(this);
}
/*}}}*/
std::string pkgAcquire::Item::Custom600Headers() const /*{{{*/
{
   // No extra method headers by default; subclasses override as needed.
   return {};
}
/*}}}*/
std::string pkgAcquire::Item::ShortDesc() const /*{{{*/
{
   // default short description is simply the item's URI
   return DescURI();
}
/*}}}*/
APT_CONST void pkgAcquire::Item::Finished() /*{{{*/
{
   // hook called when the item is finished; no-op by default
}
/*}}}*/
APT_PURE pkgAcquire * pkgAcquire::Item::GetOwner() const /*{{{*/
{
   // accessor for the downloader this item belongs to
   return Owner;
}
/*}}}*/
APT_CONST pkgAcquire::ItemDesc &pkgAcquire::Item::GetItemDesc() /*{{{*/
{
   // accessor for the mutable item description (URI, short/long desc)
   return Desc;
}
/*}}}*/
APT_CONST bool pkgAcquire::Item::IsTrusted() const /*{{{*/
{
   // items are untrusted unless a subclass proves otherwise
   return false;
}
/*}}}*/
// Acquire::Item::Failed - Item failed to download /*{{{*/
// ---------------------------------------------------------------------
/* We return to an idle state if there are still other queues that could
fetch this object */
void pkgAcquire::Item::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
{
   // Only give up for real if no other queue could still fetch this item.
   if (QueueCounter <= 1)
   {
      /* This indicates that the file is not available right now but might
	 be sometime later. If we do a retry cycle then this should be
	 retried [CDROMs] */
      if (Cnf != NULL && Cnf->LocalOnly == true &&
	  StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
      {
	 Status = StatIdle;
	 Dequeue();
	 return;
      }
      // move non-final states to error; keep already-final error states
      switch (Status)
      {
	 case StatIdle:
	 case StatFetching:
	 case StatDone:
	    Status = StatError;
	    break;
	 case StatAuthError:
	 case StatError:
	 case StatTransientNetworkError:
	    break;
      }
      Complete = false;
      Dequeue();
   }
   // classify the failure so the handling below can branch on it
   string const FailReason = LookupTag(Message, "FailReason");
   enum { MAXIMUM_SIZE_EXCEEDED, HASHSUM_MISMATCH, WEAK_HASHSUMS, OTHER } failreason = OTHER;
   if ( FailReason == "MaximumSizeExceeded")
      failreason = MAXIMUM_SIZE_EXCEEDED;
   else if ( FailReason == "WeakHashSums")
      failreason = WEAK_HASHSUMS;
   else if (Status == StatAuthError)
      failreason = HASHSUM_MISMATCH;
   // compose an error text only if nobody supplied one already
   if(ErrorText.empty())
   {
      if (Status == StatAuthError)
      {
	 std::ostringstream out;
	 switch (failreason)
	 {
	    case HASHSUM_MISMATCH:
	       out << _("Hash Sum mismatch") << std::endl;
	       break;
	    case WEAK_HASHSUMS:
	       out << _("Insufficient information available to perform this download securely") << std::endl;
	       break;
	    case MAXIMUM_SIZE_EXCEEDED:
	    case OTHER:
	       out << LookupTag(Message, "Message") << std::endl;
	       break;
	 }
	 // list what we expected …
	 auto const ExpectedHashes = GetExpectedHashes();
	 if (ExpectedHashes.empty() == false)
	 {
	    out << "Hashes of expected file:" << std::endl;
	    for (auto const &hs: ExpectedHashes)
	    {
	       out << " - " << hs.toStr();
	       if (hs.usable() == false)
		  out << " [weak]";
	       out << std::endl;
	    }
	 }
	 // … and, for a mismatch, what the method actually reported
	 if (failreason == HASHSUM_MISMATCH)
	 {
	    out << "Hashes of received file:" << std::endl;
	    for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
	    {
	       std::string const tagname = std::string(*type) + "-Hash";
	       std::string const hashsum = LookupTag(Message, tagname.c_str());
	       if (hashsum.empty() == false)
	       {
		  auto const hs = HashString(*type, hashsum);
		  out << " - " << hs.toStr();
		  if (hs.usable() == false)
		     out << " [weak]";
		  out << std::endl;
	       }
	    }
	    out << "Last modification reported: " << LookupTag(Message, "Last-Modified", "<none>") << std::endl;
	 }
	 ErrorText = out.str();
      }
      else
	 ErrorText = LookupTag(Message,"Message");
   }
   // keep the broken download around as .FAILED for inspection where useful
   switch (failreason)
   {
      case MAXIMUM_SIZE_EXCEEDED: RenameOnError(MaximumSizeExceeded); break;
      case HASHSUM_MISMATCH: RenameOnError(HashSumMismatch); break;
      case WEAK_HASHSUMS: break;
      case OTHER: break;
   }
   if (FailReason.empty() == false)
      ReportMirrorFailureToCentral(*this, FailReason, ErrorText);
   else
      ReportMirrorFailureToCentral(*this, ErrorText, ErrorText);
   // another queue may still try; go back to idle in that case
   if (QueueCounter > 1)
      Status = StatIdle;
}
/*}}}*/
// Acquire::Item::Start - Item has begun to download /*{{{*/
// ---------------------------------------------------------------------
/* Stash status and the file size. Note that setting Complete means
sub-phases of the acquire process such as decompresion are operating */
void pkgAcquire::Item::Start(string const &/*Message*/, unsigned long long const Size)
{
   // Reset any stale error and mark the item as actively downloading.
   ErrorText.clear();
   Status = StatFetching;
   // Remember the reported size, but never overwrite a value we already have.
   if (Complete == false && FileSize == 0)
      FileSize = Size;
}
/*}}}*/
// Acquire::Item::VerifyDone - check if Item was downloaded OK /*{{{*/
/* Note that hash-verification is 'hardcoded' in acquire-worker and has
* already passed if this method is called. */
bool pkgAcquire::Item::VerifyDone(std::string const &Message,
      pkgAcquire::MethodConfig const * const /*Cnf*/)
{
   // A method must always tell us which file it produced.
   if (LookupTag(Message, "Filename").empty() == true)
   {
      Status = StatError;
      ErrorText = "Method gave a blank filename";
      return false;
   }
   return true;
}
/*}}}*/
// Acquire::Item::Done - Item downloaded OK /*{{{*/
void pkgAcquire::Item::Done(string const &/*Message*/, HashStringList const &Hashes,
      pkgAcquire::MethodConfig const * const /*Cnf*/)
{
   // We just downloaded something..
   // Adopt the size the hashing observed if we didn't know one beforehand.
   if (FileSize == 0)
   {
      auto const downloadedSize = Hashes.FileSize();
      if (downloadedSize != 0)
	 FileSize = downloadedSize;
   }
   Status = StatDone;
   ErrorText.clear();
   Owner->Dequeue(this);
}
/*}}}*/
// Acquire::Item::Rename - Rename a file /*{{{*/
// ---------------------------------------------------------------------
/* This helper function is used by a lot of item methods as their final
step */
bool pkgAcquire::Item::Rename(string const &From,string const &To)
{
   // Renaming a file onto itself is trivially successful.
   if (From == To)
      return true;
   if (rename(From.c_str(), To.c_str()) == 0)
      return true;
   // record the failure on the item itself, appending to prior errors
   std::string err;
   strprintf(err, _("rename failed, %s (%s -> %s)."), strerror(errno),
	     From.c_str(), To.c_str());
   Status = StatError;
   if (ErrorText.empty())
      ErrorText = err;
   else
      ErrorText = ErrorText + ": " + err;
   return false;
}
/*}}}*/
void pkgAcquire::Item::Dequeue() /*{{{*/
{
   // remove this item from its download queue
   Owner->Dequeue(this);
}
/*}}}*/
bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const error)/*{{{*/
{
   // Keep the broken download around as .FAILED for inspection, set an
   // error text and (for some errors) a status. Always returns false so
   // callers can use it directly as their own return value.
   if (RealFileExists(DestFile))
      Rename(DestFile, DestFile + ".FAILED");
   std::string errtext;
   switch (error)
   {
      case HashSumMismatch:
	 // Status is set by the caller (usually to StatAuthError)
	 errtext = _("Hash Sum mismatch");
	 break;
      case SizeMismatch:
	 errtext = _("Size mismatch");
	 Status = StatAuthError;
	 break;
      case InvalidFormat:
	 errtext = _("Invalid file format");
	 Status = StatError;
	 // do not report as usually its not the mirrors fault, but Portal/Proxy
	 break;
      case SignatureError:
	 errtext = _("Signature error");
	 Status = StatError;
	 break;
      case NotClearsigned:
	 strprintf(errtext, _("Clearsigned file isn't valid, got '%s' (does the network require authentication?)"), "NOSPLIT");
	 Status = StatAuthError;
	 break;
      case MaximumSizeExceeded:
	 // the method is expected to report a good error for this
	 break;
      case PDiffError:
	 // no handling here, done by callers
	 break;
   }
   // don't overwrite a more specific error text set earlier
   if (ErrorText.empty())
      ErrorText = errtext;
   return false;
}
/*}}}*/
void pkgAcquire::Item::SetActiveSubprocess(const std::string &subprocess)/*{{{*/
{
   // remember which helper currently works on this item
   ActiveSubprocess = subprocess;
   // Mode is deprecated but kept in sync for compatibility
   APT_IGNORE_DEPRECATED(Mode = ActiveSubprocess.c_str();)
}
/*}}}*/
// Acquire::Item::ReportMirrorFailure /*{{{*/
void pkgAcquire::Item::ReportMirrorFailure(std::string const &FailCode)
{
   // public entry point; the code doubles as the detail message here
   ReportMirrorFailureToCentral(*this, FailCode, FailCode);
}
/*}}}*/
std::string pkgAcquire::Item::HashSum() const /*{{{*/
{
   // Return the best expected hash (if any) in string form. The list must
   // stay alive in a local as find() hands out a pointer into it.
   HashStringList const hashes = GetExpectedHashes();
   HashString const * const hs = hashes.find(NULL);
   if (hs == NULL)
      return "";
   return hs->toStr();
}
/*}}}*/
pkgAcqTransactionItem::pkgAcqTransactionItem(pkgAcquire * const Owner, /*{{{*/
   pkgAcqMetaClearSig * const transactionManager, IndexTarget const &target) :
   pkgAcquire::Item(Owner), d(NULL), Target(target), TransactionManager(transactionManager)
{
   // register with the transaction — unless we are the manager itself
   if (TransactionManager != this)
      TransactionManager->Add(this);
}
/*}}}*/
pkgAcqTransactionItem::~pkgAcqTransactionItem() /*{{{*/
{
   // out-of-line destructor; nothing to clean up explicitly
}
/*}}}*/
HashStringList pkgAcqTransactionItem::GetExpectedHashesFor(std::string const &MetaKey) const /*{{{*/
{
   // look the key up in the (In)Release data of our transaction
   return GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, MetaKey);
}
/*}}}*/
static void LoadLastMetaIndexParser(pkgAcqMetaClearSig * const TransactionManager, std::string const &FinalRelease, std::string const &FinalInRelease)/*{{{*/
{
   // Load the previously valid (In)Release file into LastMetaIndexParser
   // so the new one can be compared against it later.
   // On an IMS hit the current parser already describes the old data.
   if (TransactionManager->IMSHit == true)
      return;
   if (RealFileExists(FinalInRelease) || RealFileExists(FinalRelease))
   {
      TransactionManager->LastMetaIndexParser = TransactionManager->MetaIndexParser->UnloadedClone();
      if (TransactionManager->LastMetaIndexParser != NULL)
      {
	 // any parse errors of the old file are deliberately swallowed
	 _error->PushToStack();
	 if (RealFileExists(FinalInRelease))
	    TransactionManager->LastMetaIndexParser->Load(FinalInRelease, NULL);
	 else
	    TransactionManager->LastMetaIndexParser->Load(FinalRelease, NULL);
	 // its unlikely to happen, but if what we have is bad ignore it
	 if (_error->PendingError())
	 {
	    delete TransactionManager->LastMetaIndexParser;
	    TransactionManager->LastMetaIndexParser = NULL;
	 }
	 _error->RevertToStack();
      }
   }
}
/*}}}*/
// AcqMetaBase - Constructor /*{{{*/
pkgAcqMetaBase::pkgAcqMetaBase(pkgAcquire * const Owner,
			       pkgAcqMetaClearSig * const TransactionManager,
			       IndexTarget const &DataTarget)
   : pkgAcqTransactionItem(Owner, TransactionManager, DataTarget), d(NULL),
     AuthPass(false), IMSHit(false), State(TransactionStarted)
{
   // a fresh meta item: not yet authenticated, no IMS hit, transaction open
}
/*}}}*/
// AcqMetaBase::Add - Add a item to the current Transaction /*{{{*/
void pkgAcqMetaBase::Add(pkgAcqTransactionItem * const I)
{
   // track the item so commit/abort can visit it later
   Transaction.push_back(I);
}
/*}}}*/
// AcqMetaBase::AbortTransaction - Abort the current Transaction /*{{{*/
void pkgAcqMetaBase::AbortTransaction()
{
   // Abort the whole transaction: flip the state, then let every member
   // item clean up after itself. Double abort/commit is a programming error.
   if(_config->FindB("Debug::Acquire::Transaction", false) == true)
      std::clog << "AbortTransaction: " << TransactionManager << std::endl;
   switch (TransactionManager->State)
   {
      case TransactionStarted: break;
      case TransactionAbort: _error->Fatal("Transaction %s was already aborted and is aborted again", TransactionManager->Target.URI.c_str()); return;
      case TransactionCommit: _error->Fatal("Transaction %s was already aborted and is now committed", TransactionManager->Target.URI.c_str()); return;
   }
   TransactionManager->State = TransactionAbort;
   // ensure the toplevel is in error state too
   for (std::vector<pkgAcqTransactionItem*>::iterator I = Transaction.begin();
	I != Transaction.end(); ++I)
   {
      // items still fetching are left in the queue; the rest is dequeued
      // before being told about the abort
      if ((*I)->Status != pkgAcquire::Item::StatFetching)
	 Owner->Dequeue(*I);
      (*I)->TransactionState(TransactionAbort);
   }
   Transaction.clear();
}
/*}}}*/
// AcqMetaBase::TransactionHasError - Check for errors in Transaction	/*{{{*/
/* Returns true if any item registered in this transaction ended up in
   an error state; idle, done and still-fetching items are fine. */
APT_PURE bool pkgAcqMetaBase::TransactionHasError() const
{
   return std::any_of(Transaction.begin(), Transaction.end(),
	 [](pkgAcqTransactionItem const * const T) {
	    switch (T->Status)
	    {
	       case StatAuthError:
	       case StatError:
	       case StatTransientNetworkError:
		  return true;
	       case StatDone:
	       case StatIdle:
	       case StatFetching:
		  break;
	    }
	    return false;
	 });
}
/*}}}*/
// AcqMetaBase::CommitTransaction - Commit a transaction		/*{{{*/
// ---------------------------------------------------------------------
/* Moves the new files of every registered item into place and removes
   files staged for removal (see TransactionStageCopy/-Removal).
   Committing twice, or committing after an abort, is a programming
   error and reported fatally. */
void pkgAcqMetaBase::CommitTransaction()
{
   if(_config->FindB("Debug::Acquire::Transaction", false) == true)
      std::clog << "CommitTransaction: " << this << std::endl;
   switch (TransactionManager->State)
   {
      case TransactionStarted: break;
      // BUGFIX: this message previously read "was already committed and is now
      // aborted", which describes the opposite transition (that text belongs
      // to AbortTransaction's commit case)
      case TransactionAbort: _error->Fatal("Transaction %s was already aborted and is now committed", TransactionManager->Target.URI.c_str()); return;
      case TransactionCommit: _error->Fatal("Transaction %s was already committed and is again committed", TransactionManager->Target.URI.c_str()); return;
   }
   TransactionManager->State = TransactionCommit;
   // move new files into place *and* remove files that are not
   // part of the transaction but are still on disk
   for (std::vector<pkgAcqTransactionItem*>::iterator I = Transaction.begin();
	I != Transaction.end(); ++I)
   {
      (*I)->TransactionState(TransactionCommit);
   }
   Transaction.clear();
}
/*}}}*/
// AcqMetaBase::TransactionStageCopy - Stage a file for copying		/*{{{*/
/* Marks item I so that on commit the file From (usually in partial/)
   replaces To (its final location in the lists directory). */
void pkgAcqMetaBase::TransactionStageCopy(pkgAcqTransactionItem * const I,
					  const std::string &From,
					  const std::string &To)
{
   I->PartialFile = From;
   I->DestFile = To;
}
/*}}}*/
// AcqMetaBase::TransactionStageRemoval - Stage a file for removal	/*{{{*/
/* An empty PartialFile tells the commit step that FinalFile should be
   deleted from disk instead of being replaced by a new version. */
void pkgAcqMetaBase::TransactionStageRemoval(pkgAcqTransactionItem * const I,
					     const std::string &FinalFile)
{
   I->PartialFile.clear();
   I->DestFile = FinalFile;
}
/*}}}*/
// AcqMetaBase::GenerateAuthWarning - Check gpg authentication error	/*{{{*/
/* This method is called from ::Failed handlers. If it returns true,
   no fallback to other files or modi is performed */
bool pkgAcqMetaBase::CheckStopAuthentication(pkgAcquire::Item * const I, const std::string &Message)
{
   string const Final = I->GetFinalFilename();
   std::string const GPGError = LookupTag(Message, "Message");
   if (FileExists(Final))
   {
      // a previously verified copy is still on disk, so this failure is
      // only transient: warn, run the hook and keep the old indexes
      I->Status = StatTransientNetworkError;
      _error->Warning(_("An error occurred during the signature verification. "
	       "The repository is not updated and the previous index files will be used. "
	       "GPG error: %s: %s"),
	    Desc.Description.c_str(),
	    GPGError.c_str());
      RunScripts("APT::Update::Auth-Failure");
      return true;
   // reuse GPGError instead of parsing the same tag out of Message again
   } else if (GPGError.find("NODATA") != string::npos) {
      /* Invalid signature file, reject (LP: #346386) (Closes: #627642) */
      _error->Error(_("GPG error: %s: %s"),
	    Desc.Description.c_str(),
	    GPGError.c_str());
      I->Status = StatAuthError;
      return true;
   } else {
      // no old file and no clear bogus-signature marker: warn only, the
      // caller may still try a fallback (e.g. Release/Release.gpg)
      _error->Warning(_("GPG error: %s: %s"),
	    Desc.Description.c_str(),
	    GPGError.c_str());
   }
   // gpgv method failed
   ReportMirrorFailureToCentral(*this, "GPGFailure", GPGError);
   return false;
}
/*}}}*/
// AcqMetaBase::Custom600Headers - Get header for AcqMetaBase		/*{{{*/
// ---------------------------------------------------------------------
/* Builds the RFC822-style 600 URI Acquire message headers: marks the
   file as an index, caps the download size and, if a previous copy
   exists on disk, asks for If-Modified-Since handling. */
string pkgAcqMetaBase::Custom600Headers() const
{
   std::string Header = "\nIndex-File: true";
   std::string SizeLimit;
   strprintf(SizeLimit, "\nMaximum-Size: %i",
	     _config->FindI("Acquire::MaxReleaseFileSize", 10*1000*1000));
   Header.append(SizeLimit);
   std::string const FinalFile = GetFinalFilename();
   struct stat Buf;
   if (stat(FinalFile.c_str(), &Buf) == 0)
      Header.append("\nLast-Modified: ").append(TimeRFC1123(Buf.st_mtime, false));
   return Header;
}
/*}}}*/
// AcqMetaBase::QueueForSignatureVerify					/*{{{*/
/* Hands File and its Signature to the gpgv method for verification;
   from this point on the item is in the authentication pass. */
void pkgAcqMetaBase::QueueForSignatureVerify(pkgAcqTransactionItem * const I, std::string const &File, std::string const &Signature)
{
   AuthPass = true;
   // the gpgv method interprets "gpgv:<signature>" URIs and verifies
   // DestFile against that signature
   I->Desc.URI = "gpgv:" + Signature;
   I->DestFile = File;
   QueueURI(I->Desc);
   I->SetActiveSubprocess("gpgv");
}
/*}}}*/
// AcqMetaBase::CheckDownloadDone					/*{{{*/
/* Post-download (pre-verification) processing of a Release/InRelease
   file. Returns true when the file is ready for signature checking;
   returns false if a local copy step had to be requeued first. */
bool pkgAcqMetaBase::CheckDownloadDone(pkgAcqTransactionItem * const I, const std::string &Message, HashStringList const &Hashes) const
{
   // We have just finished downloading a Release file (it is not
   // verified yet)
   // Save the final base URI we got this Release file from
   if (I->UsedMirror.empty() == false && _config->FindB("Acquire::SameMirrorForAllIndexes", true))
   {
      if (APT::String::Endswith(I->Desc.URI, "InRelease"))
	 TransactionManager->BaseURI = I->Desc.URI.substr(0, I->Desc.URI.length() - strlen("InRelease"));
      else if (APT::String::Endswith(I->Desc.URI, "Release"))
	 TransactionManager->BaseURI = I->Desc.URI.substr(0, I->Desc.URI.length() - strlen("Release"));
   }
   // the method may have stored the file elsewhere (e.g. a local file:
   // source); requeue a copy into our partial file and come back later
   std::string const FileName = LookupTag(Message,"Filename");
   if (FileName != I->DestFile && RealFileExists(I->DestFile) == false)
   {
      I->Local = true;
      I->Desc.URI = "copy:" + FileName;
      I->QueueURI(I->Desc);
      return false;
   }
   // make sure to verify against the right file on I-M-S hit
   bool IMSHit = StringToBool(LookupTag(Message,"IMS-Hit"), false);
   if (IMSHit == false && Hashes.usable())
   {
      // detect IMS-Hits servers haven't detected by Hash comparison
      std::string const FinalFile = I->GetFinalFilename();
      if (RealFileExists(FinalFile) && Hashes.VerifyFile(FinalFile) == true)
      {
	 IMSHit = true;
	 RemoveFile("CheckDownloadDone", I->DestFile);
      }
   }
   if(IMSHit == true)
   {
      // for simplicity, the transaction manager is always InRelease
      // even if it doesn't exist.
      TransactionManager->IMSHit = true;
      I->PartialFile = I->DestFile = I->GetFinalFilename();
   }
   // set Item to complete as the remaining work is all local (verify etc)
   I->Complete = true;
   return true;
}
/*}}}*/
/* Called after gpgv succeeded: load and sanity-check the now trusted
   metaindex, then queue the index files it lists for download.
   Returns false (and sets StatAuthError) if the content fails further
   verification. */
bool pkgAcqMetaBase::CheckAuthDone(string const &Message)		/*{{{*/
{
   // At this point, the gpgv method has succeeded, so there is a
   // valid signature from a key in the trusted keyring. We
   // perform additional verification of its contents, and use them
   // to verify the indexes we are about to download
   if (_config->FindB("Debug::pkgAcquire::Auth", false))
      std::cerr << "Signature verification succeeded: " << DestFile << std::endl;
   if (TransactionManager->IMSHit == false)
   {
      // open the last (In)Release if we have it
      std::string const FinalFile = GetFinalFilename();
      std::string FinalRelease;
      std::string FinalInRelease;
      if (APT::String::Endswith(FinalFile, "InRelease"))
      {
	 FinalInRelease = FinalFile;
	 FinalRelease = FinalFile.substr(0, FinalFile.length() - strlen("InRelease")) + "Release";
      }
      else
      {
	 FinalInRelease = FinalFile.substr(0, FinalFile.length() - strlen("Release")) + "InRelease";
	 FinalRelease = FinalFile;
      }
      // keep the previous metaindex around for change detection
      LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
   }
   // parse the verified file; a parse failure is only tolerated when the
   // user explicitly allows weakly secured repositories
   bool const GoodAuth = TransactionManager->MetaIndexParser->Load(DestFile, &ErrorText);
   if (GoodAuth == false && AllowInsecureRepositories(InsecureType::WEAK, Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == false)
   {
      Status = StatAuthError;
      return false;
   }
   // distribution sanity checks (expected dist, Valid-Until, downgrades)
   if (!VerifyVendor(Message))
   {
      Status = StatAuthError;
      return false;
   }
   // Download further indexes with verification
   TransactionManager->QueueIndexes(GoodAuth);
   return GoodAuth;
}
/*}}}*/
/* Decide for every configured index target (Packages, Sources,
   Translation-*, …) whether and how it is fetched: skipped (optional or
   unsupported), reused from disk, patched via pdiffs or downloaded in
   full. 'verify' tells us whether hashes from the Release file may be
   used. NOTE(review): MetaIndexParser is dereferenced at GetIndexTargets()
   even though hasReleaseFile guards against NULL elsewhere — presumably it
   is never NULL for a clearsig manager; confirm before touching. */
void pkgAcqMetaClearSig::QueueIndexes(bool const verify)		/*{{{*/
{
   // at this point the real Items are loaded in the fetcher
   ExpectedAdditionalItems = 0;
   std::set<std::string> targetsSeen;
   bool const hasReleaseFile = TransactionManager->MetaIndexParser != NULL;
   bool const metaBaseSupportsByHash = hasReleaseFile && TransactionManager->MetaIndexParser->GetSupportsAcquireByHash();
   bool hasHashes = true;
   auto IndexTargets = TransactionManager->MetaIndexParser->GetIndexTargets();
   // without verification we only treat the Release file as carrying
   // hashes if at least one target is actually listed in it
   if (hasReleaseFile && verify == false)
      hasHashes = std::any_of(IndexTargets.begin(), IndexTargets.end(),
	    [&](IndexTarget const &Target) { return TransactionManager->MetaIndexParser->Exists(Target.MetaKey); });
   for (auto&& Target: IndexTargets)
   {
      // if we have seen a target which is created-by a target this one here is declared a
      // fallback to, we skip acquiring the fallback (but we make sure we clean up)
      if (targetsSeen.find(Target.Option(IndexTarget::FALLBACK_OF)) != targetsSeen.end())
      {
	 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
	 new CleanupItem(Owner, TransactionManager, Target);
	 continue;
      }
      // all is an implementation detail. Users shouldn't use this as arch
      // We need this support trickery here as e.g. Debian has binary-all files already,
      // but arch:all packages are still in the arch:any files, so we would waste precious
      // download time, bandwidth and diskspace for nothing, BUT Debian doesn't feature all
      // in the set of supported architectures, so we can filter based on this property rather
      // than invent an entirely new flag we would need to carry for all of eternity.
      if (hasReleaseFile && Target.Option(IndexTarget::ARCHITECTURE) == "all")
      {
	 if (TransactionManager->MetaIndexParser->IsArchitectureAllSupportedFor(Target) == false)
	 {
	    new CleanupItem(Owner, TransactionManager, Target);
	    continue;
	 }
      }
      bool trypdiff = Target.OptionBool(IndexTarget::PDIFFS);
      if (hasReleaseFile == true)
      {
	 if (TransactionManager->MetaIndexParser->Exists(Target.MetaKey) == false)
	 {
	    // optional targets that we do not have in the Release file are skipped
	    if (hasHashes == true && Target.IsOptional)
	    {
	       new CleanupItem(Owner, TransactionManager, Target);
	       continue;
	    }
	    std::string const &arch = Target.Option(IndexTarget::ARCHITECTURE);
	    if (arch.empty() == false)
	    {
	       if (TransactionManager->MetaIndexParser->IsArchitectureSupported(arch) == false)
	       {
		  new CleanupItem(Owner, TransactionManager, Target);
		  _error->Notice(_("Skipping acquire of configured file '%s' as repository '%s' doesn't support architecture '%s'"),
			Target.MetaKey.c_str(), TransactionManager->Target.Description.c_str(), arch.c_str());
		  continue;
	       }
	       // if the architecture is officially supported but currently no packages for it available,
	       // ignore silently as this is pretty much the same as just shipping an empty file.
	       // if we don't know which architectures are supported, we do NOT ignore it to notify user about this
	       if (hasHashes == true && TransactionManager->MetaIndexParser->IsArchitectureSupported("*undefined*") == false)
	       {
		  new CleanupItem(Owner, TransactionManager, Target);
		  continue;
	       }
	    }
	    // a mandatory target missing from a hash-carrying Release file
	    // is a hard authentication error
	    if (hasHashes == true)
	    {
	       Status = StatAuthError;
	       strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), Target.MetaKey.c_str());
	       return;
	    }
	    else
	    {
	       new pkgAcqIndex(Owner, TransactionManager, Target);
	       continue;
	    }
	 }
	 else if (verify)
	 {
	    auto const hashes = GetExpectedHashesFor(Target.MetaKey);
	    if (hashes.empty() == false)
	    {
	       // listed, but only with hashes too weak to trust
	       if (hashes.usable() == false && TargetIsAllowedToBe(TransactionManager->Target, InsecureType::WEAK) == false)
	       {
		  new CleanupItem(Owner, TransactionManager, Target);
		  _error->Warning(_("Skipping acquire of configured file '%s' as repository '%s' provides only weak security information for it"),
			Target.MetaKey.c_str(), TransactionManager->Target.Description.c_str());
		  continue;
	       }
	       // empty files are skipped as acquiring the very small compressed files is a waste of time
	       else if (hashes.FileSize() == 0)
	       {
		  new CleanupItem(Owner, TransactionManager, Target);
		  targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
		  continue;
	       }
	    }
	 }
	 // autoselect the compression method
	 std::vector<std::string> types = VectorizeString(Target.Option(IndexTarget::COMPRESSIONTYPES), ' ');
	 // drop compression types the Release file doesn't list for this target
	 types.erase(std::remove_if(types.begin(), types.end(), [&](std::string const &t) {
	    if (t == "uncompressed")
	       return TransactionManager->MetaIndexParser->Exists(Target.MetaKey) == false;
	    std::string const MetaKey = Target.MetaKey + "." + t;
	    return TransactionManager->MetaIndexParser->Exists(MetaKey) == false;
	 }), types.end());
	 if (types.empty() == false)
	 {
	    std::ostringstream os;
	    // add the special compressiontype byhash first if supported
	    std::string const useByHashConf = Target.Option(IndexTarget::BY_HASH);
	    bool useByHash = false;
	    if(useByHashConf == "force")
	       useByHash = true;
	    else
	       useByHash = StringToBool(useByHashConf) == true && metaBaseSupportsByHash;
	    if (useByHash == true)
	       os << "by-hash ";
	    std::copy(types.begin(), types.end()-1, std::ostream_iterator<std::string>(os, " "));
	    os << *types.rbegin();
	    Target.Options["COMPRESSIONTYPES"] = os.str();
	 }
	 else
	    Target.Options["COMPRESSIONTYPES"].clear();
	 std::string filename = GetExistingFilename(GetFinalFileNameFromURI(Target.URI));
	 if (filename.empty() == false)
	 {
	    // if the Release file is a hit and we have an index it must be the current one
	    if (TransactionManager->IMSHit == true)
	       ;
	    else if (TransactionManager->LastMetaIndexParser != NULL)
	    {
	       // see if the file changed since the last Release file
	       // we use the uncompressed files as we might compress differently compared to the server,
	       // so the hashes might not match, even if they contain the same data.
	       HashStringList const newFile = GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, Target.MetaKey);
	       HashStringList const oldFile = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target.MetaKey);
	       if (newFile != oldFile)
		  filename.clear();
	    }
	    else
	       filename.clear();
	 }
	 else
	    trypdiff = false; // no file to patch
	 // unchanged on-disk file: reuse it (plus its pdiff index) as-is
	 if (filename.empty() == false)
	 {
	    new NoActionItem(Owner, Target, filename);
	    std::string const idxfilename = GetFinalFileNameFromURI(GetDiffIndexURI(Target));
	    if (FileExists(idxfilename))
	       new NoActionItem(Owner, Target, idxfilename);
	    targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
	    continue;
	 }
	 // check if we have patches available
	 trypdiff &= TransactionManager->MetaIndexParser->Exists(GetDiffIndexFileName(Target.MetaKey));
      }
      else
      {
	 // if we have no file to patch, no point in trying
	 trypdiff &= (GetExistingFilename(GetFinalFileNameFromURI(Target.URI)).empty() == false);
      }
      // no point in patching from local sources
      if (trypdiff)
      {
	 std::string const proto = Target.URI.substr(0, strlen("file:/"));
	 if (proto == "file:/" || proto == "copy:/" || proto == "cdrom:")
	    trypdiff = false;
      }
      // Queue the Index file (Packages, Sources, Translation-$foo, …)
      targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
      if (trypdiff)
	 new pkgAcqDiffIndex(Owner, TransactionManager, Target);
      else
	 new pkgAcqIndex(Owner, TransactionManager, Target);
   }
}
/*}}}*/
/* Sanity checks on the parsed metaindex: expiry via Valid-Until,
   protection against date downgrades (treated as a late IMS hit) and a
   warning if the codename doesn't match the dist configured in
   sources.list. Returns false only on expiry. */
bool pkgAcqMetaBase::VerifyVendor(string const &)			/*{{{*/
{
   // normalise the configured dist for the CheckDist comparison below
   string Transformed = TransactionManager->MetaIndexParser->GetExpectedDist();
   if (Transformed == "../project/experimental")
   {
      Transformed = "experimental";
   }
   auto pos = Transformed.rfind('/');
   if (pos != string::npos)
   {
      Transformed = Transformed.substr(0, pos);
   }
   if (Transformed == ".")
   {
      Transformed = "";
   }
   if (TransactionManager->MetaIndexParser->GetValidUntil() > 0)
   {
      time_t const invalid_since = time(NULL) - TransactionManager->MetaIndexParser->GetValidUntil();
      if (invalid_since > 0)
      {
	 std::string errmsg;
	 strprintf(errmsg,
	       // TRANSLATOR: The first %s is the URL of the bad Release file, the second is
	       // the time since then the file is invalid - formatted in the same way as in
	       // the download progress display (e.g. 7d 3h 42min 1s)
	       _("Release file for %s is expired (invalid since %s). "
		  "Updates for this repository will not be applied."),
	       Target.URI.c_str(), TimeToStr(invalid_since).c_str());
	 if (ErrorText.empty())
	    ErrorText = errmsg;
	 return _error->Error("%s", errmsg.c_str());
      }
   }
   /* Did we get a file older than what we have? This is a last minute IMS hit and doubles
      as a prevention of downgrading us to older (still valid) files */
   if (TransactionManager->IMSHit == false && TransactionManager->LastMetaIndexParser != NULL &&
	 TransactionManager->LastMetaIndexParser->GetDate() > TransactionManager->MetaIndexParser->GetDate())
   {
      TransactionManager->IMSHit = true;
      RemoveFile("VerifyVendor", DestFile);
      PartialFile = DestFile = GetFinalFilename();
      // load the 'old' file in the 'new' one instead of flipping pointers as
      // the new one isn't owned by us, while the old one is so cleanup would be confused.
      TransactionManager->MetaIndexParser->swapLoad(TransactionManager->LastMetaIndexParser);
      delete TransactionManager->LastMetaIndexParser;
      TransactionManager->LastMetaIndexParser = NULL;
   }
   if (_config->FindB("Debug::pkgAcquire::Auth", false))
   {
      std::cerr << "Got Codename: " << TransactionManager->MetaIndexParser->GetCodename() << std::endl;
      std::cerr << "Expecting Dist: " << TransactionManager->MetaIndexParser->GetExpectedDist() << std::endl;
      std::cerr << "Transformed Dist: " << Transformed << std::endl;
   }
   if (TransactionManager->MetaIndexParser->CheckDist(Transformed) == false)
   {
      // This might become fatal one day
//       Status = StatAuthError;
//       ErrorText = "Conflicting distribution; expected "
//          + MetaIndexParser->GetExpectedDist() + " but got "
//          + MetaIndexParser->GetCodename();
//       return false;
      if (!Transformed.empty())
      {
	 _error->Warning(_("Conflicting distribution: %s (expected %s but got %s)"),
	       Desc.Description.c_str(),
	       Transformed.c_str(),
	       TransactionManager->MetaIndexParser->GetCodename().c_str());
      }
   }
   return true;
}
/*}}}*/
// empty destructor - there is nothing to clean up at this level
pkgAcqMetaBase::~pkgAcqMetaBase()
{
}
/* The clearsigned InRelease item doubles as the transaction manager for
   the whole update run, which is why it passes 'this' to its base. */
pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire * const Owner,	/*{{{*/
      IndexTarget const &ClearsignedTarget,
      IndexTarget const &DetachedDataTarget, IndexTarget const &DetachedSigTarget,
      metaIndex * const MetaIndexParser) :
   pkgAcqMetaIndex(Owner, this, ClearsignedTarget, DetachedSigTarget),
   d(NULL), DetachedDataTarget(DetachedDataTarget),
   MetaIndexParser(MetaIndexParser), LastMetaIndexParser(NULL)
{
   // index targets + (worst case:) Release/Release.gpg
   // we don't know the real count before the Release file is parsed,
   // so start with "unknown/unlimited"
   ExpectedAdditionalItems = std::numeric_limits<decltype(ExpectedAdditionalItems)>::max();
   TransactionManager->Add(this);
}
/*}}}*/
/* Frees the previous metaindex (if one was loaded); the current
   MetaIndexParser is owned by the caller, not by us. */
pkgAcqMetaClearSig::~pkgAcqMetaClearSig()				/*{{{*/
{
   // delete on a null pointer is a no-op, so no guard is needed
   delete LastMetaIndexParser;
}
/*}}}*/
// pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers	/*{{{*/
/* On top of the base headers a clearsigned file may fail without
   aborting (Release/Release.gpg is the fallback) and can be pinned to a
   specific signing key. */
string pkgAcqMetaClearSig::Custom600Headers() const
{
   std::string Headers = pkgAcqMetaBase::Custom600Headers();
   Headers.append("\nFail-Ignore: true");
   std::string const SignedBy = TransactionManager->MetaIndexParser->GetSignedBy();
   if (SignedBy.empty() == false)
      Headers.append("\nSigned-By: ").append(SignedBy);
   return Headers;
}
/*}}}*/
/* Called when the fetch run ends: commit the transaction, but only if
   it is still in the started state and no item reported an error. */
void pkgAcqMetaClearSig::Finished()					/*{{{*/
{
   if(_config->FindB("Debug::Acquire::Transaction", false) == true)
      std::clog << "Finished: " << DestFile <<std::endl;
   if (TransactionManager->State != TransactionStarted)
      return;
   if (TransactionManager->TransactionHasError())
      return;
   TransactionManager->CommitTransaction();
}
/*}}}*/
/* A downloaded InRelease file must actually be a clearsigned document;
   anything else (e.g. a captive portal page) is renamed away as an
   error so it can't masquerade as repository metadata. */
bool pkgAcqMetaClearSig::VerifyDone(std::string const &Message,		/*{{{*/
				    pkgAcquire::MethodConfig const * const Cnf)
{
   Item::VerifyDone(Message, Cnf);
   if (FileExists(DestFile) == false)
      return true;
   if (StartsWithGPGClearTextSignature(DestFile))
      return true;
   return RenameOnError(NotClearsigned);
}
/*}}}*/
// pkgAcqMetaClearSig::Done - We got a file				/*{{{*/
/* First pass: queue the downloaded InRelease for gpgv verification.
   Second pass (after gpgv): on success stage the file for commit, on a
   non-auth failure fall back to treating it as a plain Release file. */
void pkgAcqMetaClearSig::Done(std::string const &Message,
			      HashStringList const &Hashes,
			      pkgAcquire::MethodConfig const * const Cnf)
{
   Item::Done(Message, Hashes, Cnf);
   if(AuthPass == false)
   {
      // download pass finished: hand the file to gpgv (inline signature,
      // so file and signature are the same path)
      if(CheckDownloadDone(this, Message, Hashes) == true)
	 QueueForSignatureVerify(this, DestFile, DestFile);
      return;
   }
   else if(CheckAuthDone(Message) == true)
   {
      if (TransactionManager->IMSHit == false)
	 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
      else if (RealFileExists(GetFinalFilename()) == false)
      {
	 // We got an InRelease file IMSHit, but we haven't one, which means
	 // we had a valid Release/Release.gpg combo stepping in, which we have
	 // to 'acquire' now to ensure list cleanup isn't removing them
	 new NoActionItem(Owner, DetachedDataTarget);
	 new NoActionItem(Owner, DetachedSigTarget);
      }
   }
   else if (Status != StatAuthError)
   {
      // auth failed but not fatally: keep/stage the data as a plain
      // (unsigned) Release file instead
      string const FinalFile = GetFinalFileNameFromURI(DetachedDataTarget.URI);
      string const OldFile = GetFinalFilename();
      if (TransactionManager->IMSHit == false)
	 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
      else if (RealFileExists(OldFile) == false)
	 new NoActionItem(Owner, DetachedDataTarget);
      else
	 TransactionManager->TransactionStageCopy(this, OldFile, FinalFile);
   }
}
/*}}}*/
/* Failure of the InRelease fetch or of its verification. Before the
   auth pass we fall back to the detached Release/Release.gpg pair;
   during the auth pass we only continue if the user explicitly allows
   insecure repositories. */
void pkgAcqMetaClearSig::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf) /*{{{*/
{
   Item::Failed(Message, Cnf);
   if (AuthPass == false)
   {
      if (Status == StatAuthError || Status == StatTransientNetworkError)
      {
	 // if we expected a ClearTextSignature (InRelease) but got a network
	 // error or got a file, but it wasn't valid, we end up here (see VerifyDone).
	 // As these is usually called by web-portals we do not try Release/Release.gpg
	 // as this is gonna fail anyway and instead abort our try (LP#346386)
	 TransactionManager->AbortTransaction();
	 return;
      }
      // Queue the 'old' InRelease file for removal if we try Release.gpg
      // as otherwise the file will stay around and gives a false-auth
      // impression (CVE-2012-0214)
      TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
      Status = StatDone;
      // fall back to the detached Release + Release.gpg pair
      new pkgAcqMetaIndex(Owner, TransactionManager, DetachedDataTarget, DetachedSigTarget);
   }
   else
   {
      if(CheckStopAuthentication(this, Message))
	 return;
      if(AllowInsecureRepositories(InsecureType::UNSIGNED, Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == true)
      {
	 Status = StatDone;
	 /* InRelease files become Release files, otherwise
	  * they would be considered as trusted later on */
	 string const FinalRelease = GetFinalFileNameFromURI(DetachedDataTarget.URI);
	 string const PartialRelease = GetPartialFileNameFromURI(DetachedDataTarget.URI);
	 string const FinalReleasegpg = GetFinalFileNameFromURI(DetachedSigTarget.URI);
	 string const FinalInRelease = GetFinalFilename();
	 Rename(DestFile, PartialRelease);
	 TransactionManager->TransactionStageCopy(this, PartialRelease, FinalRelease);
	 LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
	 // we parse the indexes here because at this point the user wanted
	 // a repository that may potentially harm him
	 if (TransactionManager->MetaIndexParser->Load(PartialRelease, &ErrorText) == false || VerifyVendor(Message) == false)
	    /* expired Release files are still a problem you need extra force for */;
	 else
	    TransactionManager->QueueIndexes(true);
      }
   }
}
/*}}}*/
/* Fetch item for a detached Release file; its signature target is kept
   so pkgAcqMetaSig can be spawned once the data arrived. The download
   is queued immediately. */
pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire * const Owner,		/*{{{*/
				 pkgAcqMetaClearSig * const TransactionManager,
				 IndexTarget const &DataTarget,
				 IndexTarget const &DetachedSigTarget) :
   pkgAcqMetaBase(Owner, TransactionManager, DataTarget), d(NULL),
   DetachedSigTarget(DetachedSigTarget)
{
   if(_config->FindB("Debug::Acquire::Transaction", false) == true)
      std::clog << "New pkgAcqMetaIndex with TransactionManager "
		<< this->TransactionManager << std::endl;
   // download into partial/ first; commit moves it into place later
   DestFile = GetPartialFileNameFromURI(DataTarget.URI);
   // Create the item
   Desc.Owner = this;
   Desc.URI = DataTarget.URI;
   Desc.Description = DataTarget.Description;
   Desc.ShortDesc = DataTarget.ShortDesc;
   QueueURI(Desc);
}
/*}}}*/
/* The Release file arrived: if the download post-processing succeeds,
   fetch the detached signature next; verification and queueing of the
   index files happens in pkgAcqMetaSig::Done(). */
void pkgAcqMetaIndex::Done(string const &Message,			/*{{{*/
			   HashStringList const &Hashes,
			   pkgAcquire::MethodConfig const * const Cfg)
{
   Item::Done(Message,Hashes,Cfg);
   if(CheckDownloadDone(this, Message, Hashes) == false)
      return;
   // we have a Release file, now download the Signature, all further
   // verify/queue for additional downloads will be done in the
   // pkgAcqMetaSig::Done() code
   new pkgAcqMetaSig(Owner, TransactionManager, DetachedSigTarget, this);
}
/*}}}*/
// pkgAcqMetaIndex::Failed - no Release file present			/*{{{*/
/* Without a Release file the repository is unauthenticated; only
   continue (hashless) when the user explicitly allows that. */
void pkgAcqMetaIndex::Failed(string const &Message,
			     pkgAcquire::MethodConfig const * const Cnf)
{
   pkgAcquire::Item::Failed(Message, Cnf);
   Status = StatDone;
   // No Release file was present so fall
   // back to queueing Packages files without verification
   // only allow going further if the user explicitly wants it
   if(AllowInsecureRepositories(InsecureType::NORELEASE, Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == false)
      return;
   // ensure old Release files are removed
   TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
   // queue without any kind of hashsum support
   TransactionManager->QueueIndexes(false);
}
/*}}}*/
// the URI shown to the user for this item is the Release file URI
std::string pkgAcqMetaIndex::DescURI() const				/*{{{*/
{
   return Target.URI;
}
/*}}}*/
// empty out-of-line destructor
pkgAcqMetaIndex::~pkgAcqMetaIndex() {}
// AcqMetaSig::AcqMetaSig - Constructor					/*{{{*/
/* Fetch item for a detached Release.gpg signature belonging to
   MetaIndex. On an IMS hit for the Release file the download is skipped
   entirely and the already verified on-disk pair is re-verified. */
pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire * const Owner,
			     pkgAcqMetaClearSig * const TransactionManager,
			     IndexTarget const &Target,
			     pkgAcqMetaIndex * const MetaIndex) :
   pkgAcqTransactionItem(Owner, TransactionManager, Target), d(NULL), MetaIndex(MetaIndex)
{
   DestFile = GetPartialFileNameFromURI(Target.URI);
   // remove any partial downloaded sig-file in partial/.
   // it may confuse proxies and is too small to warrant a
   // partial download anyway
   RemoveFile("pkgAcqMetaSig", DestFile);
   // set the TransactionManager
   if(_config->FindB("Debug::Acquire::Transaction", false) == true)
      std::clog << "New pkgAcqMetaSig with TransactionManager "
		<< TransactionManager << std::endl;
   // Create the item
   Desc.Description = Target.Description;
   Desc.Owner = this;
   Desc.ShortDesc = Target.ShortDesc;
   Desc.URI = Target.URI;
   // If we got a hit for Release, we will get one for Release.gpg too (or obscure errors),
   // so we skip the download step and go instantly to verification
   if (TransactionManager->IMSHit == true && RealFileExists(GetFinalFilename()))
   {
      Complete = true;
      Status = StatDone;
      PartialFile = DestFile = GetFinalFilename();
      MetaIndexFileSignature = DestFile;
      MetaIndex->QueueForSignatureVerify(this, MetaIndex->DestFile, DestFile);
   }
   else
      QueueURI(Desc);
}
/*}}}*/
// empty destructor - nothing owned at this level			/*{{{*/
pkgAcqMetaSig::~pkgAcqMetaSig()
{
}
/*}}}*/
// pkgAcqMetaSig::Custom600Headers - Insert custom request headers	/*{{{*/
/* Adds the optional Signed-By key pinning on top of the transaction
   item headers. */
std::string pkgAcqMetaSig::Custom600Headers() const
{
   std::string Headers = pkgAcqTransactionItem::Custom600Headers();
   std::string const SignedBy = TransactionManager->MetaIndexParser->GetSignedBy();
   if (SignedBy.empty() == false)
      Headers.append("\nSigned-By: ").append(SignedBy);
   return Headers;
}
/*}}}*/
// AcqMetaSig::Done - The signature was downloaded/verified		/*{{{*/
/* Two-phase like pkgAcqMetaClearSig::Done: first pass queues the
   Release+Release.gpg pair for gpgv, second pass stages the verified
   pair (or, on non-fatal auth failure, only the data file) for commit. */
void pkgAcqMetaSig::Done(string const &Message, HashStringList const &Hashes,
			 pkgAcquire::MethodConfig const * const Cfg)
{
   // restore the real signature path if a previous pass redirected it
   if (MetaIndexFileSignature.empty() == false)
   {
      DestFile = MetaIndexFileSignature;
      MetaIndexFileSignature.clear();
   }
   Item::Done(Message, Hashes, Cfg);
   if(MetaIndex->AuthPass == false)
   {
      if(MetaIndex->CheckDownloadDone(this, Message, Hashes) == true)
      {
	 // destfile will be modified to point to MetaIndexFile for the
	 // gpgv method, so we need to save it here
	 MetaIndexFileSignature = DestFile;
	 MetaIndex->QueueForSignatureVerify(this, MetaIndex->DestFile, DestFile);
      }
      return;
   }
   else if(MetaIndex->CheckAuthDone(Message) == true)
   {
      if (TransactionManager->IMSHit == false)
      {
	 // stage both the signature and the Release file for commit
	 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
	 TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, MetaIndex->GetFinalFilename());
      }
   }
   else if (MetaIndex->Status != StatAuthError)
   {
      // auth failed non-fatally: keep only the (unsigned) Release file
      std::string const FinalFile = MetaIndex->GetFinalFilename();
      if (TransactionManager->IMSHit == false)
	 TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, FinalFile);
      else
	 TransactionManager->TransactionStageCopy(MetaIndex, FinalFile, FinalFile);
   }
}
/*}}}*/
/* No (or no valid) Release.gpg: remove any stale signature from lists/
   and continue without verification only if the user explicitly allows
   unsigned repositories. */
void pkgAcqMetaSig::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
{
   Item::Failed(Message,Cnf);
   // check if we need to fail at this point
   if (MetaIndex->AuthPass == true && MetaIndex->CheckStopAuthentication(this, Message))
      return;
   // ensures that a Release.gpg file in the lists/ is removed by the transaction
   TransactionManager->TransactionStageRemoval(this, DestFile);
   // only allow going further if the user explicitly wants it
   if (AllowInsecureRepositories(InsecureType::UNSIGNED, MetaIndex->Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == true)
   {
      string const FinalRelease = MetaIndex->GetFinalFilename();
      string const FinalInRelease = TransactionManager->GetFinalFilename();
      LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
      // we parse the indexes here because at this point the user wanted
      // a repository that may potentially harm him
      bool const GoodLoad = TransactionManager->MetaIndexParser->Load(MetaIndex->DestFile, &ErrorText);
      if (MetaIndex->VerifyVendor(Message) == false)
	 /* expired Release files are still a problem you need extra force for */;
      else
	 TransactionManager->QueueIndexes(GoodLoad);
      TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, FinalRelease);
   }
   else if (TransactionManager->IMSHit == false)
      Rename(MetaIndex->DestFile, MetaIndex->DestFile + ".FAILED");
   // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor
   if (Cnf->LocalOnly == true ||
       StringToBool(LookupTag(Message,"Transient-Failure"),false) == false)
   {
      // Ignore this
      Status = StatDone;
   }
}
/*}}}*/
// AcqBaseIndex - Constructor						/*{{{*/
/* Common base for index items (pkgAcqIndex, pkgAcqDiffIndex, …); only
   forwards to pkgAcqTransactionItem. */
pkgAcqBaseIndex::pkgAcqBaseIndex(pkgAcquire * const Owner,
				 pkgAcqMetaClearSig * const TransactionManager,
				 IndexTarget const &Target)
: pkgAcqTransactionItem(Owner, TransactionManager, Target), d(NULL)
{
}
/*}}}*/
/* On an authentication error, append the Release file's creation date
   to the error text to help diagnose hash mismatches caused by stale
   or skewed repository metadata. */
void pkgAcqBaseIndex::Failed(std::string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
{
   pkgAcquire::Item::Failed(Message, Cnf);
   if (Status != StatAuthError)
      return;
   std::string Created;
   auto const ReleaseDate = TransactionManager->MetaIndexParser->GetDate();
   if (ReleaseDate == 0)
      Created = "<unknown>";
   else
      Created = TimeRFC1123(ReleaseDate, true);
   ErrorText.append("Release file created at: ");
   ErrorText.append(Created);
   ErrorText.append("\n");
}
/*}}}*/
// empty out-of-line destructor
pkgAcqBaseIndex::~pkgAcqBaseIndex() {}
// AcqDiffIndex::AcqDiffIndex - Constructor				/*{{{*/
// ---------------------------------------------------------------------
/* Get the DiffIndex file first and see if there are patches available
 * If so, create a pkgAcqIndexDiffs fetcher that will get and apply the
 * patches. If anything goes wrong in that process, it will fall back to
 * the original packages file
 */
pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire * const Owner,
				 pkgAcqMetaClearSig * const TransactionManager,
				 IndexTarget const &Target)
   : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL), diffs(NULL)
{
   // FIXME: Magic number as an upper bound on pdiffs we will reasonably acquire
   ExpectedAdditionalItems = 40;
   Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
   Desc.Owner = this;
   Desc.Description = GetDiffIndexFileName(Target.Description);
   Desc.ShortDesc = Target.ShortDesc;
   // fetch e.g. Packages.diff/Index instead of the index itself
   Desc.URI = GetDiffIndexURI(Target);
   DestFile = GetPartialFileNameFromURI(Desc.URI);
   if(Debug)
      std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;
   QueueURI(Desc);
}
/*}}}*/
// AcqIndex::Custom600Headers - Insert custom request headers		/*{{{*/
// ---------------------------------------------------------------------
/* The only header we use is the last-modified header - and only when we
   don't have a previous metaindex to compare against and a previous
   on-disk copy exists. */
string pkgAcqDiffIndex::Custom600Headers() const
{
   std::string const Base = "\nIndex-File: true";
   if (TransactionManager->LastMetaIndexParser != NULL)
      return Base;
   string const Final = GetFinalFilename();
   if(Debug)
      std::clog << "Custom600Header-IMS: " << Final << std::endl;
   struct stat Buf;
   if (stat(Final.c_str(), &Buf) != 0)
      return Base;
   return Base + "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime, false);
}
/*}}}*/
void pkgAcqDiffIndex::QueueOnIMSHit() const				/*{{{*/
{
   // list cleanup needs to know that this file as well as the already
   // present index is ours, so we create an empty diff to save it for us
   new pkgAcqIndexDiffs(Owner, TransactionManager, Target);
}
/*}}}*/
/* Remove a stale file standing in the way of bootstrap-linking for a
   pdiff patch run; returns false (with optional debug output) if the
   file exists but could not be removed. */
static bool RemoveFileForBootstrapLinking(bool const Debug, std::string const &For, std::string const &Boot)/*{{{*/
{
   if (FileExists(Boot) == false)
      return true;
   if (RemoveFile("Bootstrap-linking", Boot))
      return true;
   if (Debug)
      std::clog << "Bootstrap-linking for patching " << For
		<< " by removing stale " << Boot << " failed!" << std::endl;
   return false;
}
/*}}}*/
bool pkgAcqDiffIndex::ParseDiffIndex(string const &IndexDiffFile) /*{{{*/
{
ExpectedAdditionalItems = 0;
// failing here is fine: our caller will take care of trying to
// get the complete file if patching fails
if(Debug)
std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile
<< std::endl;
FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
pkgTagFile TF(&Fd);
if (Fd.IsOpen() == false || Fd.Failed())
return false;
pkgTagSection Tags;
if(unlikely(TF.Step(Tags) == false))
return false;
HashStringList ServerHashes;
unsigned long long ServerSize = 0;
auto const &posix = std::locale("C.UTF-8");
for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
{
std::string tagname = *type;
tagname.append("-Current");
std::string const tmp = Tags.FindS(tagname.c_str());
if (tmp.empty() == true)
continue;
string hash;
unsigned long long size;
std::stringstream ss(tmp);
ss.imbue(posix);
ss >> hash >> size;
if (unlikely(hash.empty() == true))
continue;
if (unlikely(ServerSize != 0 && ServerSize != size))
continue;
ServerHashes.push_back(HashString(*type, hash));
ServerSize = size;
}
if (ServerHashes.usable() == false)
{
if (Debug == true)
std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl;
return false;
}
std::string const CurrentPackagesFile = GetFinalFileNameFromURI(Target.URI);
HashStringList const TargetFileHashes = GetExpectedHashesFor(Target.MetaKey);
if (TargetFileHashes.usable() == false || ServerHashes != TargetFileHashes)
{
if (Debug == true)
{
std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl;
printHashSumComparison(CurrentPackagesFile, ServerHashes, TargetFileHashes);
}
return false;
}
HashStringList LocalHashes;
// try avoiding calculating the hash here as this is costly
if (TransactionManager->LastMetaIndexParser != NULL)
LocalHashes = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target.MetaKey);
if (LocalHashes.usable() == false)
{
FileFd fd(CurrentPackagesFile, FileFd::ReadOnly, FileFd::Auto);
Hashes LocalHashesCalc(ServerHashes);
LocalHashesCalc.AddFD(fd);
LocalHashes = LocalHashesCalc.GetHashStringList();
}
if (ServerHashes == LocalHashes)
{
// we have the same sha1 as the server so we are done here
if(Debug)
std::clog << "pkgAcqDiffIndex: Package file " << CurrentPackagesFile << " is up-to-date" << std::endl;
QueueOnIMSHit();
return true;
}
if(Debug)
std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at "
<< CurrentPackagesFile << " " << LocalHashes.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl;
// historically, older hashes have more info than newer ones, so start
// collecting with older ones first to avoid implementing complicated
// information merging techniques… a failure is after all always
// recoverable with a complete file and hashes aren't changed that often.
std::vector<char const *> types;
for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
types.push_back(*type);
// parse all of (provided) history
vector<DiffInfo> available_patches;
bool firstAcceptedHashes = true;
for (auto type = types.crbegin(); type != types.crend(); ++type)
{
if (LocalHashes.find(*type) == NULL)
continue;
std::string tagname = *type;
tagname.append("-History");
std::string const tmp = Tags.FindS(tagname.c_str());
if (tmp.empty() == true)
continue;
string hash, filename;
unsigned long long size;
std::stringstream ss(tmp);
ss.imbue(posix);
while (ss >> hash >> size >> filename)
{
if (unlikely(hash.empty() == true || filename.empty() == true))
continue;
// see if we have a record for this file already
std::vector<DiffInfo>::iterator cur = available_patches.begin();
for (; cur != available_patches.end(); ++cur)
{
if (cur->file != filename)
continue;
cur->result_hashes.push_back(HashString(*type, hash));
break;
}
if (cur != available_patches.end())
continue;
if (firstAcceptedHashes == true)
{
DiffInfo next;
next.file = filename;
next.result_hashes.push_back(HashString(*type, hash));
next.result_hashes.FileSize(size);
available_patches.push_back(next);
}
else
{
if (Debug == true)
std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
<< " wasn't in the list for the first parsed hash! (history)" << std::endl;
break;
}
}
firstAcceptedHashes = false;
}
if (unlikely(available_patches.empty() == true))
{
if (Debug)
std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
<< "Couldn't find any patches for the patch series." << std::endl;
return false;
}
for (auto type = types.crbegin(); type != types.crend(); ++type)
{
if (LocalHashes.find(*type) == NULL)
continue;
std::string tagname = *type;
tagname.append("-Patches");
std::string const tmp = Tags.FindS(tagname.c_str());
if (tmp.empty() == true)
continue;
string hash, filename;
unsigned long long size;
std::stringstream ss(tmp);
ss.imbue(posix);
while (ss >> hash >> size >> filename)
{
if (unlikely(hash.empty() == true || filename.empty() == true))
continue;
// see if we have a record for this file already
std::vector<DiffInfo>::iterator cur = available_patches.begin();
for (; cur != available_patches.end(); ++cur)
{
if (cur->file != filename)
continue;
if (cur->patch_hashes.empty())
cur->patch_hashes.FileSize(size);
cur->patch_hashes.push_back(HashString(*type, hash));
break;
}
if (cur != available_patches.end())
continue;
if (Debug == true)
std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
<< " wasn't in the list for the first parsed hash! (patches)" << std::endl;
break;
}
}
for (auto type = types.crbegin(); type != types.crend(); ++type)
{
std::string tagname = *type;
tagname.append("-Download");
std::string const tmp = Tags.FindS(tagname.c_str());
if (tmp.empty() == true)
continue;
string hash, filename;
unsigned long long size;
std::stringstream ss(tmp);
ss.imbue(posix);
// FIXME: all of pdiff supports only .gz compressed patches
while (ss >> hash >> size >> filename)
{
if (unlikely(hash.empty() == true || filename.empty() == true))
continue;
if (unlikely(APT::String::Endswith(filename, ".gz") == false))
continue;
filename.erase(filename.length() - 3);
// see if we have a record for this file already
std::vector<DiffInfo>::iterator cur = available_patches.begin();
for (; cur != available_patches.end(); ++cur)
{
if (cur->file != filename)
continue;
if (cur->download_hashes.empty())
cur->download_hashes.FileSize(size);
cur->download_hashes.push_back(HashString(*type, hash));
break;
}
if (cur != available_patches.end())
continue;
if (Debug == true)
std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
<< " wasn't in the list for the first parsed hash! (download)" << std::endl;
break;
}
}
bool foundStart = false;
for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
cur != available_patches.end(); ++cur)
{
if (LocalHashes != cur->result_hashes)
continue;
available_patches.erase(available_patches.begin(), cur);
foundStart = true;
break;
}
if (foundStart == false || unlikely(available_patches.empty() == true))
{
if (Debug)
std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
<< "Couldn't find the start of the patch series." << std::endl;
return false;
}
for (auto const &patch: available_patches)
if (patch.result_hashes.usable() == false ||
patch.patch_hashes.usable() == false ||
patch.download_hashes.usable() == false)
{
if (Debug)
std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": provides no usable hashes for " << patch.file
<< " so fallback to complete download" << std::endl;
return false;
}
// patching with too many files is rather slow compared to a fast download
unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
if (fileLimit != 0 && fileLimit < available_patches.