You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 

3163 lines
103 KiB

// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: acquire-item.cc,v 1.46.2.9 2004/01/16 18:51:11 mdz Exp $
/* ######################################################################
Acquire Item - Item to acquire
Each item can download to exactly one file at a time. This means you
cannot create an item that fetches two uri's to two files at the same
time. The pkgAcqIndex class creates a second class upon instantiation
to fetch the other index files because of this.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/acquire-item.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/error.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/sha1.h>
#include <apt-pkg/tagfile.h>
#include <apt-pkg/indexrecords.h>
#include <apt-pkg/acquire.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/indexfile.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/cacheiterators.h>
#include <apt-pkg/pkgrecords.h>
#include <apt-pkg/gpgv.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <vector>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <string>
#include <sstream>
#include <stdio.h>
#include <ctime>
#include <apti18n.h>
/*}}}*/
using namespace std;
// Dump expected vs. actual hashes for a URI to stderr when the
// Debug::Acquire::HashSumMismatch option is enabled; no-op otherwise.
// (NOTE(review): "Comparision" is a typo for "Comparison", but the name
// may be referenced later in this file, so it is left untouched here.)
static void printHashSumComparision(std::string const &URI, HashStringList const &Expected, HashStringList const &Actual) /*{{{*/
{
if (_config->FindB("Debug::Acquire::HashSumMismatch", false) == false)
return;
std::cerr << std::endl << URI << ":" << std::endl << " Expected Hash: " << std::endl;
for (HashStringList::const_iterator hs = Expected.begin(); hs != Expected.end(); ++hs)
std::cerr << "\t- " << hs->toStr() << std::endl;
std::cerr << " Actual Hash: " << std::endl;
for (HashStringList::const_iterator hs = Actual.begin(); hs != Actual.end(); ++hs)
std::cerr << "\t- " << hs->toStr() << std::endl;
}
/*}}}*/
// Build the path of a file inside the partial-download directory,
// i.e. Dir::State::lists/partial/<file>.
static std::string GetPartialFileName(std::string const &file) /*{{{*/
{
   return _config->FindDir("Dir::State::lists") + "partial/" + file;
}
/*}}}*/
// Like GetPartialFileName, but derives the on-disk name from a URI first.
static std::string GetPartialFileNameFromURI(std::string const &uri) /*{{{*/
{
   std::string const encoded = URItoFileName(uri);
   return GetPartialFileName(encoded);
}
/*}}}*/
// Final (committed) location of a downloaded list file:
// Dir::State::lists/<uri-encoded name>.
static std::string GetFinalFileNameFromURI(std::string const &uri) /*{{{*/
{
   std::string const listsdir = _config->FindDir("Dir::State::lists");
   return listsdir + URItoFileName(uri);
}
/*}}}*/
// Return the on-disk name for an index: plain Name normally, or
// Name.<Ext> when indexes are kept compressed on disk.
static std::string GetCompressedFileName(std::string const &URI, std::string const &Name, std::string const &Ext) /*{{{*/
{
   // uncompressed indexes, cdrom sources (apt-cdrom may rewrite the
   // Packages file while doing the indexcopy, so never reverify those)
   // and setups without Acquire::GzipIndexes all use the plain name
   if (Ext.empty() == false && Ext != "uncompressed" &&
       URI.compare(0, 6, "cdrom:") != 0 &&
       _config->FindB("Acquire::GzipIndexes", false) == true)
      return Name + '.' + Ext;
   return Name;
}
/*}}}*/
// Name of an individual patch file for the merged-pdiff code path.
static std::string GetMergeDiffsPatchFileName(std::string const &Final, std::string const &Patch)/*{{{*/
{
   // rred expects the patch as $FinalFile.ed.$patchname.gz
   std::string patchfile(Final);
   patchfile.append(".ed.");
   patchfile.append(Patch);
   patchfile.append(".gz");
   return patchfile;
}
/*}}}*/
// Name of the combined patch file for the serial-pdiff code path.
static std::string GetDiffsPatchFileName(std::string const &Final) /*{{{*/
{
   // rred expects the patch as $FinalFile.ed
   std::string patchfile(Final);
   patchfile.append(".ed");
   return patchfile;
}
/*}}}*/
// Decide whether an unauthenticated repository may be used: allowed if the
// source is explicitly marked trusted or Acquire::AllowInsecureRepositories
// is set. Otherwise raise an error, abort the whole transaction and mark
// the item as failed.
static bool AllowInsecureRepositories(indexRecords const * const MetaIndexParser, pkgAcqMetaBase * const TransactionManager, pkgAcquire::Item * const I) /*{{{*/
{
if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true)
return true;
_error->Error(_("Use --allow-insecure-repositories to force the update"));
TransactionManager->AbortTransaction();
I->Status = pkgAcquire::Item::StatError;
return false;
}
/*}}}*/
// Look up the expected hashes for MetaKey in the given Release file
// parser; returns an empty (unusable) list if there is no parser or
// no record for that key.
static HashStringList GetExpectedHashesFromFor(indexRecords * const Parser, std::string const MetaKey)/*{{{*/
{
   if (Parser != NULL)
   {
      indexRecords::checkSum * const Record = Parser->Lookup(MetaKey);
      if (Record != NULL)
         return Record->Hashes;
   }
   return HashStringList();
}
/*}}}*/
// all ::HashesRequired and ::GetExpectedHashes implementations /*{{{*/
/* ::GetExpectedHashes is abstract and has to be implemented by all subclasses.
It is best to implement it as broadly as possible, while ::HashesRequired defaults
to true and should be as restrictive as possible for false cases. Note that if
a hash is returned by ::GetExpectedHashes it must match. Only if it doesn't
::HashesRequired is called to evaluate if its okay to have no hashes. */
// Hashes are required whenever we have a Release file to take them from.
APT_CONST bool pkgAcqTransactionItem::HashesRequired() const
{
/* signed repositories obviously have a parser and good hashes.
unsigned repositories, too, as even if we can't trust them for security,
we can at least trust them for integrity of the download itself.
Only repositories without a Release file can (obviously) not have
hashes – and they are very uncommon and strongly discouraged */
return TransactionManager->MetaIndexParser != NULL;
}
// Default implementation: look the hashes up in the Release file under
// this item's meta key.
HashStringList pkgAcqTransactionItem::GetExpectedHashes() const
{
return GetExpectedHashesFor(GetMetaKey());
}
APT_CONST bool pkgAcqMetaBase::HashesRequired() const
{
// Release and co have no hashes 'by design'.
return false;
}
// Release files carry the hashes of everything else, so there is nothing
// to verify them against: return an empty (unusable) list.
HashStringList pkgAcqMetaBase::GetExpectedHashes() const
{
return HashStringList();
}
APT_CONST bool pkgAcqIndexDiffs::HashesRequired() const
{
/* We don't always have the diff of the downloaded pdiff file.
What we have for sure is hashes for the uncompressed file,
but rred uncompresses them on the fly while parsing, so not handled here.
Hashes are (also) checked while searching for (next) patch to apply. */
// only the currently fetched patch (front of the queue) can be checked
if (State == StateFetchDiff)
return available_patches[0].download_hashes.empty() == false;
return false;
}
// Expected hashes of the patch currently being downloaded; nothing can
// be promised in any other state.
HashStringList pkgAcqIndexDiffs::GetExpectedHashes() const
{
   if (State != StateFetchDiff)
      return HashStringList();
   return available_patches[0].download_hashes;
}
APT_CONST bool pkgAcqIndexMergeDiffs::HashesRequired() const
{
/* @see #pkgAcqIndexDiffs::HashesRequired, with the difference that
we can check the rred result after all patches are applied as
we know the expected result rather than potentially apply more patches */
if (State == StateFetchDiff)
return patch.download_hashes.empty() == false;
// the final patched file must always match the Release file entry
return State == StateApplyDiff;
}
// Hashes of the patch while fetching it, hashes of the final index while
// applying; empty otherwise.
HashStringList pkgAcqIndexMergeDiffs::GetExpectedHashes() const
{
   switch (State)
   {
      case StateFetchDiff:
         return patch.download_hashes;
      case StateApplyDiff:
         return GetExpectedHashesFor(Target.MetaKey);
      default:
         return HashStringList();
   }
}
// Remote archives must be hash-verified; locally sourced .debs need not be.
APT_CONST bool pkgAcqArchive::HashesRequired() const
{
return LocalSource == false;
}
HashStringList pkgAcqArchive::GetExpectedHashes() const
{
// figured out while parsing the records
return ExpectedHashes;
}
APT_CONST bool pkgAcqFile::HashesRequired() const
{
// supplied as parameter at creation time, so the caller decides
return ExpectedHashes.usable();
}
// Hashes handed in by the caller at construction time.
HashStringList pkgAcqFile::GetExpectedHashes() const
{
return ExpectedHashes;
}
/*}}}*/
// Acquire::Item::QueueURI and specialisations from child classes /*{{{*/
// Base implementation: unconditionally hand the item to the fetcher.
// Returns true if the item was actually queued (always, here).
bool pkgAcquire::Item::QueueURI(pkgAcquire::ItemDesc &Item)
{
Owner->Enqueue(Item);
return true;
}
/* The idea here is that an item isn't queued if it exists on disk and the
transition manager was a hit as this means that the files it contains
the checksums for can't be updated either (or they are and we are asking
for a hashsum mismatch to happen which helps nobody) */
bool pkgAcqTransactionItem::QueueURI(pkgAcquire::ItemDesc &Item)
{
std::string const FinalFile = GetFinalFilename();
// skip the download entirely on an IMS hit when the final file exists:
// the (unchanged) Release file guarantees it can't have changed either
if (TransactionManager != NULL && TransactionManager->IMSHit == true &&
FileExists(FinalFile) == true)
{
PartialFile = DestFile = FinalFile;
Status = StatDone;
return false;
}
return pkgAcquire::Item::QueueURI(Item);
}
/* The transition manager InRelease itself (or its older sisters-in-law
Release & Release.gpg) is always queued as this allows us to rerun gpgv
on it to verify that we aren't stalled with old files */
// The Release file itself is always fetched (bypassing the IMS-hit
// short-circuit of pkgAcqTransactionItem) so gpgv can be rerun on it.
bool pkgAcqMetaBase::QueueURI(pkgAcquire::ItemDesc &Item)
{
return pkgAcquire::Item::QueueURI(Item);
}
/* the Diff/Index needs to queue also the up-to-date complete index file
to ensure that the list cleaner isn't eating it */
// On an IMS hit the diff/Index isn't fetched, but the up-to-date complete
// index must still be queued so the list cleaner doesn't remove it.
bool pkgAcqDiffIndex::QueueURI(pkgAcquire::ItemDesc &Item)
{
   if (pkgAcqTransactionItem::QueueURI(Item) == false)
   {
      QueueOnIMSHit();
      return false;
   }
   return true;
}
/*}}}*/
// Acquire::Item::GetFinalFilename and specialisations for child classes /*{{{*/
// Default final destination: Dir::State::lists/<uri-encoded Desc.URI>.
std::string pkgAcquire::Item::GetFinalFilename() const
{
return GetFinalFileNameFromURI(Desc.URI);
}
std::string pkgAcqDiffIndex::GetFinalFilename() const
{
// the logic we inherent from pkgAcqBaseIndex isn't what we need here
return pkgAcquire::Item::GetFinalFilename();
}
// Final name of the index, with a compression extension appended when
// indexes are stored compressed on disk (Acquire::GzipIndexes).
std::string pkgAcqIndex::GetFinalFilename() const
{
std::string const FinalFile = GetFinalFileNameFromURI(Target.URI);
return GetCompressedFileName(Target.URI, FinalFile, CurrentCompressionExtension);
}
// Signatures are stored under the name derived from the target URI.
std::string pkgAcqMetaSig::GetFinalFilename() const
{
return GetFinalFileNameFromURI(Target.URI);
}
// Indexes are stored under the name derived from the target URI.
std::string pkgAcqBaseIndex::GetFinalFilename() const
{
return GetFinalFileNameFromURI(Target.URI);
}
// Release files are stored under the name derived from the target URI.
std::string pkgAcqMetaBase::GetFinalFilename() const
{
return GetFinalFileNameFromURI(Target.URI);
}
// Archives end up in the package cache (Dir::Cache::Archives), not in
// the lists directory like the other item types.
std::string pkgAcqArchive::GetFinalFilename() const
{
return _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename);
}
/*}}}*/
// pkgAcqTransactionItem::GetMetaKey and specialisations for child classes /*{{{*/
// Key under which this item's hashes are recorded in the Release file.
std::string pkgAcqTransactionItem::GetMetaKey() const
{
return Target.MetaKey;
}
// While downloading the compressed index, validate against the hashes of
// the compressed variant; after decompression (or for the uncompressed
// variant) use the plain key.
std::string pkgAcqIndex::GetMetaKey() const
{
if (Stage == STAGE_DECOMPRESS_AND_VERIFY || CurrentCompressionExtension == "uncompressed")
return Target.MetaKey;
return Target.MetaKey + "." + CurrentCompressionExtension;
}
// The pdiff index lives next to the index itself as <MetaKey>.diff/Index.
std::string pkgAcqDiffIndex::GetMetaKey() const
{
return Target.MetaKey + ".diff/Index";
}
/*}}}*/
//pkgAcqTransactionItem::TransactionState and specialisations for child classes /*{{{*/
// React to the transaction ending: on abort, cancel still-idle downloads;
// on commit, move the staged partial file into place (or remove the
// destination if the item was staged for removal via an empty PartialFile).
bool pkgAcqTransactionItem::TransactionState(TransactionStates const state)
{
bool const Debug = _config->FindB("Debug::Acquire::Transaction", false);
switch(state)
{
case TransactionAbort:
if(Debug == true)
std::clog << " Cancel: " << DestFile << std::endl;
// items not yet started are marked done and pulled out of the queue
if (Status == pkgAcquire::Item::StatIdle)
{
Status = pkgAcquire::Item::StatDone;
Dequeue();
}
break;
case TransactionCommit:
if(PartialFile != "")
{
if(Debug == true)
std::clog << "mv " << PartialFile << " -> "<< DestFile << " # " << DescURI() << std::endl;
Rename(PartialFile, DestFile);
} else {
// empty PartialFile == staged for removal (TransactionStageRemoval)
if(Debug == true)
std::clog << "rm " << DestFile << " # " << DescURI() << std::endl;
unlink(DestFile.c_str());
}
break;
}
return true;
}
bool pkgAcqMetaBase::TransactionState(TransactionStates const state)
{
// Do not remove InRelease on IMSHit of Release.gpg [yes, this is very edgecasey]
if (TransactionManager->IMSHit == false)
return pkgAcqTransactionItem::TransactionState(state);
return true;
}
// In addition to the common handling, clean up the intermediate files of
// the download/decompress pipeline on abort resp. commit.
bool pkgAcqIndex::TransactionState(TransactionStates const state)
{
if (pkgAcqTransactionItem::TransactionState(state) == false)
return false;
switch (state)
{
case TransactionAbort:
if (Stage == STAGE_DECOMPRESS_AND_VERIFY)
{
// keep the compressed file, but drop the decompressed
EraseFileName.clear();
if (PartialFile.empty() == false && flExtension(PartialFile) == "decomp")
unlink(PartialFile.c_str());
}
break;
case TransactionCommit:
// the compressed intermediate is no longer needed once committed
if (EraseFileName.empty() == false)
unlink(EraseFileName.c_str());
break;
}
return true;
}
// Additionally drop the partially downloaded diff/Index on abort.
bool pkgAcqDiffIndex::TransactionState(TransactionStates const state)
{
if (pkgAcqTransactionItem::TransactionState(state) == false)
return false;
switch (state)
{
case TransactionCommit:
break;
case TransactionAbort:
std::string const Partial = GetPartialFileNameFromURI(Target.URI);
unlink(Partial.c_str());
break;
}
return true;
}
/*}}}*/
class APT_HIDDEN NoActionItem : public pkgAcquire::Item /*{{{*/
/* The sole purpose of this class is having an item which does nothing to
reach its done state to prevent cleanup deleting the mentioned file.
Handy in cases in which we know we have the file already, like IMS-Hits. */
{
IndexTarget const Target;
public:
virtual std::string DescURI() const {return Target.URI;};
virtual HashStringList GetExpectedHashes() const {return HashStringList();};
// born in StatDone: never queued, never fetched — DestFile merely marks
// the final file as "in use" for the list cleaner
NoActionItem(pkgAcquire * const Owner, IndexTarget const Target) :
pkgAcquire::Item(Owner), Target(Target)
{
Status = StatDone;
DestFile = GetFinalFileNameFromURI(Target.URI);
}
};
/*}}}*/
// Acquire::Item::Item - Constructor /*{{{*/
APT_IGNORE_DEPRECATED_PUSH
// Registers the item with its owning fetcher and starts it out idle.
pkgAcquire::Item::Item(pkgAcquire * const Owner) :
FileSize(0), PartialSize(0), Mode(0), Complete(false), Local(false),
QueueCounter(0), ExpectedAdditionalItems(0), Owner(Owner)
{
Owner->Add(this);
Status = StatIdle;
}
APT_IGNORE_DEPRECATED_POP
/*}}}*/
// Acquire::Item::~Item - Destructor /*{{{*/
// Deregister from the owning fetcher.
pkgAcquire::Item::~Item()
{
Owner->Remove(this);
}
/*}}}*/
// Extra 600-URI-Acquire headers for the method; none by default.
std::string pkgAcquire::Item::Custom600Headers() const /*{{{*/
{
return std::string();
}
/*}}}*/
// Short progress-display description; defaults to the full URI.
std::string pkgAcquire::Item::ShortDesc() const /*{{{*/
{
return DescURI();
}
/*}}}*/
// Hook called when the fetch run ends; intentionally a no-op here.
APT_CONST void pkgAcquire::Item::Finished() /*{{{*/
{
}
/*}}}*/
// Accessor for the fetcher this item belongs to.
APT_PURE pkgAcquire * pkgAcquire::Item::GetOwner() const /*{{{*/
{
return Owner;
}
/*}}}*/
// Mutable accessor for the item's description record.
pkgAcquire::ItemDesc &pkgAcquire::Item::GetItemDesc() /*{{{*/
{
return Desc;
}
/*}}}*/
// Items are untrusted unless a subclass says otherwise.
APT_CONST bool pkgAcquire::Item::IsTrusted() const /*{{{*/
{
return false;
}
/*}}}*/
// Acquire::Item::Failed - Item failed to download /*{{{*/
// ---------------------------------------------------------------------
/* We return to an idle state if there are still other queues that could
fetch this object */
void pkgAcquire::Item::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
{
if(ErrorText.empty())
ErrorText = LookupTag(Message,"Message");
UsedMirror = LookupTag(Message,"UsedMirror");
// QueueCounter > 1 means other queues may still be able to fetch this
// item, so we only finalize failure on the last queue (<= 1)
if (QueueCounter <= 1)
{
/* This indicates that the file is not available right now but might
be sometime later. If we do a retry cycle then this should be
retried [CDROMs] */
if (Cnf != NULL && Cnf->LocalOnly == true &&
StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
{
Status = StatIdle;
Dequeue();
return;
}
// escalate to StatError, but never downgrade an existing error state
switch (Status)
{
case StatIdle:
case StatFetching:
case StatDone:
Status = StatError;
break;
case StatAuthError:
case StatError:
case StatTransientNetworkError:
break;
}
Complete = false;
Dequeue();
}
string const FailReason = LookupTag(Message, "FailReason");
if (FailReason == "MaximumSizeExceeded")
RenameOnError(MaximumSizeExceeded);
else if (Status == StatAuthError)
RenameOnError(HashSumMismatch);
// report mirror failure back to LP if we actually use a mirror
if (FailReason.empty() == false)
ReportMirrorFailure(FailReason);
else
ReportMirrorFailure(ErrorText);
// another queue may still try, so park the item instead of erroring out
if (QueueCounter > 1)
Status = StatIdle;
}
/*}}}*/
// Acquire::Item::Start - Item has begun to download /*{{{*/
// ---------------------------------------------------------------------
/* Stash status and the file size. Note that setting Complete means
sub-phases of the acquire process such as decompresion are operating */
void pkgAcquire::Item::Start(string const &/*Message*/, unsigned long long const Size)
{
Status = StatFetching;
ErrorText.clear();
// keep the first reported size; a non-zero Complete means we are in a
// sub-phase (e.g. decompression) whose size is not the download size
if (FileSize == 0 && Complete == false)
FileSize = Size;
}
/*}}}*/
// Acquire::Item::Done - Item downloaded OK /*{{{*/
void pkgAcquire::Item::Done(string const &Message, HashStringList const &Hashes,
pkgAcquire::MethodConfig const * const /*Cnf*/)
{
// We just downloaded something..
string FileName = LookupTag(Message,"Filename");
UsedMirror = LookupTag(Message,"UsedMirror");
unsigned long long const downloadedSize = Hashes.FileSize();
if (downloadedSize != 0)
{
// count real network transfers towards the progress statistics
// (not local files or sub-phase completions)
if (Complete == false && !Local && FileName == DestFile)
{
if (Owner->Log != 0)
// NOTE(review): atoi truncates Resume-Point values above INT_MAX
// (resumes past 2 GiB) — TODO confirm whether strtoull is wanted here
Owner->Log->Fetched(Hashes.FileSize(),atoi(LookupTag(Message,"Resume-Point","0").c_str()));
}
if (FileSize == 0)
FileSize= downloadedSize;
}
Status = StatDone;
ErrorText = string();
Owner->Dequeue(this);
}
/*}}}*/
// Acquire::Item::Rename - Rename a file /*{{{*/
// ---------------------------------------------------------------------
/* This helper function is used by a lot of item methods as their final
step */
// Rename From to To; a same-name rename is a trivial success. On failure
// the item is put into StatError and the rename error is appended to any
// pre-existing ErrorText.
bool pkgAcquire::Item::Rename(string const &From,string const &To)
{
if (From == To || rename(From.c_str(),To.c_str()) == 0)
return true;
std::string S;
strprintf(S, _("rename failed, %s (%s -> %s)."), strerror(errno),
From.c_str(),To.c_str());
Status = StatError;
if (ErrorText.empty())
ErrorText = S;
else
ErrorText = ErrorText + ": " + S;
return false;
}
/*}}}*/
// Remove this item from the owning fetcher's queues.
void pkgAcquire::Item::Dequeue() /*{{{*/
{
Owner->Dequeue(this);
}
/*}}}*/
// Preserve the failed download as DestFile.FAILED for inspection, set the
// item state and error text matching the given error kind, and report
// mirror failures where it plausibly is the mirror's fault.
// Always returns false so callers can 'return RenameOnError(...)'.
bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const error)/*{{{*/
{
if (RealFileExists(DestFile))
Rename(DestFile, DestFile + ".FAILED");
std::string errtext;
switch (error)
{
case HashSumMismatch:
errtext = _("Hash Sum mismatch");
Status = StatAuthError;
ReportMirrorFailure("HashChecksumFailure");
break;
case SizeMismatch:
errtext = _("Size mismatch");
Status = StatAuthError;
ReportMirrorFailure("SizeFailure");
break;
case InvalidFormat:
errtext = _("Invalid file format");
Status = StatError;
// do not report as usually its not the mirrors fault, but Portal/Proxy
break;
case SignatureError:
errtext = _("Signature error");
Status = StatError;
break;
case NotClearsigned:
errtext = _("Does not start with a cleartext signature");
Status = StatError;
break;
case MaximumSizeExceeded:
// the method is expected to report a good error for this
Status = StatError;
break;
case PDiffError:
// no handling here, done by callers
break;
}
if (ErrorText.empty())
ErrorText = errtext;
return false;
}
/*}}}*/
// Record the currently running helper (e.g. "gpgv", "store") and mirror it
// into the deprecated Mode pointer for ABI compatibility.
void pkgAcquire::Item::SetActiveSubprocess(const std::string &subprocess)/*{{{*/
{
ActiveSubprocess = subprocess;
APT_IGNORE_DEPRECATED(Mode = ActiveSubprocess.c_str();)
}
/*}}}*/
// Acquire::Item::ReportMirrorFailure /*{{{*/
// Invoke the external mirror-problem reporting script (if configured and
// present) with the mirror, URI and failure code as arguments. Does
// nothing unless the download actually went through a mirror.
void pkgAcquire::Item::ReportMirrorFailure(string const &FailCode)
{
// we only act if a mirror was used at all
if(UsedMirror.empty())
return;
#if 0
std::cerr << "\nReportMirrorFailure: "
<< UsedMirror
<< " Uri: " << DescURI()
<< " FailCode: "
<< FailCode << std::endl;
#endif
string report = _config->Find("Methods::Mirror::ProblemReporting",
"/usr/lib/apt/apt-report-mirror-failure");
if(!FileExists(report))
return;
// argv: script mirror uri failcode
std::vector<char const*> Args;
Args.push_back(report.c_str());
Args.push_back(UsedMirror.c_str());
Args.push_back(DescURI().c_str());
Args.push_back(FailCode.c_str());
Args.push_back(NULL);
pid_t pid = ExecFork();
if(pid < 0)
{
_error->Error("ReportMirrorFailure Fork failed");
return;
}
else if(pid == 0)
{
// child: exec the reporter; _exit if the exec fails
execvp(Args[0], (char**)Args.data());
std::cerr << "Could not exec " << Args[0] << std::endl;
_exit(100);
}
if(!ExecWait(pid, "report-mirror-failure"))
{
_error->Warning("Couldn't report problem to '%s'",
_config->Find("Methods::Mirror::ProblemReporting").c_str());
}
}
/*}}}*/
// String form of the best expected hash (find(NULL) picks the strongest
// available type), or "" if no hashes are expected.
std::string pkgAcquire::Item::HashSum() const /*{{{*/
{
HashStringList const hashes = GetExpectedHashes();
HashString const * const hs = hashes.find(NULL);
return hs != NULL ? hs->toStr() : "";
}
/*}}}*/
// Register this item with its transaction manager — except when the item
// *is* the manager (the manager constructs itself with itself as manager).
pkgAcqTransactionItem::pkgAcqTransactionItem(pkgAcquire * const Owner, /*{{{*/
pkgAcqMetaBase * const TransactionManager, IndexTarget const Target) :
pkgAcquire::Item(Owner), Target(Target), TransactionManager(TransactionManager)
{
if (TransactionManager != this)
TransactionManager->Add(this);
}
/*}}}*/
// Out-of-line (empty) destructor to anchor the vtable.
pkgAcqTransactionItem::~pkgAcqTransactionItem() /*{{{*/
{
}
/*}}}*/
// Convenience wrapper: look MetaKey up in the transaction's Release file.
HashStringList pkgAcqTransactionItem::GetExpectedHashesFor(std::string const MetaKey) const /*{{{*/
{
return GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, MetaKey);
}
/*}}}*/
// AcqMetaBase - Constructor /*{{{*/
// Set up the transaction manager state: the Release file parser, the index
// targets it governs and the (initially clean) auth/IMS flags.
pkgAcqMetaBase::pkgAcqMetaBase(pkgAcquire * const Owner,
pkgAcqMetaBase * const TransactionManager,
std::vector<IndexTarget> const IndexTargets,
IndexTarget const &DataTarget,
indexRecords * const MetaIndexParser)
: pkgAcqTransactionItem(Owner, TransactionManager, DataTarget),
MetaIndexParser(MetaIndexParser), LastMetaIndexParser(NULL), IndexTargets(IndexTargets),
AuthPass(false), IMSHit(false)
{
}
/*}}}*/
// AcqMetaBase::Add - Add a item to the current Transaction /*{{{*/
// Enroll an item into the current transaction.
void pkgAcqMetaBase::Add(pkgAcqTransactionItem * const I)
{
Transaction.push_back(I);
}
/*}}}*/
// AcqMetaBase::AbortTransaction - Abort the current Transaction /*{{{*/
// Cancel every item enrolled in the transaction (no files are moved into
// their final place) and empty the transaction list.
void pkgAcqMetaBase::AbortTransaction()
{
if(_config->FindB("Debug::Acquire::Transaction", false) == true)
std::clog << "AbortTransaction: " << TransactionManager << std::endl;
// ensure the toplevel is in error state too
for (std::vector<pkgAcqTransactionItem*>::iterator I = Transaction.begin();
I != Transaction.end(); ++I)
{
(*I)->TransactionState(TransactionAbort);
}
Transaction.clear();
}
/*}}}*/
// AcqMetaBase::TransactionHasError - Check for errors in Transaction /*{{{*/
// True if any enrolled item ended in an error state; idle, fetching and
// done items don't count against the transaction.
APT_PURE bool pkgAcqMetaBase::TransactionHasError() const
{
   for (std::vector<pkgAcqTransactionItem*>::const_iterator I = Transaction.begin();
        I != Transaction.end(); ++I)
   {
      if ((*I)->Status == StatAuthError ||
          (*I)->Status == StatError ||
          (*I)->Status == StatTransientNetworkError)
         return true;
   }
   return false;
}
/*}}}*/
// AcqMetaBase::CommitTransaction - Commit a transaction /*{{{*/
// Finalize the transaction: each item moves its staged file into place
// (or removes its target if staged for removal); then forget the items.
void pkgAcqMetaBase::CommitTransaction()
{
if(_config->FindB("Debug::Acquire::Transaction", false) == true)
std::clog << "CommitTransaction: " << this << std::endl;
// move new files into place *and* remove files that are not
// part of the transaction but are still on disk
for (std::vector<pkgAcqTransactionItem*>::iterator I = Transaction.begin();
I != Transaction.end(); ++I)
{
(*I)->TransactionState(TransactionCommit);
}
Transaction.clear();
}
/*}}}*/
// AcqMetaBase::TransactionStageCopy - Stage a file for copying /*{{{*/
// Stage: on commit, From is renamed to To (see TransactionState).
void pkgAcqMetaBase::TransactionStageCopy(pkgAcqTransactionItem * const I,
const std::string &From,
const std::string &To)
{
I->PartialFile = From;
I->DestFile = To;
}
/*}}}*/
// AcqMetaBase::TransactionStageRemoval - Stage a file for removal /*{{{*/
// Stage: an empty PartialFile makes TransactionState unlink FinalFile
// on commit instead of renaming anything.
void pkgAcqMetaBase::TransactionStageRemoval(pkgAcqTransactionItem * const I,
const std::string &FinalFile)
{
I->PartialFile = "";
I->DestFile = FinalFile;
}
/*}}}*/
// AcqMetaBase::GenerateAuthWarning - Check gpg authentication error /*{{{*/
// Classify a gpgv failure: keep using previously verified files if we have
// them (warning, transient), hard-fail on NODATA (bogus signature file),
// otherwise just warn and let the caller continue. Returns true if the
// caller should stop processing.
bool pkgAcqMetaBase::CheckStopAuthentication(pkgAcquire::Item * const I, const std::string &Message)
{
// FIXME: this entire function can do now that we disallow going to
// a unauthenticated state and can cleanly rollback
string const Final = I->GetFinalFilename();
if(FileExists(Final))
{
I->Status = StatTransientNetworkError;
_error->Warning(_("An error occurred during the signature "
"verification. The repository is not updated "
"and the previous index files will be used. "
"GPG error: %s: %s\n"),
Desc.Description.c_str(),
LookupTag(Message,"Message").c_str());
RunScripts("APT::Update::Auth-Failure");
return true;
} else if (LookupTag(Message,"Message").find("NODATA") != string::npos) {
/* Invalid signature file, reject (LP: #346386) (Closes: #627642) */
_error->Error(_("GPG error: %s: %s"),
Desc.Description.c_str(),
LookupTag(Message,"Message").c_str());
I->Status = StatError;
return true;
} else {
_error->Warning(_("GPG error: %s: %s"),
Desc.Description.c_str(),
LookupTag(Message,"Message").c_str());
}
// gpgv method failed
ReportMirrorFailure("GPGFailure");
return false;
}
/*}}}*/
// AcqMetaBase::Custom600Headers - Get header for AcqMetaBase /*{{{*/
// ---------------------------------------------------------------------
// Headers for fetching a Release file: mark it as an index, cap the
// download size, and supply Last-Modified of the current file so the
// server can answer with an IMS hit.
string pkgAcqMetaBase::Custom600Headers() const
{
std::string Header = "\nIndex-File: true";
std::string MaximumSize;
strprintf(MaximumSize, "\nMaximum-Size: %i",
_config->FindI("Acquire::MaxReleaseFileSize", 10*1000*1000));
Header += MaximumSize;
string const FinalFile = GetFinalFilename();
struct stat Buf;
if (stat(FinalFile.c_str(),&Buf) == 0)
Header += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
return Header;
}
/*}}}*/
// AcqMetaBase::QueueForSignatureVerify /*{{{*/
// Re-queue the item through the gpgv method to verify Signature against
// File; flips the transaction into its authentication pass.
void pkgAcqMetaBase::QueueForSignatureVerify(pkgAcqTransactionItem * const I, std::string const &File, std::string const &Signature)
{
AuthPass = true;
I->Desc.URI = "gpgv:" + Signature;
I->DestFile = File;
QueueURI(I->Desc);
I->SetActiveSubprocess("gpgv");
}
/*}}}*/
// AcqMetaBase::CheckDownloadDone /*{{{*/
// Post-download (pre-verification) handling of a Release file: sanity-check
// the method's reply, copy the file next to us if it isn't yet, and detect
// IMS hits (both server-reported and via hash comparison against the file
// we already have). Returns false if more fetching was queued or an error
// occurred; true when local processing can continue.
bool pkgAcqMetaBase::CheckDownloadDone(pkgAcqTransactionItem * const I, const std::string &Message, HashStringList const &Hashes) const
{
// We have just finished downloading a Release file (it is not
// verified yet)
string const FileName = LookupTag(Message,"Filename");
if (FileName.empty() == true)
{
I->Status = StatError;
I->ErrorText = "Method gave a blank filename";
return false;
}
// file was delivered somewhere else (e.g. local file): copy it over
if (FileName != I->DestFile && RealFileExists(I->DestFile) == false)
{
I->Local = true;
I->Desc.URI = "copy:" + FileName;
I->QueueURI(I->Desc);
return false;
}
// make sure to verify against the right file on I-M-S hit
bool IMSHit = StringToBool(LookupTag(Message,"IMS-Hit"), false);
if (IMSHit == false && Hashes.usable())
{
// detect IMS-Hits servers haven't detected by Hash comparison
std::string const FinalFile = I->GetFinalFilename();
if (RealFileExists(FinalFile) && Hashes.VerifyFile(FinalFile) == true)
{
IMSHit = true;
unlink(I->DestFile.c_str());
}
}
if(IMSHit == true)
{
// for simplicity, the transaction manager is always InRelease
// even if it doesn't exist.
if (TransactionManager != NULL)
TransactionManager->IMSHit = true;
I->PartialFile = I->DestFile = I->GetFinalFilename();
}
// set Item to complete as the remaining work is all local (verify etc)
I->Complete = true;
return true;
}
/*}}}*/
bool pkgAcqMetaBase::CheckAuthDone(string const &Message) /*{{{*/
{
// At this point, the gpgv method has succeeded, so there is a
// valid signature from a key in the trusted keyring. We
// perform additional verification of its contents, and use them
// to verify the indexes we are about to download
if (TransactionManager->IMSHit == false)
{
// open the last (In)Release if we have it
// (used later for downgrade detection and skip-unchanged-file checks)
std::string const FinalFile = GetFinalFilename();
std::string FinalRelease;
std::string FinalInRelease;
if (APT::String::Endswith(FinalFile, "InRelease"))
{
FinalInRelease = FinalFile;
FinalRelease = FinalFile.substr(0, FinalFile.length() - strlen("InRelease")) + "Release";
}
else
{
FinalInRelease = FinalFile.substr(0, FinalFile.length() - strlen("Release")) + "InRelease";
FinalRelease = FinalFile;
}
if (RealFileExists(FinalInRelease) || RealFileExists(FinalRelease))
{
TransactionManager->LastMetaIndexParser = new indexRecords;
// parse errors of the *old* file are not our problem: silence them
_error->PushToStack();
if (RealFileExists(FinalInRelease))
TransactionManager->LastMetaIndexParser->Load(FinalInRelease);
else
TransactionManager->LastMetaIndexParser->Load(FinalRelease);
// its unlikely to happen, but if what we have is bad ignore it
if (_error->PendingError())
{
delete TransactionManager->LastMetaIndexParser;
TransactionManager->LastMetaIndexParser = NULL;
}
_error->RevertToStack();
}
}
// parse the just-verified Release file; failure here is an auth error
if (TransactionManager->MetaIndexParser->Load(DestFile) == false)
{
Status = StatAuthError;
ErrorText = TransactionManager->MetaIndexParser->ErrorText;
return false;
}
if (!VerifyVendor(Message))
{
Status = StatAuthError;
return false;
}
if (_config->FindB("Debug::pkgAcquire::Auth", false))
std::cerr << "Signature verification succeeded: "
<< DestFile << std::endl;
// Download further indexes with verification
QueueIndexes(true);
return true;
}
/*}}}*/
// Queue all index targets of this repository for download, deciding per
// target whether to skip it (unchanged since last run), fetch it via
// pdiffs, or fetch it in full. With verify==true the Release file has been
// authenticated and targets missing from it are fatal (unless optional).
void pkgAcqMetaBase::QueueIndexes(bool const verify) /*{{{*/
{
// at this point the real Items are loaded in the fetcher
ExpectedAdditionalItems = 0;
for (std::vector <IndexTarget>::const_iterator Target = IndexTargets.begin();
Target != IndexTargets.end();
++Target)
{
bool trypdiff = _config->FindB("Acquire::PDiffs", true);
if (verify == true)
{
if (TransactionManager->MetaIndexParser->Exists(Target->MetaKey) == false)
{
// optional targets that we do not have in the Release file are skipped
if (Target->IsOptional)
continue;
Status = StatAuthError;
strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), Target->MetaKey.c_str());
return;
}
if (RealFileExists(GetFinalFileNameFromURI(Target->URI)))
{
if (TransactionManager->LastMetaIndexParser != NULL)
{
// hashes unchanged between old and new Release file:
// the index on disk is still current, skip the download
HashStringList const newFile = GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, Target->MetaKey);
HashStringList const oldFile = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target->MetaKey);
if (newFile == oldFile)
{
// we have the file already, no point in trying to acquire it again
new NoActionItem(Owner, *Target);
continue;
}
}
}
else
trypdiff = false; // no file to patch
// check if we have patches available
trypdiff &= TransactionManager->MetaIndexParser->Exists(Target->MetaKey + ".diff/Index");
}
// if we have no file to patch, no point in trying
trypdiff &= RealFileExists(GetFinalFileNameFromURI(Target->URI));
// no point in patching from local sources
if (trypdiff)
{
std::string const proto = Target->URI.substr(0, strlen("file:/"));
if (proto == "file:/" || proto == "copy:/" || proto == "cdrom:")
trypdiff = false;
}
// Queue the Index file (Packages, Sources, Translation-$foo, …)
if (trypdiff)
new pkgAcqDiffIndex(Owner, TransactionManager, *Target);
else
new pkgAcqIndex(Owner, TransactionManager, *Target);
}
}
/*}}}*/
// Post-gpgv sanity checks on the verified Release file: warn about key IDs
// missing from the keyring, enforce Valid-Until expiry, detect last-minute
// IMS hits (server sent an *older* file than we already have) and check
// that the codename matches what the sources.list entry expects.
// Returns false only on fatal problems (expired Release file).
bool pkgAcqMetaBase::VerifyVendor(string const &Message) /*{{{*/
{
   string::size_type pos;
   // check for missing sigs (that where not fatal because otherwise we had
   // bombed earlier)
   string missingkeys;
   string msg = _("There is no public key available for the "
                  "following key IDs:\n");
   pos = Message.find("NO_PUBKEY ");
   if (pos != std::string::npos)
   {
      string::size_type start = pos+strlen("NO_PUBKEY ");
      // BUGFIX: search for the line end *from* the key id onwards —
      // searching the whole Message could find a newline *before* start,
      // making the length calculation underflow and the extracted
      // "fingerprint" garbage
      string Fingerprint = Message.substr(start, Message.find('\n', start)-start);
      missingkeys += (Fingerprint);
   }
   if(!missingkeys.empty())
      _error->Warning("%s", (msg + missingkeys).c_str());
   // normalise the expected distribution for the codename comparison below
   string Transformed = TransactionManager->MetaIndexParser->GetExpectedDist();
   if (Transformed == "../project/experimental")
   {
      Transformed = "experimental";
   }
   pos = Transformed.rfind('/');
   if (pos != string::npos)
   {
      Transformed = Transformed.substr(0, pos);
   }
   if (Transformed == ".")
   {
      Transformed = "";
   }
   // refuse Release files past their Valid-Until date (unless disabled)
   if (_config->FindB("Acquire::Check-Valid-Until", true) == true &&
       TransactionManager->MetaIndexParser->GetValidUntil() > 0) {
      time_t const invalid_since = time(NULL) - TransactionManager->MetaIndexParser->GetValidUntil();
      if (invalid_since > 0)
      {
         std::string errmsg;
         strprintf(errmsg,
               // TRANSLATOR: The first %s is the URL of the bad Release file, the second is
               // the time since then the file is invalid - formated in the same way as in
               // the download progress display (e.g. 7d 3h 42min 1s)
               _("Release file for %s is expired (invalid since %s). "
                 "Updates for this repository will not be applied."),
               Target.URI.c_str(), TimeToStr(invalid_since).c_str());
         if (ErrorText.empty())
            ErrorText = errmsg;
         return _error->Error("%s", errmsg.c_str());
      }
   }
   /* Did we get a file older than what we have? This is a last minute IMS hit and doubles
      as a prevention of downgrading us to older (still valid) files */
   if (TransactionManager->IMSHit == false && TransactionManager->LastMetaIndexParser != NULL &&
       TransactionManager->LastMetaIndexParser->GetDate() > TransactionManager->MetaIndexParser->GetDate())
   {
      // keep the file we already have and its parser; drop the download
      TransactionManager->IMSHit = true;
      unlink(DestFile.c_str());
      PartialFile = DestFile = GetFinalFilename();
      delete TransactionManager->MetaIndexParser;
      TransactionManager->MetaIndexParser = TransactionManager->LastMetaIndexParser;
      TransactionManager->LastMetaIndexParser = NULL;
   }
   if (_config->FindB("Debug::pkgAcquire::Auth", false))
   {
      std::cerr << "Got Codename: " << TransactionManager->MetaIndexParser->GetDist() << std::endl;
      std::cerr << "Expecting Dist: " << TransactionManager->MetaIndexParser->GetExpectedDist() << std::endl;
      std::cerr << "Transformed Dist: " << Transformed << std::endl;
   }
   if (TransactionManager->MetaIndexParser->CheckDist(Transformed) == false)
   {
      // This might become fatal one day
      // Status = StatAuthError;
      // ErrorText = "Conflicting distribution; expected "
      // + MetaIndexParser->GetExpectedDist() + " but got "
      // + MetaIndexParser->GetDist();
      // return false;
      if (!Transformed.empty())
      {
         _error->Warning(_("Conflicting distribution: %s (expected %s but got %s)"),
               Desc.Description.c_str(),
               Transformed.c_str(),
               TransactionManager->MetaIndexParser->GetDist().c_str());
      }
   }
   return true;
}
/*}}}*/
// pkgAcqMetaClearSig - Acquire an InRelease file (clearsigned Release)	/*{{{*/
// ---------------------------------------------------------------------
/* The clearsigned target carries data and signature in one file; the
   detached data/sig targets are kept around as the fallback pair
   (Release + Release.gpg) used if the InRelease download fails. */
pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire * const Owner,	/*{{{*/
	IndexTarget const &ClearsignedTarget,
	IndexTarget const &DetachedDataTarget, IndexTarget const &DetachedSigTarget,
	std::vector<IndexTarget> const IndexTargets,
	indexRecords * const MetaIndexParser) :
   pkgAcqMetaIndex(Owner, this, ClearsignedTarget, DetachedSigTarget, IndexTargets, MetaIndexParser),
   ClearsignedTarget(ClearsignedTarget),
   DetachedDataTarget(DetachedDataTarget), DetachedSigTarget(DetachedSigTarget)
{
   // index targets + (worst case:) Release/Release.gpg
   ExpectedAdditionalItems = IndexTargets.size() + 2;
   // this item acts as its own transaction manager (note the 'this'
   // passed to the pkgAcqMetaIndex base above), so register ourselves
   TransactionManager->Add(this);
}
									/*}}}*/
// destructor: nothing to release; members clean up via their own dtors
pkgAcqMetaClearSig::~pkgAcqMetaClearSig()				/*{{{*/
{
}
									/*}}}*/
// pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers	/*{{{*/
// ---------------------------------------------------------------------
/* Extend the base headers with Fail-Ignore so a missing InRelease file
   is not treated as a hard error - we can still fall back to the
   detached Release/Release.gpg pair. */
string pkgAcqMetaClearSig::Custom600Headers() const
{
   std::string Result = pkgAcqMetaBase::Custom600Headers();
   Result.append("\nFail-Ignore: true");
   return Result;
}
									/*}}}*/
// pkgAcqMetaClearSig::Done - We got a file				/*{{{*/
// ---------------------------------------------------------------------
/* Called twice: once after the download (AuthPass == false), where the
   file is queued for gpgv verification, and once after verification
   (AuthPass == true), where the verified file is staged into lists/. */
void pkgAcqMetaClearSig::Done(std::string const &Message,
                              HashStringList const &Hashes,
                              pkgAcquire::MethodConfig const * const Cnf)
{
   Item::Done(Message, Hashes, Cnf);

   // if we expect a ClearTextSignature (InRelease), ensure that
   // this is what we get and if not fail to queue a
   // Release/Release.gpg, see #346386
   if (FileExists(DestFile) && !StartsWithGPGClearTextSignature(DestFile))
   {
      pkgAcquire::Item::Failed(Message, Cnf);
      RenameOnError(NotClearsigned);
      TransactionManager->AbortTransaction();
      return;
   }

   if(AuthPass == false)
   {
      // first pass: download finished, hand the file to gpgv; the
      // second Done() call happens after the gpgv method reports back
      if(CheckDownloadDone(this, Message, Hashes) == true)
         QueueForSignatureVerify(this, DestFile, DestFile);
      return;
   }
   else if(CheckAuthDone(Message) == true)
   {
      if (TransactionManager->IMSHit == false)
	 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
      else if (RealFileExists(GetFinalFilename()) == false)
      {
	 // We got an InRelease file IMSHit, but we haven't one, which means
	 // we had a valid Release/Release.gpg combo stepping in, which we have
	 // to 'acquire' now to ensure list cleanup isn't removing them
	 new NoActionItem(Owner, DetachedDataTarget);
	 new NoActionItem(Owner, DetachedSigTarget);
      }
   }
}
									/*}}}*/
// pkgAcqMetaClearSig::Failed - InRelease download/verify failed	/*{{{*/
// ---------------------------------------------------------------------
/* On a download failure (AuthPass == false) we fall back to the
   detached Release/Release.gpg pair; on a verification failure we only
   continue unverified if the user explicitly allows insecure repos. */
void pkgAcqMetaClearSig::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf) /*{{{*/
{
   Item::Failed(Message, Cnf);

   // we failed, we will not get additional items from this method
   ExpectedAdditionalItems = 0;

   if (AuthPass == false)
   {
      // Queue the 'old' InRelease file for removal if we try Release.gpg
      // as otherwise the file will stay around and gives a false-auth
      // impression (CVE-2012-0214)
      TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
      Status = StatDone;

      // retry with the detached Release + Release.gpg pair
      new pkgAcqMetaIndex(Owner, TransactionManager, DetachedDataTarget, DetachedSigTarget, IndexTargets, TransactionManager->MetaIndexParser);
   }
   else
   {
      if(CheckStopAuthentication(this, Message))
         return;

      _error->Warning(_("The data from '%s' is not signed. Packages "
	       "from that repository can not be authenticated."),
	    ClearsignedTarget.Description.c_str());

      // No Release file was present, or verification failed, so fall
      // back to queueing Packages files without verification
      // only allow going further if the users explicitely wants it
      if(AllowInsecureRepositories(TransactionManager->MetaIndexParser, TransactionManager, this) == true)
      {
	 Status = StatDone;

	 /* InRelease files become Release files, otherwise
	  * they would be considered as trusted later on */
	 string const FinalRelease = GetFinalFileNameFromURI(DetachedDataTarget.URI);
	 string const PartialRelease = GetPartialFileNameFromURI(DetachedDataTarget.URI);
	 string const FinalReleasegpg = GetFinalFileNameFromURI(DetachedSigTarget.URI);
	 string const FinalInRelease = GetFinalFilename();
	 Rename(DestFile, PartialRelease);
	 TransactionManager->TransactionStageCopy(this, PartialRelease, FinalRelease);

	 if (RealFileExists(FinalReleasegpg) || RealFileExists(FinalInRelease))
	 {
	    // open the last Release if we have it
	    if (TransactionManager->IMSHit == false)
	    {
	       TransactionManager->LastMetaIndexParser = new indexRecords;
	       _error->PushToStack();
	       if (RealFileExists(FinalInRelease))
		  TransactionManager->LastMetaIndexParser->Load(FinalInRelease);
	       else
		  TransactionManager->LastMetaIndexParser->Load(FinalRelease);
	       // its unlikely to happen, but if what we have is bad ignore it
	       if (_error->PendingError())
	       {
		  delete TransactionManager->LastMetaIndexParser;
		  TransactionManager->LastMetaIndexParser = NULL;
	       }
	       _error->RevertToStack();
	    }
	 }

	 // we parse the indexes here because at this point the user wanted
	 // a repository that may potentially harm him
	 if (TransactionManager->MetaIndexParser->Load(PartialRelease) == false || VerifyVendor(Message) == false)
	    /* expired Release files are still a problem you need extra force for */;
	 else
	    QueueIndexes(true);
      }
   }
}
									/*}}}*/
// pkgAcqMetaIndex - Acquire a (detached) Release file			/*{{{*/
// ---------------------------------------------------------------------
/* Downloads the Release file into partial/; once done, a pkgAcqMetaSig
   is spawned for the matching Release.gpg (see Done() below). */
pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire * const Owner,		/*{{{*/
                                 pkgAcqMetaBase * const TransactionManager,
				 IndexTarget const &DataTarget,
				 IndexTarget const &DetachedSigTarget,
				 vector<IndexTarget> const IndexTargets,
				 indexRecords * const MetaIndexParser) :
   pkgAcqMetaBase(Owner, TransactionManager, IndexTargets, DataTarget, MetaIndexParser),
   DetachedSigTarget(DetachedSigTarget)
{
   if(_config->FindB("Debug::Acquire::Transaction", false) == true)
      std::clog << "New pkgAcqMetaIndex with TransactionManager "
                << this->TransactionManager << std::endl;

   DestFile = GetPartialFileNameFromURI(DataTarget.URI);

   // Create the item
   Desc.Description = DataTarget.Description;
   Desc.Owner = this;
   Desc.ShortDesc = DataTarget.ShortDesc;
   Desc.URI = DataTarget.URI;

   // we expect more item
   ExpectedAdditionalItems = IndexTargets.size();
   QueueURI(Desc);
}
									/*}}}*/
// pkgAcqMetaIndex::Done - Release file downloaded			/*{{{*/
// ---------------------------------------------------------------------
/* On a successful download, chain to the detached signature download;
   everything else (verify, queueing indexes) happens from there. */
void pkgAcqMetaIndex::Done(string const &Message,			/*{{{*/
                           HashStringList const &Hashes,
			   pkgAcquire::MethodConfig const * const Cfg)
{
   Item::Done(Message,Hashes,Cfg);

   if(CheckDownloadDone(this, Message, Hashes) == false)
      return;

   // we have a Release file, now download the Signature, all further
   // verify/queue for additional downloads will be done in the
   // pkgAcqMetaSig::Done() code
   new pkgAcqMetaSig(Owner, TransactionManager, DetachedSigTarget, this);
}
									/*}}}*/
// pkgAcqMetaIndex::Failed - no Release file present			/*{{{*/
// ---------------------------------------------------------------------
/* A repository without a Release file: warn and, only if the user has
   explicitly opted into insecure repositories, queue the index files
   without any hashsum verification. */
void pkgAcqMetaIndex::Failed(string const &Message,
                             pkgAcquire::MethodConfig const * const Cnf)
{
   pkgAcquire::Item::Failed(Message, Cnf);
   Status = StatDone;

   _error->Warning(_("The repository '%s' does not have a Release file. "
	    "This is deprecated, please contact the owner of the "
	    "repository."), Target.Description.c_str());

   // No Release file was present so fall
   // back to queueing Packages files without verification
   // only allow going further if the users explicitely wants it
   if(AllowInsecureRepositories(TransactionManager->MetaIndexParser, TransactionManager, this) == false)
      return;

   // ensure old Release files are removed
   TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
   delete TransactionManager->MetaIndexParser;
   TransactionManager->MetaIndexParser = NULL;

   // queue without any kind of hashsum support
   QueueIndexes(false);
}
									/*}}}*/
// pkgAcqMetaIndex::Finished - all items of the transaction are done	/*{{{*/
// ---------------------------------------------------------------------
/* Commit the transaction (moving staged files into lists/) unless any
   item in it reported an error. */
void pkgAcqMetaIndex::Finished()					/*{{{*/
{
   if(_config->FindB("Debug::Acquire::Transaction", false) == true)
      std::clog << "Finished: " << DestFile <<std::endl;

   if(TransactionManager == NULL)
      return;
   if(TransactionManager->TransactionHasError() == true)
      return;
   TransactionManager->CommitTransaction();
}
									/*}}}*/
// DescURI - the URI shown to the user for this item: the Release URI
std::string pkgAcqMetaIndex::DescURI() const				/*{{{*/
{
   return Target.URI;
}
									/*}}}*/
// AcqMetaSig::AcqMetaSig - Constructor					/*{{{*/
// ---------------------------------------------------------------------
/* Fetch the detached Release.gpg for the already-downloaded Release.
   On an IMS hit for the Release file we skip the download and verify
   the files we already have in lists/ directly. */
pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire * const Owner,
      pkgAcqMetaBase * const TransactionManager,
      IndexTarget const Target,
      pkgAcqMetaIndex * const MetaIndex) :
   pkgAcqTransactionItem(Owner, TransactionManager, Target), MetaIndex(MetaIndex)
{
   DestFile = GetPartialFileNameFromURI(Target.URI);

   // remove any partial downloaded sig-file in partial/.
   // it may confuse proxies and is too small to warrant a
   // partial download anyway
   unlink(DestFile.c_str());

   // set the TransactionManager
   if(_config->FindB("Debug::Acquire::Transaction", false) == true)
      std::clog << "New pkgAcqMetaSig with TransactionManager "
                << TransactionManager << std::endl;

   // Create the item
   Desc.Description = Target.Description;
   Desc.Owner = this;
   Desc.ShortDesc = Target.ShortDesc;
   Desc.URI = Target.URI;

   // If we got a hit for Release, we will get one for Release.gpg too (or obscure errors),
   // so we skip the download step and go instantly to verification
   if (TransactionManager->IMSHit == true && RealFileExists(GetFinalFilename()))
   {
      Complete = true;
      Status = StatDone;
      PartialFile = DestFile = GetFinalFilename();
      MetaIndexFileSignature = DestFile;
      MetaIndex->QueueForSignatureVerify(this, MetaIndex->DestFile, DestFile);
   }
   else
      QueueURI(Desc);
}
									/*}}}*/
// destructor: nothing to release; members clean up via their own dtors
pkgAcqMetaSig::~pkgAcqMetaSig()						/*{{{*/
{
}
									/*}}}*/
// AcqMetaSig::Done - The signature was downloaded/verified		/*{{{*/
// ---------------------------------------------------------------------
/* Two-pass like pkgAcqMetaClearSig::Done: first pass queues the gpgv
   verification of Release against Release.gpg, second pass (after
   verification) stages both files into lists/. */
void pkgAcqMetaSig::Done(string const &Message, HashStringList const &Hashes,
			 pkgAcquire::MethodConfig const * const Cfg)
{
   // restore DestFile which the gpgv method redirected to the Release file
   if (MetaIndexFileSignature.empty() == false)
   {
      DestFile = MetaIndexFileSignature;
      MetaIndexFileSignature.clear();
   }
   Item::Done(Message, Hashes, Cfg);

   if(MetaIndex->AuthPass == false)
   {
      if(MetaIndex->CheckDownloadDone(this, Message, Hashes) == true)
      {
	 // destfile will be modified to point to MetaIndexFile for the
	 // gpgv method, so we need to save it here
	 MetaIndexFileSignature = DestFile;
	 MetaIndex->QueueForSignatureVerify(this, MetaIndex->DestFile, DestFile);
      }
      return;
   }
   else if(MetaIndex->CheckAuthDone(Message) == true)
   {
      // verification succeeded: stage signature and Release file for
      // the transaction commit (unless we are re-using the old files)
      if (TransactionManager->IMSHit == false)
      {
	 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
	 TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, MetaIndex->GetFinalFilename());
      }
   }
}
									/*}}}*/
// AcqMetaSig::Failed - signature download or verification failed	/*{{{*/
// ---------------------------------------------------------------------
/* Refuses to silently downgrade a previously-signed repository to an
   unsigned one (unless Acquire::AllowDowngradeToInsecureRepositories
   is set) and otherwise only proceeds unverified when insecure
   repositories are explicitly allowed. */
void pkgAcqMetaSig::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
{
   Item::Failed(Message,Cnf);

   // check if we need to fail at this point
   if (MetaIndex->AuthPass == true && MetaIndex->CheckStopAuthentication(this, Message))
      return;

   string const FinalRelease = MetaIndex->GetFinalFilename();
   string const FinalReleasegpg = GetFinalFilename();
   string const FinalInRelease = TransactionManager->GetFinalFilename();

   // a signed file existed before: this is a downgrade attempt
   if (RealFileExists(FinalReleasegpg) || RealFileExists(FinalInRelease))
   {
      std::string downgrade_msg;
      strprintf(downgrade_msg, _("The repository '%s' is no longer signed."),
	    MetaIndex->Target.Description.c_str());
      if(_config->FindB("Acquire::AllowDowngradeToInsecureRepositories"))
      {
	 // meh, the users wants to take risks (we still mark the packages
	 // from this repository as unauthenticated)
	 _error->Warning("%s", downgrade_msg.c_str());
	 _error->Warning(_("This is normally not allowed, but the option "
		  "Acquire::AllowDowngradeToInsecureRepositories was "
		  "given to override it."));
	 Status = StatDone;
      } else {
	 _error->Error("%s", downgrade_msg.c_str());
	 if (TransactionManager->IMSHit == false)
	    Rename(MetaIndex->DestFile, MetaIndex->DestFile + ".FAILED");
	 Item::Failed("Message: " + downgrade_msg, Cnf);
	 TransactionManager->AbortTransaction();
	 return;
      }
   }
   else
      _error->Warning(_("The data from '%s' is not signed. Packages "
	       "from that repository can not be authenticated."),
	    MetaIndex->Target.Description.c_str());

   // ensures that a Release.gpg file in the lists/ is removed by the transaction
   TransactionManager->TransactionStageRemoval(this, DestFile);

   // only allow going further if the users explicitely wants it
   if(AllowInsecureRepositories(TransactionManager->MetaIndexParser, TransactionManager, this) == true)
   {
      if (RealFileExists(FinalReleasegpg) || RealFileExists(FinalInRelease))
      {
	 // open the last Release if we have it
	 if (TransactionManager->IMSHit == false)
	 {
	    TransactionManager->LastMetaIndexParser = new indexRecords;
	    _error->PushToStack();
	    if (RealFileExists(FinalInRelease))
	       TransactionManager->LastMetaIndexParser->Load(FinalInRelease);
	    else
	       TransactionManager->LastMetaIndexParser->Load(FinalRelease);
	    // its unlikely to happen, but if what we have is bad ignore it
	    if (_error->PendingError())
	    {
	       delete TransactionManager->LastMetaIndexParser;
	       TransactionManager->LastMetaIndexParser = NULL;
	    }
	    _error->RevertToStack();
	 }
      }

      // we parse the indexes here because at this point the user wanted
      // a repository that may potentially harm him
      if (TransactionManager->MetaIndexParser->Load(MetaIndex->DestFile) == false || MetaIndex->VerifyVendor(Message) == false)
	 /* expired Release files are still a problem you need extra force for */;
      else
	 MetaIndex->QueueIndexes(true);

      TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, MetaIndex->GetFinalFilename());
   }

   // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor
   // NOTE(review): Cnf is dereferenced unconditionally here - verify
   // that no caller can reach this point with Cnf == NULL
   if (Cnf->LocalOnly == true ||
       StringToBool(LookupTag(Message,"Transient-Failure"),false) == false)
   {
      // Ignore this
      Status = StatDone;
   }
}
									/*}}}*/
// AcqBaseIndex - Constructor						/*{{{*/
// ---------------------------------------------------------------------
/* Common base for all index-file items; only forwards to the
   transaction-item base, no own state to initialize. */
pkgAcqBaseIndex::pkgAcqBaseIndex(pkgAcquire * const Owner,
      pkgAcqMetaBase * const TransactionManager,
      IndexTarget const Target)
: pkgAcqTransactionItem(Owner, TransactionManager, Target)
{
}
									/*}}}*/
// AcqDiffIndex::AcqDiffIndex - Constructor				/*{{{*/
// ---------------------------------------------------------------------
/* Get the DiffIndex file first and see if there are patches available
 * If so, create a pkgAcqIndexDiffs fetcher that will get and apply the
 * patches. If anything goes wrong in that process, it will fall back to
 * the original packages file
 */
pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire * const Owner,
                                 pkgAcqMetaBase * const TransactionManager,
                                 IndexTarget const Target)
   : pkgAcqBaseIndex(Owner, TransactionManager, Target)
{
   Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);

   // the pdiff index lives next to the index it describes
   std::string const IndexURI = Target.URI + ".diff/Index";

   Desc.Owner = this;
   Desc.ShortDesc = Target.ShortDesc;
   Desc.Description = Target.Description + ".diff/Index";
   Desc.URI = IndexURI;
   DestFile = GetPartialFileNameFromURI(IndexURI);

   if(Debug)
      std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;

   QueueURI(Desc);
}
									/*}}}*/
// AcqDiffIndex::Custom600Headers - Insert custom request headers	/*{{{*/
// ---------------------------------------------------------------------
/* Mark the request as an index file and, if we already have a final
   copy, send its mtime so the server can answer with an IMS hit. */
string pkgAcqDiffIndex::Custom600Headers() const
{
   string const Final = GetFinalFilename();

   if(Debug)
      std::clog << "Custom600Header-IMS: " << Final << std::endl;

   string Result = "\nIndex-File: true";
   struct stat Buf;
   if (stat(Final.c_str(),&Buf) == 0)
      Result += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
   return Result;
}
									/*}}}*/
// QueueOnIMSHit - our pdiff index (and base index) are already current	/*{{{*/
void pkgAcqDiffIndex::QueueOnIMSHit() const
{
   // list cleanup needs to know that this file as well as the already
   // present index is ours, so we create an empty diff to save it for us
   new pkgAcqIndexDiffs(Owner, TransactionManager, Target);
}
									/*}}}*/
// ParseDiffIndex - parse the .diff/Index file and queue patch items	/*{{{*/
// ---------------------------------------------------------------------
/* Reads the pdiff Index (a deb822 section with *-Current, *-History,
   *-Patches and *-Download fields per hash type), figures out which
   patches lead from the index file we have to the one the server has,
   and queues them (serial or merged). Returns true if patches were
   queued (the item dequeues itself); false tells the caller to fall
   back to downloading the complete index file. */
bool pkgAcqDiffIndex::ParseDiffIndex(string const &IndexDiffFile)
{
   // failing here is fine: our caller will take care of trying to
   // get the complete file if patching fails
   if(Debug)
      std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile
	 << std::endl;

   FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
   pkgTagFile TF(&Fd);
   if (_error->PendingError() == true)
      return false;

   pkgTagSection Tags;
   if(unlikely(TF.Step(Tags) == false))
      return false;

   // "<Hash>-Current" announces hash and size of the index the server
   // currently has - the target all patch series end at
   HashStringList ServerHashes;
   unsigned long long ServerSize = 0;

   for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
   {
      std::string tagname = *type;
      tagname.append("-Current");
      std::string const tmp = Tags.FindS(tagname.c_str());
      if (tmp.empty() == true)
	 continue;

      string hash;
      unsigned long long size;
      std::stringstream ss(tmp);
      ss >> hash >> size;
      if (unlikely(hash.empty() == true))
	 continue;
      // all hash types must agree on the size
      if (unlikely(ServerSize != 0 && ServerSize != size))
	 continue;
      ServerHashes.push_back(HashString(*type, hash));
      ServerSize = size;
   }

   if (ServerHashes.usable() == false)
   {
      if (Debug == true)
	 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl;
      return false;
   }

   // the patch result must match what the Release file promises for the
   // target index, otherwise patching can't produce a verifiable file
   std::string const CurrentPackagesFile = GetFinalFileNameFromURI(Target.URI);
   HashStringList const TargetFileHashes = GetExpectedHashesFor(Target.MetaKey);
   if (TargetFileHashes.usable() == false || ServerHashes != TargetFileHashes)
   {
      if (Debug == true)
      {
	 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl;
	 printHashSumComparision(CurrentPackagesFile, ServerHashes, TargetFileHashes);
      }
      return false;
   }

   HashStringList LocalHashes;
   // try avoiding calculating the hash here as this is costly
   if (TransactionManager->LastMetaIndexParser != NULL)
      LocalHashes = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target.MetaKey);
   if (LocalHashes.usable() == false)
   {
      FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
      Hashes LocalHashesCalc(ServerHashes);
      LocalHashesCalc.AddFD(fd);
      LocalHashes = LocalHashesCalc.GetHashStringList();
   }

   if (ServerHashes == LocalHashes)
   {
      // we have the same sha1 as the server so we are done here
      if(Debug)
	 std::clog << "pkgAcqDiffIndex: Package file " << CurrentPackagesFile << " is up-to-date" << std::endl;
      QueueOnIMSHit();
      return true;
   }

   if(Debug)
      std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at "
	 << CurrentPackagesFile << " " << LocalHashes.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl;

   // parse all of (provided) history
   // "<Hash>-History" lists, per patch, the hash/size of the index that
   // results from applying that patch; only files already seen while
   // parsing the first supported hash type are accepted later on
   vector<DiffInfo> available_patches;
   bool firstAcceptedHashes = true;
   for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
   {
      if (LocalHashes.find(*type) == NULL)
	 continue;
      std::string tagname = *type;
      tagname.append("-History");
      std::string const tmp = Tags.FindS(tagname.c_str());
      if (tmp.empty() == true)
	 continue;

      string hash, filename;
      unsigned long long size;
      std::stringstream ss(tmp);

      while (ss >> hash >> size >> filename)
      {
	 if (unlikely(hash.empty() == true || filename.empty() == true))
	    continue;

	 // see if we have a record for this file already
	 std::vector<DiffInfo>::iterator cur = available_patches.begin();
	 for (; cur != available_patches.end(); ++cur)
	 {
	    if (cur->file != filename)
	       continue;
	    cur->result_hashes.push_back(HashString(*type, hash));
	    break;
	 }
	 if (cur != available_patches.end())
	    continue;
	 if (firstAcceptedHashes == true)
	 {
	    DiffInfo next;
	    next.file = filename;
	    next.result_hashes.push_back(HashString(*type, hash));
	    next.result_hashes.FileSize(size);
	    available_patches.push_back(next);
	 }
	 else
	 {
	    if (Debug == true)
	       std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
		  << " wasn't in the list for the first parsed hash! (history)" << std::endl;
	    break;
	 }
      }
      firstAcceptedHashes = false;
   }

   if (unlikely(available_patches.empty() == true))
   {
      if (Debug)
	 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
	    << "Couldn't find any patches for the patch series." << std::endl;
      return false;
   }

   // "<Hash>-Patches": hash/size of each (uncompressed) patch file itself
   for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
   {
      if (LocalHashes.find(*type) == NULL)
	 continue;
      std::string tagname = *type;
      tagname.append("-Patches");
      std::string const tmp = Tags.FindS(tagname.c_str());
      if (tmp.empty() == true)
	 continue;

      string hash, filename;
      unsigned long long size;
      std::stringstream ss(tmp);

      while (ss >> hash >> size >> filename)
      {
	 if (unlikely(hash.empty() == true || filename.empty() == true))
	    continue;

	 // see if we have a record for this file already
	 std::vector<DiffInfo>::iterator cur = available_patches.begin();
	 for (; cur != available_patches.end(); ++cur)
	 {
	    if (cur->file != filename)
	       continue;
	    if (cur->patch_hashes.empty())
	       cur->patch_hashes.FileSize(size);
	    cur->patch_hashes.push_back(HashString(*type, hash));
	    break;
	 }
	 if (cur != available_patches.end())
	    continue;
	 if (Debug == true)
	    std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
	       << " wasn't in the list for the first parsed hash! (patches)" << std::endl;
	 break;
      }
   }

   // "<Hash>-Download": hash/size of the compressed (.gz) patch as
   // actually downloaded; stored under the uncompressed filename
   for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
   {
      std::string tagname = *type;
      tagname.append("-Download");
      std::string const tmp = Tags.FindS(tagname.c_str());
      if (tmp.empty() == true)
	 continue;

      string hash, filename;
      unsigned long long size;
      std::stringstream ss(tmp);

      // FIXME: all of pdiff supports only .gz compressed patches
      while (ss >> hash >> size >> filename)
      {
	 if (unlikely(hash.empty() == true || filename.empty() == true))
	    continue;
	 if (unlikely(APT::String::Endswith(filename, ".gz") == false))
	    continue;
	 filename.erase(filename.length() - 3);

	 // see if we have a record for this file already
	 std::vector<DiffInfo>::iterator cur = available_patches.begin();
	 for (; cur != available_patches.end(); ++cur)
	 {
	    if (cur->file != filename)
	       continue;
	    if (cur->download_hashes.empty())
	       cur->download_hashes.FileSize(size);
	    cur->download_hashes.push_back(HashString(*type, hash));
	    break;
	 }
	 if (cur != available_patches.end())
	    continue;
	 if (Debug == true)
	    std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
	       << " wasn't in the list for the first parsed hash! (download)" << std::endl;
	 break;
      }
   }

   // find the patch whose result matches the file we currently have:
   // that is where our series starts; everything before it is dropped
   bool foundStart = false;
   for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
	 cur != available_patches.end(); ++cur)
   {
      if (LocalHashes != cur->result_hashes)
	 continue;
      available_patches.erase(available_patches.begin(), cur);
      foundStart = true;
      break;
   }

   if (foundStart == false || unlikely(available_patches.empty() == true))
   {
      if (Debug)
	 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
	    << "Couldn't find the start of the patch series." << std::endl;
      return false;
   }

   // patching with too many files is rather slow compared to a fast download
   unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
   if (fileLimit != 0 && fileLimit < available_patches.size())
   {
      if (Debug)
	 std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
	    << ") so fallback to complete download" << std::endl;
      return false;
   }

   // calculate the size of all patches we have to get
   // note that all sizes are uncompressed, while we download compressed files
   unsigned long long patchesSize = 0;
   for (std::vector<DiffInfo>::const_iterator cur = available_patches.begin();
	 cur != available_patches.end(); ++cur)
      patchesSize += cur->patch_hashes.FileSize();
   // SizeLimit is a percentage of the full index size (default: 100%)
   unsigned long long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
   if (sizeLimit > 0 && (sizeLimit/100) < patchesSize)
   {
      if (Debug)
	 std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
	    << ") so fallback to complete download" << std::endl;
      return false;
   }

   // we have something, queue the diffs
   string::size_type const last_space = Description.rfind(" ");
   if(last_space != string::npos)
      Description.erase(last_space, Description.size()-last_space);

   /* decide if we should download patches one by one or in one go:
      The first is good if the server merges patches, but many don't so client
      based merging can be attempt in which case the second is better.
      "bad things" will happen if patches are merged on the server,
      but client side merging is attempt as well */
   bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
   if (pdiff_merge == true)
   {
      // reprepro adds this flag if it has merged patches on the server
      std::string const precedence = Tags.FindS("X-Patch-Precedence");
      pdiff_merge = (precedence != "merged");
   }

   if (pdiff_merge == false)
      new pkgAcqIndexDiffs(Owner, TransactionManager, Target, available_patches);
   else
   {
      std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
      for(size_t i = 0; i < available_patches.size(); ++i)
	 (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, TransactionManager,
	       Target,
	       available_patches[i],
	       diffs);
   }

   Complete = false;
   Status = StatDone;
   Dequeue();
   return true;
}
									/*}}}*/
// pkgAcqDiffIndex::Failed - no usable pdiff index			/*{{{*/
// ---------------------------------------------------------------------
/* Not fatal: simply queue the complete index file instead. */
void pkgAcqDiffIndex::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
{
   Item::Failed(Message,Cnf);
   Status = StatDone;

   if(Debug)
   {
      std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl;
      std::clog << "Falling back to normal index file acquire" << std::endl;
   }

   new pkgAcqIndex(Owner, TransactionManager, Target);
}
									/*}}}*/
// pkgAcqDiffIndex::Done - pdiff index downloaded (or IMS hit)		/*{{{*/
// ---------------------------------------------------------------------
/* Parse the index and queue patches; whether parsing succeeds or not,
   the index itself is staged so list cleanup keeps it. */
void pkgAcqDiffIndex::Done(string const &Message,HashStringList const &Hashes,
			   pkgAcquire::MethodConfig const * const Cnf)
{
   if(Debug)
      std::clog << "pkgAcqDiffIndex::Done(): " << Desc.URI << std::endl;

   Item::Done(Message, Hashes, Cnf);

   string const FinalFile = GetFinalFilename();
   // on an IMS hit parse the file we already have in lists/
   if(StringToBool(LookupTag(Message,"IMS-Hit"),false))
      DestFile = FinalFile;

   if(ParseDiffIndex(DestFile) == false)
   {
      Failed("Message: Couldn't parse pdiff index", Cnf);
      // queue for final move - this should happen even if we fail
      // while parsing (e.g. on sizelimit) and download the complete file.
      TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
      return;
   }

   TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);

   Complete = true;
   Status = StatDone;
   Dequeue();

   return;
}
									/*}}}*/
// AcqIndexDiffs::AcqIndexDiffs - Constructor				/*{{{*/
// ---------------------------------------------------------------------
/* The package diff is added to the queue. one object is constructed
 * for each diff and the index
 */
pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire * const Owner,
                                   pkgAcqMetaBase * const TransactionManager,
                                   IndexTarget const Target,
				   vector<DiffInfo> const &diffs)
   : pkgAcqBaseIndex(Owner, TransactionManager, Target),
     available_patches(diffs)
{
   DestFile = GetPartialFileNameFromURI(Target.URI);

   Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);

   Desc.Owner = this;
   Description = Target.Description;
   Desc.ShortDesc = Target.ShortDesc;

   // an empty patch list is the IMS-hit case (see QueueOnIMSHit):
   // nothing to download, just claim the final file for the transaction
   if(available_patches.empty() == true)
   {
      // we are done (yeah!), check hashes against the final file
      DestFile = GetFinalFileNameFromURI(Target.URI);
      Finish(true);
   }
   else
   {
      // patching needs to be bootstrapped with the 'old' version
      std::string const PartialFile = GetPartialFileNameFromURI(Target.URI);
      if (RealFileExists(PartialFile) == false)
      {
	 // symlink so rred can read the current index from partial/
	 if (symlink(GetFinalFilename().c_str(), PartialFile.c_str()) != 0)
	 {
	    Failed("Link creation of " + PartialFile + " to " + GetFinalFilename() + " failed", NULL);
	    return;
	 }
      }

      // get the next diff
      State = StateFetchDiff;
      QueueNextDiff();
   }
}
									/*}}}*/
// pkgAcqIndexDiffs::Failed - patching failed, get the full index	/*{{{*/
// ---------------------------------------------------------------------
/* Preserve the broken intermediate/patch files as *.FAILED for
   debugging and queue a normal full-index download instead. */
void pkgAcqIndexDiffs::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
{
   Item::Failed(Message,Cnf);
   Status = StatDone;

   if(Debug)
      std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << " with " << Message << std::endl
		<< "Falling back to normal index file acquire" << std::endl;
   // point DestFile back at the intermediate index so RenameOnError
   // moves the right file out of the way
   DestFile = GetPartialFileNameFromURI(Target.URI);
   RenameOnError(PDiffError);
   std::string const patchname = GetDiffsPatchFileName(DestFile);
   if (RealFileExists(patchname))
      rename(patchname.c_str(), std::string(patchname + ".FAILED").c_str());
   new pkgAcqIndex(Owner, TransactionManager, Target);
   Finish();
}
									/*}}}*/
// Finish - helper that cleans the item out of the fetcher queue	/*{{{*/
// ---------------------------------------------------------------------
/* With allDone == true the fully patched index is staged under its
   final name; either way the item is marked done and dequeued. */
void pkgAcqIndexDiffs::Finish(bool allDone)
{
   if(Debug)
      std::clog << "pkgAcqIndexDiffs::Finish(): "
                << allDone << " "
                << Desc.URI << std::endl;

   // we restore the original name, this is required, otherwise
   // the file will be cleaned
   if(allDone)
   {
      TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
      // this is for the "real" finish
      Complete = true;
   }
   else
   {
      if(Debug)
	 std::clog << "Finishing: " << Desc.URI << std::endl;
      Complete = false;
   }

   Status = StatDone;
   Dequeue();

   if(allDone && Debug)
      std::clog << "\n\nallDone: " << DestFile << "\n" << std::endl;
   return;
}
									/*}}}*/
// QueueNextDiff - queue the download of the next patch in the series	/*{{{*/
// ---------------------------------------------------------------------
/* Hashes the intermediate index we currently have, drops the leading
   patches whose results we have already passed and queues the next
   patch. Returns false (after calling Failed()) if the current state
   can't be matched against the patch series. */
bool pkgAcqIndexDiffs::QueueNextDiff()
{
   // calc sha1 of the just patched file
   std::string const FinalFile = GetPartialFileNameFromURI(Target.URI);
   if(!FileExists(FinalFile))
   {
      Failed("Message: No FinalFile " + FinalFile + " available", NULL);
      return false;
   }

   FileFd fd(FinalFile, FileFd::ReadOnly);
   Hashes LocalHashesCalc;
   LocalHashesCalc.AddFD(fd);
   HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();

   if(Debug)
      std::clog << "QueueNextDiff: " << FinalFile << " (" << LocalHashes.find(NULL)->toStr() << ")" << std::endl;

   HashStringList const TargetFileHashes = GetExpectedHashesFor(Target.MetaKey);
   if (unlikely(LocalHashes.usable() == false || TargetFileHashes.usable() == false))
   {
      Failed("Local/Expected hashes are not usable", NULL);
      return false;
   }

   // final file reached before all patches are applied
   if(LocalHashes == TargetFileHashes)
   {
      Finish(true);
      return true;
   }

   // remove all patches until the next matching patch is found
   // this requires the Index file to be ordered
   // (BUGFIX: the previous for-loop erased the iterator it was about to
   //  increment - erasing a vector element invalidates the iterator, so
   //  incrementing it is undefined behaviour and in practice skipped
   //  every other entry; erase from the front instead)
   while (available_patches.empty() == false &&
	  available_patches.begin()->result_hashes != LocalHashes)
      available_patches.erase(available_patches.begin());

   // error checking and falling back if no patch was found
   if(available_patches.empty() == true)
   {
      Failed("No patches left to reach target", NULL);
      return false;
   }

   // queue the right diff
   Desc.URI = Target.URI + ".diff/" + available_patches[0].file + ".gz";
   Desc.Description = Description + " " + available_patches[0].file + string(".pdiff");
   DestFile = GetPartialFileNameFromURI(Target.URI + ".diff/" + available_patches[0].file);

   if(Debug)
      std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl;

   QueueURI(Desc);

   return true;
}
									/*}}}*/
// pkgAcqIndexDiffs::Done - a patch was downloaded or applied		/*{{{*/
// ---------------------------------------------------------------------
/* Two-state machine: StateFetchDiff hands the downloaded patch to the
   rred method for application; StateApplyDiff installs the patched
   index and either queues the next patch or finishes. */
void pkgAcqIndexDiffs::Done(string const &Message, HashStringList const &Hashes,
			    pkgAcquire::MethodConfig const * const Cnf)
{
   if(Debug)
      std::clog << "pkgAcqIndexDiffs::Done(): " << Desc.URI << std::endl;

   Item::Done(Message, Hashes, Cnf);

   // FinalFile is the intermediate (partial/) index being patched
   std::string const FinalFile = GetPartialFileNameFromURI(Target.URI);
   std::string const PatchFile = GetDiffsPatchFileName(FinalFile);

   // success in downloading a diff, enter ApplyDiff state
   if(State == StateFetchDiff)
   {
      Rename(DestFile, PatchFile);

      if(Debug)
	 std::clog << "Sending to rred method: " << FinalFile << std::endl;

      State = StateApplyDiff;
      Local = true;
      Desc.URI = "rred:" + FinalFile;
      QueueURI(Desc);
      SetActiveSubprocess("rred");
      return;
   }

   // success in download/apply a diff, queue next (if needed)
   if(State == StateApplyDiff)
   {
      // remove the just applied patch
      available_patches.erase(available_patches.begin());
      unlink(PatchFile.c_str());

      // move into place
      if(Debug)
      {
	 std::clog << "Moving patched file in place: " << std::endl
	    << DestFile << " -> " << FinalFile << std::endl;
      }
      Rename(DestFile,FinalFile);
      chmod(FinalFile.c_str(),0644);

      // see if there is more to download
      if(available_patches.empty() == false) {
	 // a fresh item handles the remaining patches
	 new pkgAcqIndexDiffs(Owner, TransactionManager, Target,
			      available_patches);
	 return Finish();
      } else
         // update
         DestFile = FinalFile;
      return Finish(true);
   }
}
									/*}}}*/
// pkgAcqIndexDiffs::Custom600Headers - headers for the rred method	/*{{{*/
// ---------------------------------------------------------------------
/* While applying a patch, pass the expected hashes of the patch file
   to the rred method so it can verify what it applies. */
std::string pkgAcqIndexDiffs::Custom600Headers() const
{
   if(State != StateApplyDiff)
      return pkgAcqBaseIndex::Custom600Headers();

   std::string Headers;
   HashStringList const &ExpectedHashes = available_patches[0].patch_hashes;
   for (HashStringList::const_iterator hs = ExpectedHashes.begin(); hs != ExpectedHashes.end(); ++hs)
      Headers += "\nPatch-0-" + hs->HashType() + "-Hash: " + hs->HashValue();
   Headers += pkgAcqBaseIndex::Custom600Headers();
   return Headers;
}
									/*}}}*/
// AcqIndexMergeDiffs::AcqIndexMergeDiffs - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* One item per patch; all items of the series share the allPatches
   vector so the last finished download can trigger the merged apply. */
pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire * const Owner,
                                             pkgAcqMetaBase * const TransactionManager,
                                             IndexTarget const Target,
                                             DiffInfo const &patch,
                                             std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches)
   : pkgAcqBaseIndex(Owner, TransactionManager, Target),
     patch(patch), allPatches(allPatches), State(StateFetchDiff)
{
   Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);

   Description = Target.Description;
   // patches are downloaded gzip-compressed but stored uncompressed
   std::string const PatchURI = Target.URI + ".diff/" + patch.file;

   Desc.Owner = this;
   Desc.ShortDesc = Target.ShortDesc;
   Desc.Description = Description + " " + patch.file + string(".pdiff");
   Desc.URI = PatchURI + ".gz";
   DestFile = GetPartialFileNameFromURI(PatchURI);

   if(Debug)
      std::clog << "pkgAcqIndexMergeDiffs: " << Desc.URI << std::endl;

   QueueURI(Desc);
}
									/*}}}*/
void pkgAcqIndexMergeDiffs::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
{
   if(Debug)
      std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl;

   Item::Failed(Message,Cnf);
   Status = StatDone;

   // mark ourselves finished, then look whether a sibling patch already
   // failed before us — only the very first failure triggers the fallback
   State = StateDoneDiff;
   for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator patchItem = allPatches->begin();
	patchItem != allPatches->end(); ++patchItem)
      if ((*patchItem)->State == StateErrorDiff)
	 return;

   State = StateErrorDiff;
   if (Debug)
      std::clog << "Falling back to normal index file acquire" << std::endl;

   // move any partial result out of the way and keep the broken patch
   // around as .FAILED for inspection, then fetch the full index instead
   DestFile = GetPartialFileNameFromURI(Target.URI);
   RenameOnError(PDiffError);
   std::string const patchname = GetMergeDiffsPatchFileName(DestFile, patch.file);
   if (RealFileExists(patchname))
      rename(patchname.c_str(), std::string(patchname + ".FAILED").c_str());
   new pkgAcqIndex(Owner, TransactionManager, Target);
}
/*}}}*/
// pkgAcqIndexMergeDiffs::Done - Patch fetched or patching finished	/*{{{*/
// ---------------------------------------------------------------------
/* Called twice per item: first when the downloaded patch arrives
   (StateFetchDiff) and again after the rred method has applied the
   whole batch of patches (StateApplyDiff).  Only the last item of the
   batch to complete its download triggers the actual patching run. */
void pkgAcqIndexMergeDiffs::Done(string const &Message, HashStringList const &Hashes,	/*{{{*/
				 pkgAcquire::MethodConfig const * const Cnf)
{
   if(Debug)
      std::clog << "pkgAcqIndexMergeDiffs::Done(): " << Desc.URI << std::endl;
   Item::Done(Message, Hashes, Cnf);
   // partial-dir name of the index being patched; also the rred input file
   string const FinalFile = GetPartialFileNameFromURI(Target.URI);
   if (State == StateFetchDiff)
   {
      // park the downloaded patch under its per-patch name
      Rename(DestFile, GetMergeDiffsPatchFileName(FinalFile, patch.file));
      // check if this is the last completed diff
      State = StateDoneDiff;
      for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
	    I != allPatches->end(); ++I)
	 if ((*I)->State != StateDoneDiff)
	 {
	    if(Debug)
	       std::clog << "Not the last done diff in the batch: " << Desc.URI << std::endl;
	    return;
	 }
      // this is the last completed diff, so we are ready to apply now
      State = StateApplyDiff;
      // patching needs to be bootstrapped with the 'old' version:
      // expose the current on-disk index under the partial name via symlink
      if (symlink(GetFinalFilename().c_str(), FinalFile.c_str()) != 0)
      {
	 Failed("Link creation of " + FinalFile + " to " + GetFinalFilename() + " failed", NULL);
	 return;
      }
      if(Debug)
	 std::clog << "Sending to rred method: " << FinalFile << std::endl;
      // hand the old index plus all parked patches to the rred method
      Local = true;
      Desc.URI = "rred:" + FinalFile;
      QueueURI(Desc);
      SetActiveSubprocess("rred");
      return;
   }
   // success in download/apply all diffs, clean up
   else if (State == StateApplyDiff)
   {
      // move the result into place
      std::string const Final = GetFinalFilename();
      if(Debug)
	 std::clog << "Queue patched file in place: " << std::endl
	    << DestFile << " -> " << Final << std::endl;
      // queue for copy by the transaction manager
      // (deferred so a failed transaction can still roll everything back)
      TransactionManager->TransactionStageCopy(this, DestFile, Final);
      // ensure the ed's are gone regardless of list-cleanup
      for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
	    I != allPatches->end(); ++I)
      {
	 std::string const PartialFile = GetPartialFileNameFromURI(Target.URI);
	 std::string const patch = GetMergeDiffsPatchFileName(PartialFile, (*I)->patch.file);
	 unlink(patch.c_str());
      }
      // drop the bootstrap symlink created in the fetch phase
      unlink(FinalFile.c_str());
      // all set and done
      Complete = true;
      if(Debug)
	 std::clog << "allDone: " << DestFile << "\n" << std::endl;
   }
}
/*}}}*/
std::string pkgAcqIndexMergeDiffs::Custom600Headers() const /*{{{*/
{
if(State != StateApplyDiff)
return pkgAcqBaseIndex::Custom600Headers();
std::ostringstream patchhashes;
unsigned int seen_patches = 0;
for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
I != allPatches->end(); ++I)
{
HashStringList const ExpectedHashes = (*I)->patch.patch_hashes;
for (HashStringList::const_iterator hs = ExpectedHashes.begin(); hs != ExpectedHashes.end(); ++hs)
patchhashes << "\nPatch-" << seen_patches << "-" << hs->HashType() << "-Hash: " << hs->HashValue();
++seen_patches;
}
patchhashes << pkgAcqBaseIndex::Custom600Headers();
return patchhashes.str();
}
/*}}}*/
// AcqIndex::AcqIndex - Constructor					/*{{{*/
pkgAcqIndex::pkgAcqIndex(pkgAcquire * const Owner,
                         pkgAcqMetaBase * const TransactionManager,
                         IndexTarget const Target)
   : pkgAcqBaseIndex(Owner, TransactionManager, Target)
{
   // decide which compression extensions to try, then queue the fetch
   AutoSelectCompression();
   Init(Target.URI, Target.Description, Target.ShortDesc);

   if(_config->FindB("Debug::Acquire::Transaction", false))
      std::clog << "New pkgIndex with TransactionManager "
                << TransactionManager << std::endl;
}
/*}}}*/
// AcqIndex::AutoSelectCompression - Select compression /*{{{*/
void pkgAcqIndex::AutoSelectCompression()
{
std::vector<std::string> types = APT::Configuration::getCompressionTypes();
CompressionExtensions = "";
if (TransactionManager->MetaIndexParser != NULL && TransactionManager->MetaIndexParser->Exists(Target.MetaKey))
{
for (std::vector<std::string>::const_iterator t = types.begin();
t != types.end(); ++t)
{
std::string CompressedMetaKey = string(Target.MetaKey).append(".").append(*t);
if (*t == "uncompressed" ||
TransactionManager->MetaIndexParser->Exists(CompressedMetaKey) == true)
CompressionExtensions.append(*t).append(" ");
}