Browse Source

Join with aliencode

Author: jgg
Date: 2001-02-20 07:03:16 GMT
Join with aliencode
tags/debian/0.7.21
Arch Librarian 17 years ago
parent
commit
b2e465d6d3
100 changed files with 9118 additions and 2488 deletions
  1. +5
    -1
      AUTHORS
  2. +8
    -7
      Makefile
  3. +154
    -0
      apt-inst/contrib/arfile.cc
  4. +68
    -0
      apt-inst/contrib/arfile.h
  5. +342
    -0
      apt-inst/contrib/extracttar.cc
  6. +54
    -0
      apt-inst/contrib/extracttar.h
  7. +30
    -0
      apt-inst/database.cc
  8. +56
    -0
      apt-inst/database.h
  9. +262
    -0
      apt-inst/deb/debfile.cc
  10. +92
    -0
      apt-inst/deb/debfile.h
  11. +490
    -0
      apt-inst/deb/dpkgdb.cc
  12. +53
    -0
      apt-inst/deb/dpkgdb.h
  13. +103
    -0
      apt-inst/dirstream.cc
  14. +61
    -0
      apt-inst/dirstream.h
  15. +5
    -0
      apt-inst/dpkg-diffs.txt
  16. +509
    -0
      apt-inst/extract.cc
  17. +52
    -0
      apt-inst/extract.h
  18. +588
    -0
      apt-inst/filelist.cc
  19. +314
    -0
      apt-inst/filelist.h
  20. +30
    -0
      apt-inst/makefile
  21. +82
    -60
      apt-pkg/acquire-item.cc
  22. +11
    -9
      apt-pkg/acquire-item.h
  23. +6
    -4
      apt-pkg/acquire-method.cc
  24. +4
    -2
      apt-pkg/acquire-method.h
  25. +9
    -7
      apt-pkg/acquire-worker.cc
  26. +3
    -3
      apt-pkg/acquire-worker.h
  27. +14
    -12
      apt-pkg/acquire.cc
  28. +13
    -12
      apt-pkg/acquire.h
  29. +156
    -67
      apt-pkg/algorithms.cc
  30. +21
    -4
      apt-pkg/algorithms.h
  31. +56
    -34
      apt-pkg/cachefile.cc
  32. +18
    -13
      apt-pkg/cachefile.h
  33. +31
    -19
      apt-pkg/cacheiterators.h
  34. +19
    -9
      apt-pkg/clean.cc
  35. +8
    -6
      apt-pkg/contrib/cdromutl.cc
  36. +16
    -14
      apt-pkg/contrib/cmndline.cc
  37. +340
    -66
      apt-pkg/contrib/configuration.cc
  38. +39
    -18
      apt-pkg/contrib/configuration.h
  39. +2
    -2
      apt-pkg/contrib/error.cc
  40. +17
    -5
      apt-pkg/contrib/error.h
  41. +81
    -26
      apt-pkg/contrib/fileutl.cc
  42. +5
    -2
      apt-pkg/contrib/fileutl.h
  43. +2
    -2
      apt-pkg/contrib/md5.h
  44. +15
    -8
      apt-pkg/contrib/mmap.cc
  45. +10
    -4
      apt-pkg/contrib/progress.cc
  46. +66
    -0
      apt-pkg/contrib/sptr.h
  47. +153
    -10
      apt-pkg/contrib/strutl.cc
  48. +34
    -4
      apt-pkg/contrib/strutl.h
  49. +506
    -0
      apt-pkg/deb/debindexfile.cc
  50. +112
    -0
      apt-pkg/deb/debindexfile.h
  51. +143
    -78
      apt-pkg/deb/deblistparser.cc
  52. +22
    -14
      apt-pkg/deb/deblistparser.h
  53. +20
    -3
      apt-pkg/deb/debrecords.cc
  54. +7
    -5
      apt-pkg/deb/debrecords.h
  55. +69
    -28
      apt-pkg/deb/debsrcrecords.cc
  56. +14
    -10
      apt-pkg/deb/debsrcrecords.h
  57. +197
    -0
      apt-pkg/deb/debsystem.cc
  58. +41
    -0
      apt-pkg/deb/debsystem.h
  59. +266
    -0
      apt-pkg/deb/debversion.cc
  60. +72
    -0
      apt-pkg/deb/debversion.h
  61. +0
    -119
      apt-pkg/deb/dpkginit.cc
  62. +0
    -34
      apt-pkg/deb/dpkginit.h
  63. +136
    -26
      apt-pkg/deb/dpkgpm.cc
  64. +5
    -3
      apt-pkg/deb/dpkgpm.h
  65. +164
    -91
      apt-pkg/depcache.cc
  66. +48
    -24
      apt-pkg/depcache.h
  67. +77
    -0
      apt-pkg/indexfile.cc
  68. +80
    -0
      apt-pkg/indexfile.h
  69. +82
    -23
      apt-pkg/init.cc
  70. +25
    -3
      apt-pkg/init.h
  71. +24
    -19
      apt-pkg/makefile
  72. +74
    -45
      apt-pkg/orderlist.cc
  73. +6
    -15
      apt-pkg/orderlist.h
  74. +29
    -27
      apt-pkg/packagemanager.cc
  75. +8
    -17
      apt-pkg/packagemanager.h
  76. +151
    -94
      apt-pkg/pkgcache.cc
  77. +47
    -17
      apt-pkg/pkgcache.h
  78. +249
    -439
      apt-pkg/pkgcachegen.cc
  79. +25
    -16
      apt-pkg/pkgcachegen.h
  80. +17
    -28
      apt-pkg/pkgrecords.cc
  81. +10
    -17
      apt-pkg/pkgrecords.h
  82. +45
    -0
      apt-pkg/pkgsystem.cc
  83. +95
    -0
      apt-pkg/pkgsystem.h
  84. +275
    -0
      apt-pkg/policy.cc
  85. +83
    -0
      apt-pkg/policy.h
  86. +127
    -297
      apt-pkg/sourcelist.cc
  87. +35
    -26
      apt-pkg/sourcelist.h
  88. +34
    -34
      apt-pkg/srcrecords.cc
  89. +32
    -15
      apt-pkg/srcrecords.h
  90. +259
    -23
      apt-pkg/tagfile.cc
  91. +34
    -15
      apt-pkg/tagfile.h
  92. +17
    -244
      apt-pkg/version.cc
  93. +58
    -11
      apt-pkg/version.h
  94. +210
    -0
      apt-pkg/versionmatch.cc
  95. +69
    -0
      apt-pkg/versionmatch.h
  96. +1
    -0
      buildlib/apti18n.h.in
  97. +22
    -32
      buildlib/archtable
  98. +327
    -112
      buildlib/config.guess
  99. +5
    -11
      buildlib/config.h.in
  100. +157
    -43
      buildlib/config.sub

+ 5
- 1
AUTHORS View File

@@ -6,16 +6,20 @@ CVS:jgg Jason Gunthorpe <jgg@debian.org>
CVS:che Ben Gertzfield <che@debian.org>
- Packaging and Releases

CVS:bod Brendan O'Dea <bod@debian.org>
- Perl Bindings

Past Contributures:

Brian White <bcwhite@verisim.com> - Project originator
Tom Lees <tom@lpsg.demon.co.uk> - DPKG documentation and ideas
Behan Webster <behanw@verisim.com> - Original GUI design
Scott Ellis <storm@gate.net> - Original packaging and beta releases
Branden Branden Robinson <branden@purdue.edu> - Man Page Documentation
Branden Robinson <branden@purdue.edu> - Man Page Documentation
Manoj Srivastava <srivasta@datasync.com> - 1st Generation FTP method and
dselect setup script
Adam Heath <doogie@debian.org> - 2nd Generation FTP method author
Ben Collins <bcollins@debian.org> - Initial RSH method
Many other bug reports through the Debian Bug system

NOTE: The ChangeLog generator will parse for names and email addresses. The


+ 8
- 7
Makefile View File

@@ -6,17 +6,17 @@ ifndef NOISY
.SILENT:
endif

.PHONY: default
default: startup all

.PHONY: headers library clean veryclean all binary program doc
all headers library clean veryclean binary program doc:
all headers library clean veryclean binary program doc dirs:
$(MAKE) -C apt-pkg $@
$(MAKE) -C apt-inst $@
$(MAKE) -C methods $@
# $(MAKE) -C methods/ftp $@
$(MAKE) -C cmdline $@
$(MAKE) -C ftparchive $@
$(MAKE) -C dselect $@
ifdef GUI
$(MAKE) -C deity $@
$(MAKE) -C gui $@
endif
$(MAKE) -C doc $@

# Some very common aliases
@@ -25,8 +25,9 @@ maintainer-clean dist-clean distclean pristine sanity: veryclean

# The startup target builds the necessary configure scripts. It should
# be used after a CVS checkout.
CONVERTED=environment.mak include/config.h makefile
CONVERTED=environment.mak include/config.h include/apti18n.h makefile
include buildlib/configure.mak
$(BUILDDIR)/include/config.h: buildlib/config.h.in
$(BUILDDIR)/include/apti18n.h: buildlib/apti18n.h.in
$(BUILDDIR)/environment.mak: buildlib/environment.mak.in
$(BUILDDIR)/makefile: buildlib/makefile.in

+ 154
- 0
apt-inst/contrib/arfile.cc View File

@@ -0,0 +1,154 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: arfile.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
/* ######################################################################

AR File - Handle an 'AR' archive
AR Archives have plain text headers at the start of each file
section. The headers are aligned on a 2 byte boundry.
Information about the structure of AR files can be found in ar(5)
on a BSD system, or in the binutils source.

##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#ifdef __GNUG__
#pragma implementation "apt-pkg/arfile.h"
#endif
#include <apt-pkg/arfile.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/error.h>

#include <stdlib.h>
/*}}}*/

// On-disk layout of a single AR member header: 60 bytes of plain ASCII,
// fields space-padded. Numeric fields are decimal except Mode, which is
// octal (see the base-8 StrToNum in LoadHeaders). Fields are NOT
// NUL-terminated.
struct ARArchive::MemberHeader
{
   char Name[16];    // member file name, space padded
   char MTime[12];   // modification time
   char UID[6];
   char GID[6];
   char Mode[8];     // permissions, octal
   char Size[10];    // data size in bytes, excluding padding
   char Magic[2];    // header terminator
};

// ARArchive::ARArchive - Constructor					/*{{{*/
// ---------------------------------------------------------------------
/* Parses all member headers immediately; on failure the error is left
   on the global _error stack for the caller to inspect. */
ARArchive::ARArchive(FileFd &File) : List(0), File(File)
{
   LoadHeaders();
}
									/*}}}*/
// ARArchive::~ARArchive - Destructor					/*{{{*/
// ---------------------------------------------------------------------
/* Releases every node of the singly linked member list. */
ARArchive::~ARArchive()
{
   for (Member *Cur = List; Cur != 0; )
   {
      Member *Next = Cur->Next;
      delete Cur;
      Cur = Next;
   }
   List = 0;
}
									/*}}}*/
// ARArchive::LoadHeaders - Load the headers from each file /*{{{*/
// ---------------------------------------------------------------------
/* AR files are structured with a 8 byte magic string followed by a 60
byte plain text header then the file data, another header, data, etc */
bool ARArchive::LoadHeaders()
{
signed long Left = File.Size();
// Check the magic byte
char Magic[8];
if (File.Read(Magic,sizeof(Magic)) == false)
return false;
if (memcmp(Magic,"!<arch>\012",sizeof(Magic)) != 0)
return _error->Error("Invalid archive signature");
Left -= sizeof(Magic);
// Read the member list
while (Left > 0)
{
MemberHeader Head;
if (File.Read(&Head,sizeof(Head)) == false)
return _error->Error("Error reading archive member header");
Left -= sizeof(Head);

// Convert all of the integer members
Member *Memb = new Member();
if (StrToNum(Head.MTime,Memb->MTime,sizeof(Head.MTime)) == false ||
StrToNum(Head.UID,Memb->UID,sizeof(Head.UID)) == false ||
StrToNum(Head.GID,Memb->GID,sizeof(Head.GID)) == false ||
StrToNum(Head.Mode,Memb->Mode,sizeof(Head.Mode),8) == false ||
StrToNum(Head.Size,Memb->Size,sizeof(Head.Size)) == false)
{
delete Memb;
return _error->Error("Invalid archive member header");
}
// Check for an extra long name string
if (memcmp(Head.Name,"#1/",3) == 0)
{
char S[300];
unsigned long Len;
if (StrToNum(Head.Name+3,Len,sizeof(Head.Size)-3) == false ||
Len >= strlen(S))
{
delete Memb;
return _error->Error("Invalid archive member header");
}
if (File.Read(S,Len) == false)
return false;
S[Len] = 0;
Memb->Name = S;
Memb->Size -= Len;
Left -= Len;
}
else
{
unsigned int I = sizeof(Head.Name) - 1;
for (; Head.Name[I] == ' '; I--);
Memb->Name = string(Head.Name,0,I+1);
}

// Account for the AR header alignment
unsigned Skip = Memb->Size % 2;
// Add it to the list
Memb->Next = List;
List = Memb;
Memb->Start = File.Tell();
if (File.Skip(Memb->Size + Skip) == false)
return false;
if (Left < (signed)(Memb->Size + Skip))
return _error->Error("Archive is too short");
Left -= Memb->Size + Skip;
}
if (Left != 0)
return _error->Error("Failed to read the archive headers");
return true;
}
/*}}}*/
// ARArchive::FindMember - Find a name in the member list		/*{{{*/
// ---------------------------------------------------------------------
/* Linear scan of the member list; returns 0 when no member carries
   the given name. */
const ARArchive::Member *ARArchive::FindMember(const char *Name) const
{
   for (const Member *Cur = List; Cur != 0; Cur = Cur->Next)
   {
      if (Cur->Name == Name)
	 return Cur;
   }
   return 0;
}
									/*}}}*/

+ 68
- 0
apt-inst/contrib/arfile.h View File

@@ -0,0 +1,68 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: arfile.h,v 1.2 2001/02/20 07:03:16 jgg Exp $
/* ######################################################################

AR File - Handle an 'AR' archive
This is a reader for the usual 4.4 BSD AR format. It allows raw
stream access to a single member at a time. Basically all this class
provides is header parsing and verification. It is up to the client
to correctly make use of the stream start/stop points.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_ARFILE_H
#define PKGLIB_ARFILE_H

#ifdef __GNUG__
#pragma interface "apt-pkg/arfile.h"
#endif

#include <string>
#include <apt-pkg/fileutl.h>

// Reader for BSD-style 'ar' archives. All member headers are parsed up
// front by the constructor; the caller then seeks File to Member::Start
// and reads Member::Size bytes itself.
class ARArchive
{
   // On-disk header layout, defined in arfile.cc
   struct MemberHeader;
   public:
   struct Member;
   protected:

   // Linked list of members
   Member *List;
   // Parse every member header; invoked by the constructor
   bool LoadHeaders();

   public:
   // The stream file
   FileFd &File;

   // Locate a member by name
   const Member *FindMember(const char *Name) const;

   ARArchive(FileFd &File);
   ~ARArchive();
};

// A member of the archive
struct ARArchive::Member
{
   // Fields from the header
   string Name;
   unsigned long MTime;
   unsigned long UID;
   unsigned long GID;
   unsigned long Mode;    // stored octal in the header
   unsigned long Size;    // data length in bytes, excluding pad byte
   // Location of the data.
   unsigned long Start;   // absolute file offset of the member data
   Member *Next;          // intrusive singly-linked list, owned by ARArchive
   Member() : Start(0), Next(0) {};
};

#endif

+ 342
- 0
apt-inst/contrib/extracttar.cc View File

@@ -0,0 +1,342 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: extracttar.cc,v 1.2 2001/02/20 07:03:17 jgg Exp $
/* ######################################################################

Extract a Tar - Tar Extractor

Some performance measurements showed that zlib performed quite poorly
in comparision to a forked gzip process. This tar extractor makes use
of the fact that dup'd file descriptors have the same seek pointer
and that gzip will not read past the end of a compressed stream,
even if there is more data. We use the dup property to track extraction
progress and the gzip feature to just feed gzip a fd in the middle
of an AR file.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#ifdef __GNUG__
#pragma implementation "apt-pkg/extracttar.h"
#endif
#include <apt-pkg/extracttar.h>

#include <apt-pkg/error.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/configuration.h>
#include <system.h>

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <fcntl.h>
/*}}}*/

// The on disk header for a tar file.
// One 512-byte block; numeric fields are octal ASCII (parsed base-8 in
// Go()). Name and LinkName are NOT NUL-terminated when they occupy the
// full 100 characters.
struct ExtractTar::TarHeader
{
   char Name[100];
   char Mode[8];
   char UserID[8];
   char GroupID[8];
   char Size[12];
   char MTime[12];
   char Checksum[8];
   char LinkFlag;        // record type, see ExtractTar::ItemType
   char LinkName[100];
   char MagicNumber[8];
   char UserName[32];
   char GroupName[32];
   char Major[8];
   char Minor[8];
};
// ExtractTar::ExtractTar - Constructor					/*{{{*/
// ---------------------------------------------------------------------
/* Records the compressed source and an upper bound on the compressed
   stream size (callers pass the AR member size). The gzip child is not
   spawned until Go() runs. */
ExtractTar::ExtractTar(FileFd &Fd,unsigned long Max) : File(Fd),
                       MaxInSize(Max)

{
   GZPid = -1;     // no gzip child yet
   InFd = -1;      // decompressed-side fd not open yet
   Eof = false;
}
									/*}}}*/
// ExtractTar::ExtractTar - Destructor					/*{{{*/
// ---------------------------------------------------------------------
/* Reap the gzip child without forcing; if it is still mid-stream Done
   reports that as an error on the _error stack. */
ExtractTar::~ExtractTar()
{
   Done(false);
}
									/*}}}*/
// ExtractTar::Done - Reap the gzip sub process				/*{{{*/
// ---------------------------------------------------------------------
/* If the force flag is given then error messages are suppressed - this
   means we hit the end of the tar file but there was still gzip data.
   Safe to call more than once: GZPid is reset to -1 after the first
   reap. Returns false only when the wait failed and Force was off. */
bool ExtractTar::Done(bool Force)
{
   // Closing our read end also unblocks a gzip stuck writing
   InFd.Close();
   if (GZPid <= 0)
      return true;

   /* If there is a pending error then we are cleaning up gzip and are
      not interested in it's failures */
   if (_error->PendingError() == true)
      Force = true;

   // Make sure we clean it up!
   kill(GZPid,SIGINT);
   if (ExecWait(GZPid,_config->Find("dir::bin::gzip","/bin/gzip").c_str(),
		Force) == false)
   {
      GZPid = -1;
      // When forcing, a failed wait is still reported as success
      return Force;
   }

   GZPid = -1;
   return true;
}
									/*}}}*/
// ExtractTar::StartGzip - Startup gzip /*{{{*/
// ---------------------------------------------------------------------
/* This creates a gzip sub process that has its input as the file itself.
If this tar file is embedded into something like an ar file then
gzip will efficiently ignore the extra bits. */
bool ExtractTar::StartGzip()
{
int Pipes[2];
if (pipe(Pipes) != 0)
return _error->Errno("pipe","Failed to create pipes");
// Fork off the process
GZPid = ExecFork();

// Spawn the subprocess
if (GZPid == 0)
{
// Setup the FDs
dup2(Pipes[1],STDOUT_FILENO);
dup2(File.Fd(),STDIN_FILENO);
int Fd = open("/dev/null",O_RDWR);
if (Fd == -1)
_exit(101);
dup2(Fd,STDERR_FILENO);
close(Fd);
SetCloseExec(STDOUT_FILENO,false);
SetCloseExec(STDIN_FILENO,false);
SetCloseExec(STDERR_FILENO,false);
const char *Args[3];
Args[0] = _config->Find("dir::bin::gzip","/bin/gzip").c_str();
Args[1] = "-d";
Args[2] = 0;
execv(Args[0],(char **)Args);
cerr << "Failed to exec gzip " << Args[0] << endl;
_exit(100);
}

// Fix up our FDs
InFd.Fd(Pipes[0]);
close(Pipes[1]);
return true;
}
/*}}}*/
// ExtractTar::Go - Perform extraction					/*{{{*/
// ---------------------------------------------------------------------
/* This reads each 512 byte block from the archive and extracts the header
   information into the Item structure. Then it resolves the UID/GID and
   invokes the correct processing function. Returns false on the first
   hard error; unknown record types only produce a warning and are
   skipped. */
bool ExtractTar::Go(pkgDirStream &Stream)
{
   if (StartGzip() == false)
      return false;

   // Loop over all blocks
   string LastLongLink;
   string LastLongName;
   while (1)
   {
      bool BadRecord = false;
      unsigned char Block[512];
      if (InFd.Read(Block,sizeof(Block),true) == false)
	 return false;
      if (InFd.Eof() == true)
	 break;

      // Get the checksum
      TarHeader *Tar = (TarHeader *)Block;
      unsigned long CheckSum;
      if (StrToNum(Tar->Checksum,CheckSum,sizeof(Tar->Checksum),8) == false)
	 return _error->Error("Corrupted archive");

      /* Compute the checksum field. The actual checksum is blanked out
	 with spaces so it is not included in the computation */
      unsigned long NewSum = 0;
      memset(Tar->Checksum,' ',sizeof(Tar->Checksum));
      for (unsigned int I = 0; I != sizeof(Block); I++)
	 NewSum += Block[I];

      /* Check for a block of nulls - in this case we kill gzip, GNU tar
         does this.. */
      if (NewSum == ' '*sizeof(Tar->Checksum))
	 return Done(true);

      if (NewSum != CheckSum)
	 return _error->Error("Tar Checksum failed, archive corrupted");

      // Decode all of the fields
      pkgDirStream::Item Itm;
      unsigned long UID;
      unsigned long GID;
      if (StrToNum(Tar->Mode,Itm.Mode,sizeof(Tar->Mode),8) == false ||
	  StrToNum(Tar->UserID,UID,sizeof(Tar->UserID),8) == false ||
	  StrToNum(Tar->GroupID,GID,sizeof(Tar->GroupID),8) == false ||
	  StrToNum(Tar->Size,Itm.Size,sizeof(Tar->Size),8) == false ||
	  StrToNum(Tar->MTime,Itm.MTime,sizeof(Tar->MTime),8) == false ||
	  StrToNum(Tar->Major,Itm.Major,sizeof(Tar->Major),8) == false ||
	  StrToNum(Tar->Minor,Itm.Minor,sizeof(Tar->Minor),8) == false)
	 return _error->Error("Corrupted archive");

      /* Grab the filename and link target. A name of exactly 100 chars
	 carries no NUL in the header, so copy into terminated local
	 buffers; the old code wrote one byte past Tar->Name and
	 terminated the wrong array for LinkName, leaving it possibly
	 unterminated. */
      char Name[sizeof(Tar->Name)+1];
      char LinkName[sizeof(Tar->LinkName)+1];
      if (LastLongName.empty() == false)
	 Itm.Name = (char *)LastLongName.c_str();
      else
      {
	 memcpy(Name,Tar->Name,sizeof(Tar->Name));
	 Name[sizeof(Tar->Name)] = 0;
	 Itm.Name = Name;
      }
      // Strip a leading "./" from member names
      if (Itm.Name[0] == '.' && Itm.Name[1] == '/' && Itm.Name[2] != 0)
	 Itm.Name += 2;

      // Grab the link target
      memcpy(LinkName,Tar->LinkName,sizeof(Tar->LinkName));
      LinkName[sizeof(Tar->LinkName)] = 0;
      Itm.LinkTarget = LinkName;

      if (LastLongLink.empty() == false)
	 Itm.LinkTarget = (char *)LastLongLink.c_str();

      // Convert the type over
      switch (Tar->LinkFlag)
      {
	 case NormalFile0:
	 case NormalFile:
	 Itm.Type = pkgDirStream::Item::File;
	 break;

	 case HardLink:
	 Itm.Type = pkgDirStream::Item::HardLink;
	 break;

	 case SymbolicLink:
	 Itm.Type = pkgDirStream::Item::SymbolicLink;
	 break;

	 case CharacterDevice:
	 Itm.Type = pkgDirStream::Item::CharDevice;
	 break;

	 case BlockDevice:
	 Itm.Type = pkgDirStream::Item::BlockDevice;
	 break;

	 case Directory:
	 Itm.Type = pkgDirStream::Item::Directory;
	 break;

	 case FIFO:
	 Itm.Type = pkgDirStream::Item::FIFO;
	 break;

	 // GNU extension: the data of this record is the link target
	 // for the NEXT record
	 case GNU_LongLink:
	 {
	    unsigned long Length = Itm.Size;
	    unsigned char Block[512];
	    while (Length > 0)
	    {
	       if (InFd.Read(Block,sizeof(Block),true) == false)
		  return false;
	       if (Length <= sizeof(Block))
	       {
		  LastLongLink.append(Block,Block+sizeof(Block));
		  break;
	       }
	       LastLongLink.append(Block,Block+sizeof(Block));
	       Length -= sizeof(Block);
	    }
	    continue;
	 }

	 // GNU extension: the data of this record is the name of the
	 // NEXT record
	 case GNU_LongName:
	 {
	    unsigned long Length = Itm.Size;
	    unsigned char Block[512];
	    while (Length > 0)
	    {
	       if (InFd.Read(Block,sizeof(Block),true) == false)
		  return false;
	       if (Length < sizeof(Block))
	       {
		  LastLongName.append(Block,Block+sizeof(Block));
		  break;
	       }
	       LastLongName.append(Block,Block+sizeof(Block));
	       Length -= sizeof(Block);
	    }
	    continue;
	 }

	 default:
	 BadRecord = true;
	 _error->Warning("Unknown TAR header type %u, member %s",(unsigned)Tar->LinkFlag,Tar->Name);
	 break;
      }

      int Fd = -1;
      if (BadRecord == false)
	 if (Stream.DoItem(Itm,Fd) == false)
	    return false;

      // Copy the file over the FD, rounding reads up to whole blocks
      unsigned long Size = Itm.Size;
      while (Size != 0)
      {
	 unsigned char Junk[32*1024];
	 unsigned long Read = MIN(Size,sizeof(Junk));
	 if (InFd.Read(Junk,((Read+511)/512)*512) == false)
	    return false;

	 if (BadRecord == false)
	 {
	    if (Fd > 0)
	    {
	       if (write(Fd,Junk,Read) != (signed)Read)
		  return Stream.Fail(Itm,Fd);
	    }
	    else
	    {
	       /* An Fd of -2 means to send to a special processing
		  function */
	       if (Fd == -2)
		  if (Stream.Process(Itm,Junk,Read,Itm.Size - Size) == false)
		     return Stream.Fail(Itm,Fd);
	    }
	 }

	 Size -= Read;
      }

      // And finish up
      if (Itm.Size != 0 && BadRecord == false)
	 if (Stream.FinishedFile(Itm,Fd) == false)
	    return false;

      LastLongName.erase();
      LastLongLink.erase();
   }

   return Done(false);
}
									/*}}}*/

+ 54
- 0
apt-inst/contrib/extracttar.h View File

@@ -0,0 +1,54 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: extracttar.h,v 1.2 2001/02/20 07:03:17 jgg Exp $
/* ######################################################################

Extract a Tar - Tar Extractor
The tar extractor takes an ordinary gzip compressed tar stream from
the given file and explodes it, passing the individual items to the
given Directory Stream for processing.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_EXTRACTTAR_H
#define PKGLIB_EXTRACTTAR_H

#ifdef __GNUG__
#pragma interface "apt-pkg/extracttar.h"
#endif

#include <apt-pkg/fileutl.h>
#include <apt-pkg/dirstream.h>

// Drives a gzip -d child over the compressed stream in File and feeds
// each extracted item to a pkgDirStream (see Go in extracttar.cc).
class ExtractTar
{
   protected:

   struct TarHeader;

   // The various types items can be (values of TarHeader::LinkFlag)
   enum ItemType {NormalFile0 = '\0',NormalFile = '0',HardLink = '1',
                  SymbolicLink = '2',CharacterDevice = '3',
                  BlockDevice = '4',Directory = '5',FIFO = '6',
                  GNU_LongLink = 'K',GNU_LongName = 'L'};

   FileFd &File;              // the raw (compressed) input stream
   unsigned long MaxInSize;   // upper bound on the compressed stream size
   int GZPid;                 // pid of the gzip child, -1 when not running
   FileFd InFd;               // read side of the pipe from gzip
   bool Eof;

   // Fork and reap gzip
   bool StartGzip();
   bool Done(bool Force);

   public:

   bool Go(pkgDirStream &Stream);

   ExtractTar(FileFd &Fd,unsigned long Max);
   virtual ~ExtractTar();
};

#endif

+ 30
- 0
apt-inst/database.cc View File

@@ -0,0 +1,30 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: database.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
/* ######################################################################

Data Base Abstraction
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#ifdef __GNUG__
#pragma implementation "apt-pkg/database.h"
#endif

#include <apt-pkg/database.h>
/*}}}*/

// DataBase::GetMetaTmp - Get the temp dir				/*{{{*/
// ---------------------------------------------------------------------
/* Lazily initializes the meta temporary directory on first use; an
   empty MetaDir marks the not-yet-inited state. */
bool pkgDataBase::GetMetaTmp(string &Dir)
{
   if (MetaDir.empty() == true && InitMetaTmp(MetaDir) == false)
      return false;
   Dir = MetaDir;
   return true;
}
									/*}}}*/

+ 56
- 0
apt-inst/database.h View File

@@ -0,0 +1,56 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: database.h,v 1.2 2001/02/20 07:03:16 jgg Exp $
/* ######################################################################

Data Base Abstraction
This class provides a simple interface to an abstract notion of a
database directory for storing state information about the system.

The 'Meta' information for a package is the control information and
setup scripts stored inside the archive. GetMetaTmp returns the name of
a directory that is used to store named files containing the control
information.
The File Listing is the database of installed files. It is loaded
into the memory/persistent cache structure by the ReadFileList method.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_DATABASE_H
#define PKGLIB_DATABASE_H

#ifdef __GNUG__
#pragma interface "apt-pkg/database.h"
#endif

#include <apt-pkg/filelist.h>
#include <apt-pkg/pkgcachegen.h>

// Abstract system database: pairs a package cache generator with a file
// list cache and a temp directory for extracted control ("meta") files.
// Concrete backends implement the three pure virtuals.
class pkgDataBase
{
   protected:

   pkgCacheGenerator *Cache;   // owned; freed by the destructor
   pkgFLCache *FList;          // owned; freed by the destructor
   string MetaDir;             // empty until InitMetaTmp has run
   virtual bool InitMetaTmp(string &Dir) = 0;

   public:

   // Some manipulators for the cache and generator
   inline pkgCache &GetCache() {return Cache->GetCache();};
   inline pkgFLCache &GetFLCache() {return *FList;};
   inline pkgCacheGenerator &GetGenerator() {return *Cache;};

   // Lazily-initializing accessor for the meta temp directory
   bool GetMetaTmp(string &Dir);
   virtual bool ReadyFileList(OpProgress &Progress) = 0;
   virtual bool ReadyPkgCache(OpProgress &Progress) = 0;
   virtual bool LoadChanges() = 0;

   pkgDataBase() : Cache(0), FList(0) {};
   virtual ~pkgDataBase() {delete Cache; delete FList;};
};

#endif

+ 262
- 0
apt-inst/deb/debfile.cc View File

@@ -0,0 +1,262 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: debfile.cc,v 1.2 2001/02/20 07:03:17 jgg Exp $
/* ######################################################################

Debian Archive File (.deb)
.DEB archives are AR files containing two tars and an empty marker
member called 'debian-binary'. The two tars contain the meta data and
the actual archive contents. Thus this class is a very simple wrapper
around ar/tar to simply extract the right tar files.
It also uses the deb package list parser to parse the control file
into the cache.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#ifdef __GNUG__
#pragma implementation "apt-pkg/debfile.h"
#endif

#include <apt-pkg/debfile.h>
#include <apt-pkg/extracttar.h>
#include <apt-pkg/error.h>
#include <apt-pkg/deblistparser.h>

#include <sys/stat.h>
#include <unistd.h>
/*}}}*/

// DebFile::debDebFile - Constructor					/*{{{*/
// ---------------------------------------------------------------------
/* Open the AR file and check for consistency. Errors (a bad AR or a
   missing required member) are left on the _error stack; callers must
   check _error->PendingError() after construction. */
debDebFile::debDebFile(FileFd &File) : File(File), AR(File)
{
   // AR's constructor already parsed the headers; bail if that failed
   if (_error->PendingError() == true)
      return;

   // Check the members for validity
   if (CheckMember("debian-binary") == false ||
       CheckMember("control.tar.gz") == false ||
       CheckMember("data.tar.gz") == false)
      return;
}
									/*}}}*/
// DebFile::CheckMember - Check if a named member is in the archive	/*{{{*/
// ---------------------------------------------------------------------
/* Wraps FindMember to give a friendlier diagnostic for people playing
   around with non-deb files. */
bool debDebFile::CheckMember(const char *Name)
{
   if (AR.FindMember(Name) != 0)
      return true;
   return _error->Error("This is not a valid DEB archive, missing '%s' member",Name);
}
									/*}}}*/
// DebFile::GotoMember - Jump to a Member				/*{{{*/
// ---------------------------------------------------------------------
/* Jump in the file to the start of a named member and return the
   information about that member. The caller can then read from the file
   up to the returned size. Note, since this relies on the file position
   this is a destructive operation, it also changes the last returned
   Member structure - so don't nest them! */
const ARArchive::Member *debDebFile::GotoMember(const char *Name)
{
   const ARArchive::Member *Memb = AR.FindMember(Name);
   if (Memb == 0)
   {
      _error->Error("Internal Error, could not locate member %s",Name);
      return 0;
   }

   // Position the stream at the member's data
   if (File.Seek(Memb->Start) == false)
      return 0;
   return Memb;
}
									/*}}}*/
// DebFile::ExtractControl - Extract Control information		/*{{{*/
// ---------------------------------------------------------------------
/* Extract the control information into the Database's temporary
   directory. The process cwd is restored afterwards even if the tar
   extraction fails (the old code returned early and left the process
   sitting in the tmp directory). */
bool debDebFile::ExtractControl(pkgDataBase &DB)
{
   // Get the archive member and position the file
   const ARArchive::Member *Member = GotoMember("control.tar.gz");
   if (Member == 0)
      return false;

   // Prepare Tar
   ControlExtract Extract;
   ExtractTar Tar(File,Member->Size);
   if (_error->PendingError() == true)
      return false;

   // Get into the temporary directory
   string Cwd = SafeGetCWD();
   string Tmp;
   if (DB.GetMetaTmp(Tmp) == false)
      return false;
   if (chdir(Tmp.c_str()) != 0)
      return _error->Errno("chdir","Couldn't change to %s",Tmp.c_str());

   // Do extraction
   bool Res = Tar.Go(Extract);

   // Switch out of the tmp directory in every case; fall back to /
   if (chdir(Cwd.c_str()) != 0)
      chdir("/");

   return Res;
}
									/*}}}*/
// DebFile::ExtractArchive - Extract the archive data itself		/*{{{*/
// ---------------------------------------------------------------------
/* Positions the file at the data member and hands the stream to the
   tar extractor. */
bool debDebFile::ExtractArchive(pkgDirStream &Stream)
{
   // Locate the data member and position the file on it
   const ARArchive::Member *Memb = AR.FindMember("data.tar.gz");
   if (Memb == 0)
      return _error->Error("Internal Error, could not locate member");
   if (File.Seek(Memb->Start) == false)
      return false;

   // Run the extractor over it
   ExtractTar Tar(File,Memb->Size);
   if (_error->PendingError() == true)
      return false;
   return Tar.Go(Stream);
}
									/*}}}*/
// DebFile::MergeControl - Merge the control information		/*{{{*/
// ---------------------------------------------------------------------
/* This reads the extracted control file into the cache and returns the
   version that was parsed. All this really does is select the correct
   parser and correct file to parse. On any failure an end() iterator
   over DB's cache is returned and the error is on the _error stack. */
pkgCache::VerIterator debDebFile::MergeControl(pkgDataBase &DB)
{
   // Open the control file (ExtractControl must have run first)
   string Tmp;
   if (DB.GetMetaTmp(Tmp) == false)
      return pkgCache::VerIterator(DB.GetCache());
   FileFd Fd(Tmp + "control",FileFd::ReadOnly);
   if (_error->PendingError() == true)
      return pkgCache::VerIterator(DB.GetCache());

   // Parse it
   debListParser Parse(&Fd);
   pkgCache::VerIterator Ver(DB.GetCache());
   if (DB.GetGenerator().MergeList(Parse,&Ver) == false)
      return pkgCache::VerIterator(DB.GetCache());

   // MergeList leaves Ver pointing at the version it created
   if (Ver.end() == true)
      _error->Error("Failed to locate a valid control file");
   return Ver;
}
									/*}}}*/

// DebFile::ControlExtract::DoItem - Control Tar Extraction		/*{{{*/
// ---------------------------------------------------------------------
/* This directory stream handler for the control tar handles extracting
   it into the temporary meta directory. It only extracts files, it does
   not create directories, links or anything else. */
bool debDebFile::ControlExtract::DoItem(Item &Itm,int &Fd)
{
   // Everything but plain files is silently dropped
   if (Itm.Type != Item::File)
      return true;

   /* Cleanse the file name, prevent people from trying to unpack into
      absolute paths, .., etc */
   for (char *Cur = Itm.Name; *Cur != 0; ++Cur)
   {
      if (*Cur == '/')
	 *Cur = '_';
   }

   /* Force the ownership to be root and ensure correct permissions,
      go-w, the rest are left untouched */
   Itm.UID = 0;
   Itm.GID = 0;
   Itm.Mode &= ~(S_IWGRP | S_IWOTH);

   return pkgDirStream::DoItem(Itm,Fd);
}
									/*}}}*/

// MemControlExtract::DoItem - Check if it is the control file		/*{{{*/
// ---------------------------------------------------------------------
/* This sets up to extract the control block member file into a memory
   block of just the right size. All other files go into the bit bucket.
   The +2 leaves room for the two trailing newlines appended by Read/
   TakeControl before scanning. */
bool debDebFile::MemControlExtract::DoItem(Item &Itm,int &Fd)
{
   // At the control file, allocate buffer memory.
   if (Member == Itm.Name)
   {
      delete [] Control;
      Control = new char[Itm.Size+2];
      IsControl = true;
      Fd = -2; // Signal to pass to Process
      Length = Itm.Size;
   }
   else
      IsControl = false;

   return true;
}
									/*}}}*/
// MemControlExtract::Process - Process extracting the control file	/*{{{*/
// ---------------------------------------------------------------------
/* Just memcopy the block from the tar extractor and put it in the right
   place in the pre-allocated memory block. Pos is the offset of this
   chunk within the member, as supplied by ExtractTar::Go. */
bool debDebFile::MemControlExtract::Process(Item &Itm,const unsigned char *Data,
			     unsigned long Size,unsigned long Pos)
{
   memcpy(Control + Pos, Data,Size);
   return true;
}
									/*}}}*/
// MemControlExtract::Read - Read the control information from the deb	/*{{{*/
// ---------------------------------------------------------------------
/* This uses the internal tar extractor to fetch the control file, and then
   it parses it into a tag section parser. Returns true with Control == 0
   when the tar contained no matching member. */
bool debDebFile::MemControlExtract::Read(debDebFile &Deb)
{
   // Get the archive member and position the file
   const ARArchive::Member *Member = Deb.GotoMember("control.tar.gz");
   if (Member == 0)
      return false;

   // Extract it; DoItem/Process above fill the Control buffer
   ExtractTar Tar(Deb.GetFile(),Member->Size);
   if (Tar.Go(*this) == false)
      return false;

   // No control member was seen - not treated as an error here
   if (Control == 0)
      return true;

   // Terminate with a blank line so the tag scanner sees a section end
   Control[Length] = '\n';
   Control[Length+1] = '\n';
   if (Section.Scan(Control,Length+2) == false)
      return _error->Error("Unparsible control file");
   return true;
}
									/*}}}*/
// MemControlExtract::TakeControl - Parse a memory block		/*{{{*/
// ---------------------------------------------------------------------
/* The given memory block is loaded into the parser and parsed as a
   control record. */
bool debDebFile::MemControlExtract::TakeControl(const void *Data,unsigned long Size)
{
   // Replace any previous buffer; +2 for the terminating blank line
   delete [] Control;
   Length = Size;
   Control = new char[Size+2];
   memcpy(Control,Data,Size);

   // Terminate the record so the scanner sees an end-of-section
   Control[Size] = '\n';
   Control[Size+1] = '\n';
   return Section.Scan(Control,Size+2);
}
									/*}}}*/


+ 92
- 0
apt-inst/deb/debfile.h View File

@@ -0,0 +1,92 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: debfile.h,v 1.2 2001/02/20 07:03:17 jgg Exp $
/* ######################################################################

Debian Archive File (.deb)

This Class handles all the operations performed directly on .deb
files. It makes use of the AR and TAR classes to give the necessary
external interface.
There are only two things that can be done with a raw package,
extract its control information and extract the contents themselves.

This should probably subclass an as-yet unwritten super class to
produce a generic archive mechanism.
The memory control file extractor is useful to extract a single file
into memory from the control.tar.gz
##################################################################### */
/*}}}*/
#ifndef PKGLIB_DEBFILE_H
#define PKGLIB_DEBFILE_H

#ifdef __GNUG__
#pragma interface "apt-pkg/debfile.h"
#endif

#include <apt-pkg/arfile.h>
#include <apt-pkg/database.h>
#include <apt-pkg/dirstream.h>
#include <apt-pkg/tagfile.h>

// Handle to an opened .deb archive. Wraps the outer AR container and
// offers control/data extraction via the nested extractor classes.
class debDebFile
{
   protected:

   // The .deb on disk; the caller retains ownership of the descriptor.
   FileFd &File;
   // Parsed directory of the outer AR container.
   ARArchive AR;

   // Checks the named AR member (implementation not visible here -
   // presumably validates presence; confirm against debfile.cc).
   bool CheckMember(const char *Name);

   public:

   class ControlExtract;
   class MemControlExtract;

   // Unpack the control member into the database's meta directory.
   bool ExtractControl(pkgDataBase &DB);
   // Stream the data member through the given directory stream.
   bool ExtractArchive(pkgDirStream &Stream);
   // Merge the control information into the package cache.
   pkgCache::VerIterator MergeControl(pkgDataBase &DB);
   // Seek the underlying file to the named AR member.
   const ARArchive::Member *GotoMember(const char *Name);
   inline FileFd &GetFile() {return File;};

   debDebFile(FileFd &File);
};

// Directory stream that filters which members of the control tarball
// get written to disk (selection logic lives in the .cc DoItem).
class debDebFile::ControlExtract : public pkgDirStream
{
   public:

   virtual bool DoItem(Item &Itm,int &Fd);
};

// Directory stream that captures a single member of control.tar.gz into
// a memory buffer and parses it as a tag section.
class debDebFile::MemControlExtract : public pkgDirStream
{
   // Set by DoItem while the current tar member matches Member.
   bool IsControl;

   public:

   // Owned buffer holding the member data plus two appended newlines;
   // released in the destructor.
   // NOTE(review): no copy constructor/assignment is declared - copying
   // an instance would double-delete Control. Confirm instances are
   // never copied.
   char *Control;
   // Parsed view over Control after Read/TakeControl succeed.
   pkgTagSection Section;
   // Size of the member data, excluding the two appended newlines.
   unsigned long Length;
   // Name of the tar member to capture (defaults to "control").
   string Member;

   // Members from DirStream
   virtual bool DoItem(Item &Itm,int &Fd);
   virtual bool Process(Item &Itm,const unsigned char *Data,
			unsigned long Size,unsigned long Pos);

   // Helpers
   bool Read(debDebFile &Deb);
   bool TakeControl(const void *Data,unsigned long Size);

   MemControlExtract() : IsControl(false), Control(0), Length(0), Member("control") {};
   MemControlExtract(string Member) : IsControl(false), Control(0), Length(0), Member(Member) {};
   ~MemControlExtract() {delete [] Control;};
};
/*}}}*/

#endif

+ 490
- 0
apt-inst/deb/dpkgdb.cc View File

@@ -0,0 +1,490 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: dpkgdb.cc,v 1.2 2001/02/20 07:03:17 jgg Exp $
/* ######################################################################

DPKGv1 Database Implementation
This class provides parsers and other implementations for the DPKGv1
database. It reads the diversion file, the list files and the status
file to build both the list of currently installed files and the
currently installed package list.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#ifdef __GNUG__
#pragma implementation "apt-pkg/dpkgdb.h"
#endif

#include <apt-pkg/dpkgdb.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/tagfile.h>
#include <apt-pkg/strutl.h>

#include <stdio.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
/*}}}*/

// EraseDir - Erase A Directory /*{{{*/
// ---------------------------------------------------------------------
/* This is necessary to create a new empty sub directory. The caller should
invoke mkdir after this with the proper permissions and check for
error. Maybe stick this in fileutils */
static bool EraseDir(const char *Dir)
{
// First we try a simple RM
if (rmdir(Dir) == 0 ||
errno == ENOENT)
return true;
// A file? Easy enough..
if (errno == ENOTDIR)
{
if (unlink(Dir) != 0)
return _error->Errno("unlink","Failed to remove %s",Dir);
return true;
}
// Should not happen
if (errno != ENOTEMPTY)
return _error->Errno("rmdir","Failed to remove %s",Dir);
// Purge it using rm
int Pid = ExecFork();

// Spawn the subprocess
if (Pid == 0)
{
execlp(_config->Find("Dir::Bin::rm","/bin/rm").c_str(),
"rm","-rf","--",Dir,0);
_exit(100);
}
return ExecWait(Pid,_config->Find("dir::bin::rm","/bin/rm").c_str());
}
/*}}}*/
// DpkgDB::debDpkgDB - Constructor					/*{{{*/
// ---------------------------------------------------------------------
/* Derive the dpkg admin directory from the configured status file path
   and start out with empty caches and no diversion state. */
debDpkgDB::debDpkgDB() : CacheMap(0), FileMap(0), DiverInode(0), DiverTime(0)
{
   AdminDir = flNotFile(_config->Find("Dir::State::status"));
}
									/*}}}*/
// DpkgDB::~debDpkgDB - Destructor					/*{{{*/
// ---------------------------------------------------------------------
/* Release the caches and their backing maps. */
debDpkgDB::~debDpkgDB()
{
   // The caches must go before the maps that back them.
   delete Cache;
   delete FList;
   delete CacheMap;
   delete FileMap;
   Cache = 0;
   FList = 0;
   CacheMap = 0;
   FileMap = 0;
}
									/*}}}*/
// DpkgDB::InitMetaTmp - Get the temp dir for meta information		/*{{{*/
// ---------------------------------------------------------------------
/* This creates+empties the meta temporary directory /var/lib/dpkg/tmp.ci
   Only one package at a time can be using the returned meta directory. */
bool debDpkgDB::InitMetaTmp(string &Dir)
{
   // Clear out any leftovers and recreate the directory.
   string Tmp = AdminDir + "tmp.ci/";
   if (EraseDir(Tmp.c_str()) == false)
      return _error->Error("Unable to create %s",Tmp.c_str());
   if (mkdir(Tmp.c_str(),0755) != 0)
      return _error->Errno("mkdir","Unable to create %s",Tmp.c_str());

   // Verify it is on the same filesystem as the main info directory,
   // otherwise the later rename-based checkpointing cannot work.
   struct stat St;
   if (stat((AdminDir + "info").c_str(),&St) != 0)
      return _error->Errno("stat","Failed to stat %sinfo",AdminDir.c_str());
   dev_t Dev = St.st_dev;
   if (stat(Tmp.c_str(),&St) != 0)
      return _error->Errno("stat","Failed to stat %s",Tmp.c_str());
   if (Dev != St.st_dev)
      return _error->Error("The info and temp directories need to be on the same filesystem");

   // Done
   Dir = Tmp;
   return true;
}
									/*}}}*/
// DpkgDB::ReadyPkgCache - Prepare the cache with the current status	/*{{{*/
// ---------------------------------------------------------------------
/* This reads in the status file into an empty cache. This really needs
   to be somehow unified with the high level APT notion of the Database
   directory, but there is no clear way on how to do that yet. */
bool debDpkgDB::ReadyPkgCache(OpProgress &Progress)
{
   // Already built - just report completion to the progress meter.
   if (Cache != 0)
   {
      Progress.OverallProgress(1,1,1,"Reading Package Lists");
      return true;
   }

   // Drop any stale backing map before rebuilding (delete of 0 is safe).
   delete CacheMap;
   CacheMap = 0;

   if (pkgMakeOnlyStatusCache(Progress,&CacheMap) == false)
      return false;
   Cache->DropProgress();
   return true;
}
									/*}}}*/
// DpkgDB::ReadFList - Read the File Listings in			/*{{{*/
// ---------------------------------------------------------------------
/* This reads the file listing in from the state directory. This is a
   performance critical routine, as it needs to parse about 50k lines of
   text spread over a hundred or more files. For an initial cold start
   most of the time is spent in reading file inodes and so on, not
   actually parsing. Small list files are read into a reused stack of
   one 32K buffer; larger ones are mmaped. */
bool debDpkgDB::ReadFList(OpProgress &Progress)
{
   // Count the number of packages we need to read information for
   unsigned long Total = 0;
   pkgCache &Cache = this->Cache->GetCache();
   for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; I++)
   {
      // Only not installed packages have no files.
      if (I->CurrentState == pkgCache::State::NotInstalled)
	 continue;
      Total++;
   }

   /* Switch into the admin dir, this prevents useless lookups for the
      path components */
   string Cwd = SafeGetCWD();
   if (chdir((AdminDir + "info/").c_str()) != 0)
      return _error->Errno("chdir","Failed to change to the admin dir %sinfo",AdminDir.c_str());

   // Allocate a buffer. Anything larger than this buffer will be mmaped
   unsigned long BufSize = 32*1024;
   char *Buffer = new char[BufSize];

   // Begin Loading them
   unsigned long Count = 0;
   char Name[300];
   for (pkgCache::PkgIterator I = Cache.PkgBegin(); I.end() == false; I++)
   {
      /* Only not installed packages have no files. ConfFile packages have
	 file lists but we don't want to read them in */
      if (I->CurrentState == pkgCache::State::NotInstalled ||
	  I->CurrentState == pkgCache::State::ConfigFiles)
	 continue;

      // Fetch a package handle to associate with the file
      pkgFLCache::PkgIterator FlPkg = FList->GetPkg(I.Name(),0,true);
      if (FlPkg.end() == true)
      {
	 _error->Error("Internal Error getting a Package Name");
	 break;
      }

      Progress.OverallProgress(Count,Total,1,"Reading File Listing");

      // Open the list file (relative to the info dir we chdir'd into)
      snprintf(Name,sizeof(Name),"%s.list",I.Name());
      int Fd = open(Name,O_RDONLY);

      /* Okay this is very strange and bad.. Best thing is to bail and
	 instruct the user to look into it. */
      struct stat Stat;
      if (Fd == -1 || fstat(Fd,&Stat) != 0)
      {
	 _error->Errno("open","Failed to open the list file '%sinfo/%s'. If you "
		       "cannot restore this file then make it empty "
		       "and immediately re-install the same version of the package!",
		       AdminDir.c_str(),Name);
	 break;
      }

      // Set File to be a memory buffer containing the whole file
      char *File;
      if ((unsigned)Stat.st_size < BufSize)
      {
	 if (read(Fd,Buffer,Stat.st_size) != Stat.st_size)
	 {
	    _error->Errno("read","Failed reading the list file %sinfo/%s",
			  AdminDir.c_str(),Name);
	    close(Fd);
	    break;
	 }
	 File = Buffer;
      }
      else
      {
	 // Use mmap
	 File = (char *)mmap(0,Stat.st_size,PROT_READ,MAP_PRIVATE,Fd,0);
	 if (File == (char *)(-1))
	 {
	    _error->Errno("mmap","Failed reading the list file %sinfo/%s",
			  AdminDir.c_str(),Name);
	    close(Fd);
	    break;
	 }
      }

      // Parse it, one pathname per line.
      const char *Start = File;
      const char *End = File;
      const char *Finish = File + Stat.st_size;
      for (; End < Finish; End++)
      {
	 // Not an end of line
	 if (*End != '\n' && End + 1 < Finish)
	    continue;

	 // Skip blank lines
	 // NOTE(review): this also skips one-character names; confirm
	 // that is intended (the shortest real entry, "/.", is 2 chars).
	 if (End - Start > 1)
	 {
	    pkgFLCache::NodeIterator Node = FList->GetNode(Start,End,
				   FlPkg.Offset(),true,false);
	    if (Node.end() == true)
	    {
	       _error->Error("Internal Error getting a Node");
	       break;
	    }
	 }

	 // Skip past the end of line
	 // NOTE(review): *End is read before the End < Finish bound is
	 // tested - one byte past the mapping if the file lacks a final
	 // newline; verify.
	 for (; *End == '\n' && End < Finish; End++);
	 Start = End;
      }

      close(Fd);
      if ((unsigned)Stat.st_size >= BufSize)
	 munmap((caddr_t)File,Stat.st_size);

      // Failed: the inner break above leaves End short of Finish, which
      // propagates the failure out of the package loop here.
      if (End < Finish)
	 break;

      Count++;
   }

   delete [] Buffer;
   // Best effort restore of the working directory.
   if (chdir(Cwd.c_str()) != 0)
      chdir("/");

   return !_error->PendingError();
}
									/*}}}*/
// DpkgDB::ReadDiversions - Load the diversions file			/*{{{*/
// ---------------------------------------------------------------------
/* Read the diversion file in from disk. This is usually invoked by
   LoadChanges before performing an operation that uses the FLCache.
   The file is a sequence of 3-line records: the diverted-from path,
   the diverted-to path, and the owning package (":" for none). */
bool debDpkgDB::ReadDiversions()
{
   // No diversions file at all simply means there are no diversions.
   struct stat Stat;
   if (stat((AdminDir + "diversions").c_str(),&Stat) != 0)
      return true;

   if (_error->PendingError() == true)
      return false;

   FILE *Fd = fopen((AdminDir + "diversions").c_str(),"r");
   if (Fd == 0)
      return _error->Errno("fopen","Failed to open the diversions file %sdiversions",AdminDir.c_str());

   FList->BeginDiverLoad();
   while (1)
   {
      char From[300];
      char To[300];
      char Package[100];

      // Read the three lines in; EOF on the first line is a clean end.
      if (fgets(From,sizeof(From),Fd) == 0)
	 break;
      if (fgets(To,sizeof(To),Fd) == 0 ||
	  fgets(Package,sizeof(Package),Fd) == 0)
      {
	 _error->Error("The diversion file is corrupted");
	 break;
      }

      // Strip the \ns; a line without one is truncated or malformed.
      // Errors accumulate and are checked once below.
      unsigned long Len = strlen(From);
      if (Len < 2 || From[Len-1] != '\n')
	 _error->Error("Invalid line in the diversion file: %s",From);
      else
	 From[Len-1] = 0;
      Len = strlen(To);
      if (Len < 2 || To[Len-1] != '\n')
	 _error->Error("Invalid line in the diversion file: %s",To);
      else
	 To[Len-1] = 0;
      Len = strlen(Package);
      if (Len < 2 || Package[Len-1] != '\n')
	 _error->Error("Invalid line in the diversion file: %s",Package);
      else
	 Package[Len-1] = 0;

      // Make sure the lines were parsed OK
      if (_error->PendingError() == true)
	 break;

      // Fetch a package; ":" marks a diversion with no owning package.
      if (strcmp(Package,":") == 0)
	 Package[0] = 0;
      pkgFLCache::PkgIterator FlPkg = FList->GetPkg(Package,0,true);
      if (FlPkg.end() == true)
      {
	 _error->Error("Internal Error getting a Package Name");
	 break;
      }

      // Install the diversion
      if (FList->AddDiversion(FlPkg,From,To) == false)
      {
	 _error->Error("Internal Error adding a diversion");
	 break;
      }
   }
   if (_error->PendingError() == false)
      FList->FinishDiverLoad();

   // Remember the file identity so LoadChanges can detect modification.
   // NOTE(review): updated even when parsing failed above - confirm a
   // failed load gets retried by callers.
   DiverInode = Stat.st_ino;
   DiverTime = Stat.st_mtime;

   fclose(Fd);
   return !_error->PendingError();
}
									/*}}}*/
// DpkgDB::ReadyFileList - Read the file listing			/*{{{*/
// ---------------------------------------------------------------------
/* Read in the file listing. The file listing is created from three
   sources, *.list, Conffile sections and the Diversion table. Requires
   ReadyPkgCache to have run first. */
bool debDpkgDB::ReadyFileList(OpProgress &Progress)
{
   if (Cache == 0)
      return _error->Error("The pkg cache must be initialized first");
   if (FList != 0)
   {
      Progress.OverallProgress(1,1,1,"Reading File List");
      return true;
   }

   // Create the cache and read in the file listing
   FileMap = new DynamicMMap(MMap::Public);
   FList = new pkgFLCache(*FileMap);
   if (_error->PendingError() == true ||
       ReadFList(Progress) == false ||
       ReadConfFiles() == false ||
       ReadDiversions() == false)
   {
      // Tear down the partially built cache so a retry starts clean.
      delete FList;
      delete FileMap;
      FileMap = 0;
      FList = 0;
      return false;
   }

   /* Dump cache statistics only when debugging is enabled; writing them
      unconditionally to stdout would pollute front-end output. */
   if (_config->FindB("Debug::debDpkgDB",false) == true)
   {
      cout << "Node: " << FList->HeaderP->NodeCount << ',' << FList->HeaderP->UniqNodes << endl;
      cout << "Dir: " << FList->HeaderP->DirCount << endl;
      cout << "Package: " << FList->HeaderP->PackageCount << endl;
      cout << "HashSize: " << FList->HeaderP->HashSize << endl;
      cout << "Size: " << FileMap->Size() << endl;
      cout << endl;
   }

   return true;
}
									/*}}}*/
// DpkgDB::ReadConfFiles - Read the conf file sections from the s-file	/*{{{*/
// ---------------------------------------------------------------------
/* Reading the conf files is done by reparsing the status file. This is
   actually rather fast so it is no big deal. Each Conffiles: value is a
   whitespace separated sequence of "path md5" pairs. */
bool debDpkgDB::ReadConfFiles()
{
   FileFd File(_config->FindFile("Dir::State::status"),FileFd::ReadOnly);
   pkgTagFile Tags(&File);
   if (_error->PendingError() == true)
      return false;

   pkgTagSection Section;
   while (1)
   {
      // Skip to the next section
      unsigned long Offset = Tags.Offset();
      if (Tags.Step(Section) == false)
	 break;

      // Parse the line
      const char *Start;
      const char *Stop;
      if (Section.Find("Conffiles",Start,Stop) == false)
	 continue;

      const char *PkgStart;
      const char *PkgEnd;
      if (Section.Find("Package",PkgStart,PkgEnd) == false)
	 return _error->Error("Failed to find a Package: Header, offset %lu",Offset);

      // Snag a package record for it
      pkgFLCache::PkgIterator FlPkg = FList->GetPkg(PkgStart,PkgEnd,true);
      if (FlPkg.end() == true)
	 return _error->Error("Internal Error getting a Package Name");

      // Parse the conf file lines. The bounds test must come before the
      // dereference so we never read *Stop, and the cast to unsigned
      // char keeps isspace() defined for high-bit characters.
      while (1)
      {
	 for (; Start < Stop && isspace((unsigned char)*Start) != 0; Start++);
	 if (Start == Stop)
	    break;

	 // Split it into words
	 const char *End = Start;
	 for (; End < Stop && isspace((unsigned char)*End) == 0; End++);
	 const char *StartMd5 = End;
	 for (; StartMd5 < Stop && isspace((unsigned char)*StartMd5) != 0; StartMd5++);
	 const char *EndMd5 = StartMd5;
	 for (; EndMd5 < Stop && isspace((unsigned char)*EndMd5) == 0; EndMd5++);
	 if (StartMd5 == EndMd5 || Start == End)
	    return _error->Error("Bad ConfFile section in the status file. Offset %lu",Offset);

	 // Insert a new entry
	 unsigned char MD5[16];
	 if (Hex2Num(StartMd5,EndMd5,MD5,16) == false)
	    return _error->Error("Error parsing MD5. Offset %lu",Offset);
	 if (FList->AddConfFile(Start,End,FlPkg,MD5) == false)
	    return false;
	 Start = EndMd5;
      }
   }

   return true;
}
									/*}}}*/
// DpkgDB::LoadChanges - Read in any changed state files		/*{{{*/
// ---------------------------------------------------------------------
/* The only file in the dpkg system that can change while packages are
   unpacking is the diversions file. */
bool debDpkgDB::LoadChanges()
{
   // No diversions file means nothing to reload.
   struct stat Stat;
   if (stat((AdminDir + "diversions").c_str(),&Stat) != 0)
      return true;

   // Unchanged since the last load? Then keep what we have.
   bool Unchanged = (DiverInode == Stat.st_ino && DiverTime == Stat.st_mtime);
   if (Unchanged == true)
      return true;
   return ReadDiversions();
}
									/*}}}*/

+ 53
- 0
apt-inst/deb/dpkgdb.h View File

@@ -0,0 +1,53 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: dpkgdb.h,v 1.2 2001/02/20 07:03:17 jgg Exp $
/* ######################################################################

DPKGv1 Database Implementation
The DPKGv1 database is typically stored in /var/lib/dpkg/. For
DPKGv1 the 'meta' information is the contents of the .deb control.tar.gz
member prepended by the package name. The meta information is unpacked
in its temporary directory and then migrated into the main list dir
at a checkpoint.
Journaling is provided by synchronized file writes to the updates sub
directory.

##################################################################### */
/*}}}*/
#ifndef PKGLIB_DPKGDB_H
#define PKGLIB_DPKGDB_H

#ifdef __GNUG__
#pragma interface "apt-pkg/dpkgdb.h"
#endif

#include <apt-pkg/database.h>

// DPKGv1 database: builds the package cache from the status file and
// the file-list cache from *.list, Conffiles sections and diversions.
class debDpkgDB : public pkgDataBase
{
   protected:

   // Directory holding the dpkg state (derived from Dir::State::status).
   string AdminDir;
   // Backing maps for the status cache and the file-list cache.
   DynamicMMap *CacheMap;
   DynamicMMap *FileMap;
   // Identity of the diversions file at last load; LoadChanges compares
   // these to decide whether a reload is needed.
   unsigned long DiverInode;
   // NOTE(review): signed long may be narrower than time_t on some
   // platforms - confirm it is wide enough for st_mtime here.
   signed long DiverTime;

   // Create/empty the tmp.ci meta directory for one package.
   virtual bool InitMetaTmp(string &Dir);
   bool ReadFList(OpProgress &Progress);
   bool ReadDiversions();
   bool ReadConfFiles();

   public:

   virtual bool ReadyFileList(OpProgress &Progress);
   virtual bool ReadyPkgCache(OpProgress &Progress);
   virtual bool LoadChanges();

   debDpkgDB();
   virtual ~debDpkgDB();
};

#endif

+ 103
- 0
apt-inst/dirstream.cc View File

@@ -0,0 +1,103 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: dirstream.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
/* ######################################################################

Directory Stream
This class provides a simple basic extractor that can be used for
a number of purposes.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#ifdef __GNUG__
#pragma implementation "apt-pkg/dirstream.h"
#endif

#include <apt-pkg/dirstream.h>
#include <apt-pkg/error.h>

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <errno.h>
#include <utime.h>
#include <unistd.h>
/*}}}*/

// DirStream::DoItem - Process an item					/*{{{*/
// ---------------------------------------------------------------------
/* This is a very simple extractor, it does not deal with things like
   overwriting directories with files and so on. Regular files hand an
   open descriptor back to the archive handler through Fd; everything
   else is currently ignored. */
bool pkgDirStream::DoItem(Item &Itm,int &Fd)
{
   switch (Itm.Type)
   {
      case Item::File:
      {
	 /* Open the output file, NDELAY is used to prevent this from
	    blowing up on device special files.. */
	 int iFd = open(Itm.Name,O_NDELAY|O_WRONLY|O_CREAT|O_TRUNC|O_APPEND,
			Itm.Mode);
	 if (iFd < 0)
	    return _error->Errno("open","Failed write file %s",
				 Itm.Name);

	 // fchmod deals with umask and fchown sets the ownership
	 if (fchmod(iFd,Itm.Mode) != 0)
	 {
	    // Close the descriptor so a failure here does not leak it
	    close(iFd);
	    return _error->Errno("fchmod","Failed write file %s",
				 Itm.Name);
	 }
	 if (fchown(iFd,Itm.UID,Itm.GID) != 0 && errno != EPERM)
	 {
	    close(iFd);
	    return _error->Errno("fchown","Failed write file %s",
				 Itm.Name);
	 }
	 Fd = iFd;
	 return true;
      }

      case Item::HardLink:
      case Item::SymbolicLink:
      case Item::CharDevice:
      case Item::BlockDevice:
      case Item::Directory:
      case Item::FIFO:
      break;
   }

   return true;
}
									/*}}}*/
// DirStream::FinishedFile - Finished processing a file			/*{{{*/
// ---------------------------------------------------------------------
/* Close the output descriptor and stamp the file with the modification
   time recorded in the archive. */
bool pkgDirStream::FinishedFile(Item &Itm,int Fd)
{
   // Items that never produced a file need no work.
   if (Fd < 0)
      return true;

   if (close(Fd) != 0)
      return _error->Errno("close","Failed to close file %s",Itm.Name);

   /* Set the modification times. The only way it can fail is if someone
      has futzed with our file, which is intolerable :> */
   struct utimbuf Time;
   Time.actime = Itm.MTime;
   Time.modtime = Itm.MTime;
   if (utime(Itm.Name,&Time) != 0)
      // Was mislabelled "Failed to close file"; the failing call is utime
      _error->Errno("utime","Failed to set modification time for %s",Itm.Name);
   // NOTE(review): the utime error is recorded but true is still
   // returned; callers checking _error->PendingError() will see it later.
   return true;
}
									/*}}}*/
// DirStream::Fail - Failed processing a file				/*{{{*/
// ---------------------------------------------------------------------
/* Discard the output descriptor for an item whose extraction failed.
   Returns true only when there was nothing open to clean up. */
bool pkgDirStream::Fail(Item &Itm,int Fd)
{
   bool NoFile = (Fd < 0);
   if (NoFile == false)
      close(Fd);
   return NoFile;
}
									/*}}}*/

+ 61
- 0
apt-inst/dirstream.h View File

@@ -0,0 +1,61 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: dirstream.h,v 1.2 2001/02/20 07:03:16 jgg Exp $
/* ######################################################################

Directory Stream

When unpacking the contents of the archive are passed into a directory
stream class for analysis and processing. The class controls all aspects
of actually writing the directory stream from disk. The low level
archive handlers are only responsible for decoding the archive format
and sending events (via method calls) to the specified directory
stream.
When unpacking a real file the archive handler is passed back a file
handle to write the data to, this is to support strange
archives+unpacking methods. If that fd is -1 then the file data is
simply ignored.
The provided defaults do the 'Right Thing' for a normal unpacking
process (ie 'tar')
##################################################################### */
/*}}}*/
#ifndef PKGLIB_DIRSTREAM_H
#define PKGLIB_DIRSTREAM_H

#ifdef __GNUG__
#pragma interface "apt-pkg/dirstream.h"
#endif

// Event sink for archive extraction: the low level archive handlers
// decode the format and call these methods for each entry. The default
// implementations unpack to disk (see dirstream.cc).
class pkgDirStream
{
   public:

   // All possible information about a component
   struct Item
   {
      enum Type_t {File, HardLink, SymbolicLink, CharDevice, BlockDevice,
                   Directory, FIFO} Type;
      char *Name;
      char *LinkTarget;
      unsigned long Mode;
      unsigned long UID;
      unsigned long GID;
      unsigned long Size;
      unsigned long MTime;
      unsigned long Major;
      unsigned long Minor;
   };

   // Called once per entry; for files, set Fd to a writable descriptor,
   // -1 to discard the data, or -2 to route the data through Process.
   virtual bool DoItem(Item &Itm,int &Fd);
   // Called when extraction of an item fails part way through.
   virtual bool Fail(Item &Itm,int Fd);
   // Called after an item's data has been fully written.
   virtual bool FinishedFile(Item &Itm,int Fd);
   // Receives the raw data chunks when DoItem asked for Fd = -2.
   virtual bool Process(Item &Itm,const unsigned char *Data,
			unsigned long Size,unsigned long Pos) {return true;};
   virtual ~pkgDirStream() {};
};

#endif

+ 5
- 0
apt-inst/dpkg-diffs.txt View File

@@ -0,0 +1,5 @@
- Replacing directories with files
dpkg permits this with the weak condition that the directory is owned only
by the package. APT requires that the directory have no files that are not
owned by the package. Replaces are specifically not checked to prevent
file list corruption.

+ 509
- 0
apt-inst/extract.cc View File

@@ -0,0 +1,509 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: extract.cc,v 1.2 2001/02/20 07:03:16 jgg Exp $
/* ######################################################################

Archive Extraction Directory Stream
Extraction for each file is a bit of an involved process. Each object
undergoes an atomic backup, overwrite, erase sequence. First the
object is unpacked to '.dpkg.new' then the original is hardlinked to
'.dpkg.tmp' and finally the new object is renamed to overwrite the old
one. From an external perspective the file never ceased to exist.
After the archive has been successfully unpacked the .dpkg.tmp files
are erased. A failure causes all the .dpkg.tmp files to be restored.
Decisions about unpacking go like this:
- Store the original filename in the file listing
- Resolve any diversions that would affect this file, all checks
below apply to the diverted name, not the real one.
- Resolve any symlinked configuration files.
- If the existing file does not exist then .dpkg-tmp is checked for.
[Note, this is reduced to only check if a file was expected to be
there]
- If the existing link/file is not a directory then it is replaced
regardless
- If the existing link/directory is being replaced by a directory then
absolutely nothing happens.
- If the existing link/directory is being replaced by a link then
absolutely nothing happens.
- If the existing link/directory is being replaced by a non-directory
then this will abort if the package is not the sole owner of the
directory. [Note, this is changed to not happen if the directory
non-empty - that is, it only includes files that are part of this
package - prevents removing user files accidentally.]
- If the non-directory exists in the listing database and it
does not belong to the current package then an overwrite condition
is invoked.
As we unpack we record the file list differences in the FL cache. If
we need to unroll then the FL cache knows which files have been unpacked
and can undo. When we need to erase then it knows which files have not
been unpacked.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#ifdef __GNUG__
#pragma implementation "apt-pkg/extract.h"
#endif
#include <apt-pkg/extract.h>
#include <apt-pkg/error.h>
#include <apt-pkg/debversion.h>

#include <sys/stat.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <dirent.h>
/*}}}*/

static const char *TempExt = "dpkg-tmp";
//static const char *NewExt = "dpkg-new";

// Extract::pkgExtract - Constructor					/*{{{*/
// ---------------------------------------------------------------------
/* Look up (or create) the file-list package record for the version
   being extracted. */
pkgExtract::pkgExtract(pkgFLCache &FLCache,pkgCache::VerIterator Ver) :
	     FLCache(FLCache), Ver(Ver)
{
   /* Initialize Debug before the early return below; previously it was
      left uninitialized when the package lookup failed and then read
      by DoItem. */
   Debug = true;
   FLPkg = FLCache.GetPkg(Ver.ParentPkg().Name(),true);
   if (FLPkg.end() == true)
      return;
}
									/*}}}*/
// Extract::DoItem - Handle a single item from the stream /*{{{*/
// ---------------------------------------------------------------------
/* This performs the setup for the extraction.. */
bool pkgExtract::DoItem(Item &Itm,int &Fd)
{
char Temp[sizeof(FileName)];
/* Strip any leading/trailing /s from the filename, then copy it to the
temp buffer and re-apply the leading / We use a class variable
to store the new filename for use by the three extraction funcs */
char *End = FileName+1;
const char *I = Itm.Name;
for (; *I != 0 && *I == '/'; I++);
*FileName = '/';
for (; *I != 0 && End < FileName + sizeof(FileName); I++, End++)
*End = *I;
if (End + 20 >= FileName + sizeof(FileName))
return _error->Error("The path %s is too long",Itm.Name);
for (; End > FileName && End[-1] == '/'; End--);
*End = 0;
Itm.Name = FileName;
/* Lookup the file. Nde is the file [group] we are going to write to and
RealNde is the actual node we are manipulating. Due to diversions
they may be entirely different. */
pkgFLCache::NodeIterator Nde = FLCache.GetNode(Itm.Name,End,0,false,false);
pkgFLCache::NodeIterator RealNde = Nde;
// See if the file is already in the file listing
unsigned long FileGroup = RealNde->File;
for (; RealNde.end() == false && FileGroup == RealNde->File; RealNde++)
if (RealNde.RealPackage() == FLPkg)
break;

// Nope, create an entry
if (RealNde.end() == true)
{
RealNde = FLCache.GetNode(Itm.Name,End,FLPkg.Offset(),true,false);
if (RealNde.end() == true)
return false;
RealNde->Flags |= pkgFLCache::Node::NewFile;
}

/* Check if this entry already was unpacked. The only time this should
ever happen is if someone has hacked tar to support capabilities, in
which case this needs to be modified anyhow.. */
if ((RealNde->Flags & pkgFLCache::Node::Unpacked) ==
pkgFLCache::Node::Unpacked)
return _error->Error("Unpacking %s more than once",Itm.Name);
if (Nde.end() == true)
Nde = RealNde;

/* Consider a diverted file - We are not permitted to divert directories,
but everything else is fair game (including conf files!) */
if ((Nde->Flags & pkgFLCache::Node::Diversion) != 0)
{
if (Itm.Type == Item::Directory)
return _error->Error("The directory %s is diverted",Itm.Name);

/* A package overwriting a diversion target is just the same as
overwriting a normally owned file and is checked for below in
the overwrites mechanism */

/* If this package is trying to overwrite the target of a diversion,
that is never, ever permitted */
pkgFLCache::DiverIterator Div = Nde.Diversion();
if (Div.DivertTo() == Nde)
return _error->Error("The package is trying to write to the "
"diversion target %s/%s",Nde.DirN(),Nde.File());
// See if it is us and we are following it in the right direction
if (Div->OwnerPkg != FLPkg.Offset() && Div.DivertFrom() == Nde)
{
Nde = Div.DivertTo();
End = FileName + snprintf(FileName,sizeof(FileName)-20,"%s/%s",
Nde.DirN(),Nde.File());
if (End <= FileName)
return _error->Error("The diversion path is too long");
}
}
// Deal with symlinks and conf files
if ((RealNde->Flags & pkgFLCache::Node::NewConfFile) ==
pkgFLCache::Node::NewConfFile)
{
string Res = flNoLink(Itm.Name);
if (Res.length() > sizeof(FileName))
return _error->Error("The path %s is too long",Res.c_str());
if (Debug == true)
clog << "Followed conf file from " << FileName << " to " << Res << endl;
Itm.Name = strcpy(FileName,Res.c_str());
}
/* Get information about the existing file, and attempt to restore
a backup if it does not exist */
struct stat LExisting;
bool EValid = false;
if (lstat(Itm.Name,&LExisting) != 0)
{
// This is bad news.
if (errno != ENOENT)
return _error->Errno("stat","Failed to stat %s",Itm.Name);
// See if we can recover the backup file
if (Nde.end() == false)
{
snprintf(Temp,sizeof(Temp),"%s.%s",Itm.Name,TempExt);
if (rename(Temp,Itm.Name) != 0 && errno != ENOENT)
return _error->Errno("rename","Failed to rename %s to %s",
Temp,Itm.Name);
if (stat(Itm.Name,&LExisting) != 0)
{
if (errno != ENOENT)
return _error->Errno("stat","Failed to stat %s",Itm.Name);
}
else
EValid = true;
}
}
else
EValid = true;
/* If the file is a link we need to stat its destination, get the
existing file modes */
struct stat Existing = LExisting;
if (EValid == true && S_ISLNK(Existing.st_mode))
{
if (stat(Itm.Name,&Existing) != 0)
{
if (errno != ENOENT)
return _error->Errno("stat","Failed to stat %s",Itm.Name);
Existing = LExisting;
}
}
// We pretend a non-existing file looks like it is a normal file
if (EValid == false)
Existing.st_mode = S_IFREG;
/* Okay, at this point 'Existing' is the stat information for the