author     Pascal Rigaux <pixel@mandriva.com>   2000-03-07 23:37:35 +0000
committer  Pascal Rigaux <pixel@mandriva.com>   2000-03-07 23:37:35 +0000
commit     ecad1562e7ef9070a2ecce05c05e0b431dc2045a (patch)
tree       508a3dfefe74d74703174e4f3e7167aa494034db
parent     18a2943fdceb9c1197cdc2b2253db52a94437747 (diff)
no_comment
-rwxr-xr-x  build_archive     175
-rwxr-xr-x  extract_archive   248
-rw-r--r--  gendepslist2.cc   348
-rw-r--r--  genhdlist_cz2      50
-rw-r--r--  hdlist2prereq.cc   53
-rw-r--r--  rpmtools.spec       4
6 files changed, 877 insertions, 1 deletion
diff --git a/build_archive b/build_archive
new file mode 100755
index 0000000..e2b21f2
--- /dev/null
+++ b/build_archive
@@ -0,0 +1,175 @@
+#!/usr/bin/perl
+
+#- Mandrake Simple Archive Builder.
+#- Copyright (C) 1999 MandrakeSoft (fpons@mandrakesoft.com)
+#-
+#- This program is free software; you can redistribute it and/or modify
+#- it under the terms of the GNU General Public License as published by
+#- the Free Software Foundation; either version 2, or (at your option)
+#- any later version.
+#-
+#- This program is distributed in the hope that it will be useful,
+#- but WITHOUT ANY WARRANTY; without even the implied warranty of
+#- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#- GNU General Public License for more details.
+#-
+#- You should have received a copy of the GNU General Public License
+#- along with this program; if not, write to the Free Software
+#- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#- Simple cat archive with bzip2 for perl.
+#- read a file list on stdin and produce the $ARGV[0] archive file (.cz or .cz2).
+#- uncompressing scheme is:
+#- | |
+#- | | | |
+#- $off1 =|*| } | |
+#- |*| } $off2 =|+| }
+#- |*| } $siz1 => 'bzip2 -d' => |+| } $siz2 => $filename
+#- |*| } |+| }
+#- |*| } | |
+#- | | | |
+#- | | | |
+#- | |
+#- where %data has the following format:
+#- { 'filename' => [ 'f', $off1, $siz1, $off2, $siz2 ] }
+#- except for symbolic links where it is:
+#- { 'filename_symlink' => [ 'l', $symlink_value ] }
+#- and directories where it is only
+#- { 'filename_directory' => [ 'd' ] }
+#- as you can see, there is no owner, group, filemode... an extension could be
+#- made with 'F' (instead of 'f') or 'L' (instead of 'l'), for example.
+#- we do not need them as this is used by DrakX for fast archive extraction and
+#- owner/filemode only matter for the running user (ie root).
+#-
+#- the archive file contains the concatenation of all bzip2'ed groups of files
+#- whose filenames are given on input,
+#- then a TOC (describing %data, a concatenation of toc_line records) follows,
+#- and a TOC_TRAILER for the summary.
+
+use strict qw(subs vars refs);
+
+sub toc_line {
+ my ($file, $data) = @_;
+
+ for ($data->[0]) {
+ return(/l/ && pack("anna*", 'l', length($file), length($data->[1]), "$file$data->[1]") ||
+ /d/ && pack("ana*", 'd', length($file), $file) ||
+ /f/ && pack("anNNNNa*", 'f', length($file), $data->[1], $data->[2], $data->[3], $data->[4], $file) ||
+ die "unknown extension $_\n");
+ }
+}
+
+sub main {
+ my ($archivename, $maxsiz) = @_;
+ my ($compress, $uncompress, $off1, $siz1, $off2, $siz2) = ('', '', 0, 0, 0, 0);
+ my @filelist = ();
+ my @data = ();
+ my %data = ();
+
+ die "usage: $0 <archivename> <maxsiz>\n" unless $maxsiz >= 100000;
+
+ #- guess compress method to use.
+ if ($archivename =~ /\.cz$/) {
+ ($compress, $uncompress) = ("gzip -9", "gzip -d");
+ } elsif ($archivename =~ /\.cz2$/) {
+ ($compress, $uncompress) = ("bzip2 -9", "bzip2 -d");
+ } else {
+ die "how to choose a compression which such a filename $archivename\n";
+ }
+ print STDERR "choosing compression method with \"$compress\" for archive $archivename\n";
+
+ unlink "$archivename";
+ unlink "tmp.z";
+
+ foreach (<STDIN>) {
+ chomp;
+
+ my $file = $_; -e $file or die "unable to find file $file\n";
+
+ push @data, $file;
+ #- symbolic links and directories are now supported; the entry type is
+ #- given by the first field of $data{$file}.
+ if (-l $file) {
+ $data{$file} = [ 'l', readlink $file ];
+ } elsif (-d $file) {
+ $data{$file} = [ 'd' ];
+ } else {
+ $siz2 = -s $file;
+
+ push @filelist, $file;
+ $data{$file} = [ 'f', -1, -1, $off2, $siz2 ];
+
+ if ($off2 + $siz2 > $maxsiz) { #- need compression.
+ system "cat @filelist | $compress >tmp.z";
+ $siz1 = -s "tmp.z";
+
+ $data{$_} = [ 'f', $off1, $siz1, $data{$_}[3], $data{$_}[4] ] foreach @filelist;
+
+ system "cat tmp.z >>$archivename";
+ $off1 += $siz1;
+ $off2 = 0; $siz2 = 0;
+ @filelist = ();
+ }
+ $off2 += $siz2;
+ }
+ }
+ if (scalar @filelist) {
+ system "cat @filelist | $compress >tmp.z";
+ $siz1 = -s "tmp.z";
+
+ $data{$_} = [ 'f', $off1, $siz1, $data{$_}[3], $data{$_}[4] ] foreach @filelist;
+
+ system "cat tmp.z >>$archivename";
+ $off1 += $siz1;
+ print STDERR "real archive size of $archivename is $off1\n";
+ }
+
+ #- produce a TOC directly at the end of the file, followed by
+ #- a trailer with the TOC summary and the archive summary.
+ local *OUTPUT;
+ open OUTPUT, ">>$archivename";
+
+ my ($toc_str, $toc_data) = ('', '');
+ my @data_d = ();
+ my @data_l = ();
+ my @data_f = ();
+
+ foreach (@data) {
+ my $file = $_;
+ $data{$file} or die "build_archive: internal error on $_\n";
+
+ #- specific according to type.
+ #- with this version, only f has specific data other than strings.
+ for ($data{$file}[0]) {
+ /d/ && do { push @data_d, $file; last; };
+ /l/ && do { push @data_l, $file; last; };
+ /f/ && do { push @data_f, $file; $toc_data .= pack("NNNN",
+ $data{$file}[1],
+ $data{$file}[2],
+ $data{$file}[3],
+ $data{$file}[4]); last; };
+ die "unknown extension $_\n";
+ }
+ }
+
+ foreach (@data_d) { $toc_str .= $_ . "\n" }
+ foreach (@data_l) { $toc_str .= $_ . "\n" . $data{$_}[1] . "\n" }
+ foreach (@data_f) { $toc_str .= $_ . "\n" }
+
+ print OUTPUT $toc_str;
+ print OUTPUT $toc_data;
+ print OUTPUT toc_trailer(scalar(@data_d), scalar(@data_l), scalar(@data_f),
+ length($toc_str), $uncompress);
+ close OUTPUT;
+}
+
+sub toc_trailer {
+ my ($toc_d_count, $toc_l_count, $toc_f_count, $toc_str_size, $uncompress) = @_;
+
+ #- 'cz[0' is the toc_trailer header, where 0 is the version (only 0 for now).
+ #- '0]cz' is the toc_trailer trailer that matches the corresponding header.
+ return pack "a4NNNNa40a4", 'cz[0', $toc_d_count, $toc_l_count, $toc_f_count, $toc_str_size, $uncompress, '0]cz';
+}
+
+main(@ARGV);
+unlink "tmp.z";
diff --git a/extract_archive b/extract_archive
new file mode 100755
index 0000000..cdd98da
--- /dev/null
+++ b/extract_archive
@@ -0,0 +1,248 @@
+#!/usr/bin/perl
+
+#- Mandrake Simple Archive Extracter.
+#- Copyright (C) 1999 MandrakeSoft (fpons@mandrakesoft.com)
+#-
+#- This program is free software; you can redistribute it and/or modify
+#- it under the terms of the GNU General Public License as published by
+#- the Free Software Foundation; either version 2, or (at your option)
+#- any later version.
+#-
+#- This program is distributed in the hope that it will be useful,
+#- but WITHOUT ANY WARRANTY; without even the implied warranty of
+#- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#- GNU General Public License for more details.
+#-
+#- You should have received a copy of the GNU General Public License
+#- along with this program; if not, write to the Free Software
+#- Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#- Simple cat archive with gzip/bzip2 for perl.
+#- see build_archive for more information.
+#-
+#- uncompressing scheme is:
+#- | |
+#- | | | |
+#- $off1 =|*| } | |
+#- |*| } $off2 =|+| }
+#- |*| } $siz1 => 'gzip/bzip2 -d' => |+| } $siz2 => $filename
+#- |*| } |+| }
+#- |*| } | |
+#- | | | |
+#- | | | |
+#- | |
+
+use strict qw(subs vars refs);
+
+#- used for uncompressing archive and other.
+my %toc_trailer;
+my @data;
+my %data;
+
+#- taken from DrakX common stuff, for convenience, and modified to match our expectations.
+sub dirname { @_ == 1 or die "usage: dirname <name>\n"; local $_ = shift; s|[^/]*/*\s*$||; s|(.)/*$|$1|; $_ || '.' }
+sub basename { @_ == 1 or die "usage: basename <name>\n"; local $_ = shift; s|/*\s*$||; s|.*/||; $_ }
+sub mkdir_ {
+ my $root = dirname $_[0];
+ if (-e $root) {
+ -d $root or die "mkdir: error creating directory $_[0]: $root is a file and I won't delete it\n";
+ } else {
+ mkdir_($root);
+ }
+ -d $_[0] and return;
+ mkdir $_[0], 0755 or die "mkdir: error creating directory $_[0]: $!\n";
+}
+sub symlink_ { mkdir_ dirname($_[1]); unlink $_[1]; symlink $_[0], $_[1] }
+
+#- compute the closure of filename list according to symlinks or directory
+#- contents inside the archive.
+sub compute_closure {
+ my %file;
+ my @file;
+
+ #- remember which filenames have already been seen and remove duplicates.
+ @file{@_} = ();
+
+ #- navigate through filename list to follow symlinks.
+ do {
+ @file = grep { !$file{$_} } keys %file;
+ foreach (@file) {
+ my $file = $_;
+
+ #- keep in mind this one has been processed and does not need
+ #- to be examined again.
+ $file{$file} = 1;
+
+ exists $data{$file} or next;
+
+ for ($data{$file}[0]) {
+ #- on symlink, try to follow it and mark %file if
+ #- it is still inside the archive contents.
+ /l/ && do {
+ my ($source, $target) = ($file, $data{$file}[1]);
+
+ $source =~ s|[^/]*$||; #- remove filename to navigate directory.
+ if ($source) {
+ while ($target =~ s|^\./|| || $target =~ s|//+|/| || $target =~ s|/$|| or
+ $source and $target =~ s|^\.\./|| and $source =~ s|[^/]*/$||) {}
+ }
+
+ #- FALL THROUGH with new selection.
+ $file = $target =~ m|^/| ? $target : $source.$target;
+ };
+
+ #- on directory, take all files in %data starting with
+ #- this directory, provided they are not already taken
+ #- into account.
+ /[ld]/ && do {
+ @file{grep { !$file{$_} && m|^$file$| || m|^$file/| } keys %data} = ();
+ last;
+ };
+ }
+ }
+ } while (@file > 0);
+
+ keys %file;
+}
+
+#- read toc at end of archive.
+sub read_toc {
+ my ($file) = @_;
+ my ($toc, $toc_trailer, $toc_size);
+ my @toc_str;
+ my @toc_data;
+
+ local *ARCHIVE;
+ open ARCHIVE, "<$file" or die "cannot open archive file $file\n";
+
+ #- seek to end of file minus 64, size of trailer.
+ #- read toc_trailer, check header/trailer for version 0.
+ seek ARCHIVE, -64, 2;
+ read ARCHIVE, $toc_trailer, 64 or die "cannot read toc_trailer of archive file $file\n";
+ @toc_trailer{qw(header toc_d_count toc_l_count toc_f_count toc_str_size uncompress trailer)} =
+ unpack "a4NNNNZ40a4", $toc_trailer;
+ $toc_trailer{header} eq 'cz[0' && $toc_trailer{trailer} eq '0]cz' or die "bad toc_trailer in archive file $file\n";
+
+ #- read toc, extract data hashes.
+ $toc_size = $toc_trailer{toc_str_size} + 16*$toc_trailer{toc_f_count};
+ seek ARCHIVE, -64-$toc_size, 2;
+
+ #- read strings separated by \n, so this char cannot be inside filename, oops.
+ read ARCHIVE, $toc, $toc_trailer{toc_str_size} or die "cannot read toc of archive file $file\n";
+ @toc_str = split "\n", $toc;
+
+ #- read data for file.
+ read ARCHIVE, $toc, 16*$toc_trailer{toc_f_count} or die "cannot read toc of archive file $file\n";
+ @toc_data = unpack "N". 4*$toc_trailer{toc_f_count}, $toc;
+
+ close ARCHIVE;
+
+ foreach (0..$toc_trailer{toc_d_count}-1) {
+ my $file = $toc_str[$_];
+ push @data, $file;
+ $data{$file} = [ 'd' ];
+ }
+ foreach (0..$toc_trailer{toc_l_count}-1) {
+ my ($file, $symlink) = ($toc_str[$toc_trailer{toc_d_count}+2*$_],
+ $toc_str[$toc_trailer{toc_d_count}+2*$_+1]);
+ push @data, $file;
+ $data{$file} = [ 'l', $symlink ];
+ }
+ foreach (0..$toc_trailer{toc_f_count}-1) {
+ my $file = $toc_str[$toc_trailer{toc_d_count}+2*$toc_trailer{toc_l_count}+$_];
+ push @data, $file;
+ $data{$file} = [ 'f', @toc_data[4*$_ .. 4*$_+3] ];
+ }
+
+ scalar keys %data == $toc_trailer{toc_d_count}+$toc_trailer{toc_l_count}+$toc_trailer{toc_f_count} or
+ die "mismatch count when reading toc, bad archive file $file\n";
+}
+
+sub catsksz {
+ my ($input, $seek, $siz, $output) = @_;
+ my ($buf, $sz);
+
+ while (($sz = sysread($input, $buf, $seek > 4096 ? 4096 : $seek))) {
+ $seek -= $sz;
+ last unless $seek > 0;
+ }
+ while (($sz = sysread($input, $buf, $siz > 4096 ? 4096 : $siz))) {
+ $siz -= $sz;
+ syswrite($output, $buf);
+ last unless $siz > 0;
+ }
+}
+
+sub main {
+ my ($archivename, $dir, @file) = @_;
+ my %extract_table;
+
+ #- update %data according to TOC of archive.
+ read_toc($archivename);
+
+ #- as a special feature, if both $dir and @file are empty, list the contents of the archive.
+ if (!$dir && !@file) {
+ my $count = scalar keys %data;
+ print "$count files in archive, uncompression method is \"$toc_trailer{uncompress}\"\n";
+ foreach my $file (@data) {
+ for ($data{$file}[0]) {
+ /l/ && do { printf "l %13c %s -> %s\n", ' ', $file, $data{$file}[1]; last; };
+ /d/ && do { printf "d %13c %s\n", ' ', $file; last; };
+ /f/ && do { printf "f %12d %s\n", $data{$file}[4], $file; last; };
+ }
+ }
+ exit 0;
+ }
+
+ #- compute closure.
+ @file = compute_closure(@file);
+
+ #- check all file given are in the archive before continuing.
+ foreach (@file) { $data{$_} or die "unable to find file $_ in archive $archivename\n"; }
+
+ foreach my $file (@file) {
+ my $newfile = "$dir/$file";
+
+ print "extracting $file\n";
+ for ($data{$file}[0]) {
+ /l/ && do { symlink_ $data{$file}[1], $newfile; last; };
+ /d/ && do { mkdir_ $newfile; last; };
+ /f/ && do {
+ mkdir_ dirname $newfile;
+ $extract_table{$data{$file}[1]} ||= [ $data{$file}[2], [] ];
+ push @{$extract_table{$data{$file}[1]}[1]}, [ $newfile, $data{$file}[3], $data{$file}[4] ];
+ $extract_table{$data{$file}[1]}[0] == $data{$file}[2] or die "mismatched relocation in toc\n";
+ last;
+ };
+ die "unknown extension $_ when uncompressing archive $archivename\n";
+ }
+ }
+
+ #- delayed extraction is done on each block for a single execution
+ #- of uncompress executable.
+ foreach (sort { $a <=> $b } keys %extract_table) {
+ local *OUTPUT;
+ if (open OUTPUT, "-|") {
+ #- $curr_off is used to read from a pipe while simulating a seek on it
+ #- (as done by catsksz); the current position is the first byte not yet
+ #- read (ie last block read start + last block read size).
+ my $curr_off = 0;
+ foreach (sort { $a->[1] <=> $b->[1] } @{$extract_table{$_}[1]}) {
+ my ($newfile, $off, $siz) = @$_;
+ local *FILE;
+ open FILE, $dir ? ">$newfile" : ">&STDOUT";
+ catsksz(\*OUTPUT, $off - $curr_off, $siz, \*FILE);
+ $curr_off = $off + $siz;
+ }
+ } else {
+ local *BUNZIP2;
+ open BUNZIP2, "| $toc_trailer{uncompress}";
+ local *ARCHIVE;
+ open ARCHIVE, "<$archivename" or die "cannot open archive $archivename\n";
+ catsksz(\*ARCHIVE, $_, $extract_table{$_}[0], \*BUNZIP2);
+ exit 0;
+ }
+ }
+}
+
+main(@ARGV);
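
Taken together, the two scripts form a small round trip. A hedged usage sketch (paths and file names below are illustrative, not from the commit): build_archive takes the archive name and an approximate uncompressed block size as arguments with the file list on stdin; extract_archive lists the archive when given no destination directory, and extracts into one otherwise:

#!/usr/bin/perl
#- Usage sketch only; paths and file names are invented examples.
use strict;

#- build: file names on stdin, archive name and maximum block size as arguments.
open my $build, "| ./build_archive test.cz2 400000" or die "cannot run build_archive\n";
print $build "$_\n" foreach qw(etc/passwd etc/group usr/share/doc/README);
close $build or die "build_archive failed\n";

#- list: no destination directory and no file arguments.
system "./extract_archive", "test.cz2";

#- extract: destination directory, then the wanted entries; compute_closure()
#- also pulls in directory contents and symlink targets found in the TOC.
system "./extract_archive", "test.cz2", "/tmp/unpacked", "etc/passwd", "usr/share/doc/README";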
diff --git a/gendepslist2.cc b/gendepslist2.cc
new file mode 100644
index 0000000..faaf953
--- /dev/null
+++ b/gendepslist2.cc
@@ -0,0 +1,348 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <rpm/rpmlib.h>
+#include <rpm/header.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <set>
+#include <fstream>
+#include <algorithm>
+
+#define COMPATIBILITY
+
+string put_first = "filesystem setup";
+
+
+/********************************************************************************/
+/* C++ template functions *******************************************************/
+/********************************************************************************/
+template<class V, class C> C sum(const V &v, const C &join = C()) {
+ typename V::const_iterator p, q;
+ C s = C();
+ if (v.begin() != v.end()) {
+ for (p = q = v.begin(), q++; q != v.end(); p = q, q++) s += *p + join;
+ s += *p;
+ }
+ return s;
+}
+
+vector<string> split(char sep, const string &l) {
+ vector<string> r;
+ for (int pos = 0, pos2 = 0; pos2 >= 0;) {
+ pos2 = l.find(sep, pos);
+ r.push_back(pos2 >= 0 ? l.substr(pos, pos2 - pos) : l.substr(pos)); // substr takes a length, not an end position
+ pos = pos2 + 1;
+ }
+ return r;
+}
+
+template<class A, class B> void map_insert(map<A, set<B> > &m, const A &a, const B &b) {
+ if (m.find(a) == m.end()) m[a] = *(new set<B>);
+ m[a].insert(b);
+}
+
+template<class A> bool in(const A &a, const vector<A> &v) {
+ vector<A>::const_iterator p;
+ for (p = v.begin(); p != v.end(); p++) if (*p == a) return 1;
+ return 0;
+}
+template<class A, class B> bool in(const A &a, const map<A,B> &m) {
+ return m.find(a) != m.end();
+}
+
+template<class A, class B> map<A,B> set2map(const set<A> &s) { // return by value: the map is a local
+ map<A,B> map;
+ set<A>::const_iterator p;
+ for (p = s.begin(); p != s.end(); p++) map[*p] = *(new B);
+ return map;
+}
+
+template<class A, class B> void add(set<A> &v1, const B &v2) {
+ typename B::const_iterator p;
+ for (p = v2.begin(); p != v2.end(); p++) v1.insert(*p);
+}
+template<class A, class B> void add(vector<A> &v1, const B &v2) {
+ typename B::const_iterator p;
+ for (p = v2.begin(); p != v2.end(); p++) v1.push_back(*p);
+}
+
+typedef vector<string>::iterator ITv;
+typedef set<string>::iterator ITs;
+typedef map<string, set<string> >::iterator ITms;
+
+
+
+
+/********************************************************************************/
+/* header extracting functions **************************************************/
+/********************************************************************************/
+string get_name(Header header, int_32 tag) {
+ int_32 type, count;
+ char *name;
+
+ headerGetEntry(header, tag, &type, (void **) &name, &count);
+ return string(name);
+}
+
+int get_int(Header header, int_32 tag) {
+ int_32 type, count;
+ int *i;
+
+ headerGetEntry(header, tag, &type, (void **) &i, &count);
+ return *i;
+}
+
+vector<string> get_info(Header header, int_32 tag) {
+ int_32 type, count, i;
+ vector<string> r;
+ char **list;
+
+ headerGetEntry(header, tag, &type, (void **) &list, &count);
+ if (list) {
+ r.reserve(count);
+ for (i = 0; i < count; i++) r.push_back(list[i]);
+ free(list);
+ }
+ return r;
+}
+
+vector<string> get_files(Header header) {
+ int_32 type, count, i;
+ char ** baseNames, ** dirNames;
+ int_32 * dirIndexes;
+
+#ifdef COMPATIBILITY
+ // deprecated one
+ vector<string> r = get_info(header, RPMTAG_OLDFILENAMES);
+#else
+ vector<string> r;
+#endif
+
+ headerGetEntry(header, RPMTAG_BASENAMES, &type, (void **) &baseNames, &count);
+ headerGetEntry(header, RPMTAG_DIRINDEXES, &type, (void **) &dirIndexes, NULL);
+ headerGetEntry(header, RPMTAG_DIRNAMES, &type, (void **) &dirNames, NULL);
+
+ if (baseNames && dirNames && dirIndexes) {
+ r.reserve(count);
+ for(i = 0; i < count; i++) {
+ string s(dirNames[dirIndexes[i]]);
+ s += baseNames[i];
+ r.push_back(s);
+ }
+ free(baseNames);
+ free(dirNames);
+ }
+ return r;
+}
+
+/********************************************************************************/
+/* gendepslist ******************************************************************/
+/********************************************************************************/
+vector<string> packages;
+map<string, int> sizes;
+map<string, string> name2fullname;
+map<string, vector<string> > requires, frequires;
+map<string, vector<string> > provided_by, fprovided_by;
+
+void getRequires(FD_t fd) {
+ set<string> all_requires, all_frequires;
+ Header header;
+
+ while ((header=headerRead(fd, HEADER_MAGIC_YES)))
+ {
+ string s_name = get_name(header, RPMTAG_NAME);
+ string name = s_name + "-" + get_name(header, RPMTAG_VERSION) + "-" + get_name(header, RPMTAG_RELEASE);
+ vector<string> l = get_info(header, RPMTAG_REQUIRENAME);
+
+ for (ITv p = l.begin(); p != l.end(); p++) {
+ ((*p)[0] == '/' ? frequires : requires)[name].push_back(*p);
+ ((*p)[0] == '/' ? all_frequires : all_requires).insert(*p);
+ }
+ headerFree(header);
+ }
+ for (ITs p = all_requires.begin(); p != all_requires.end(); p++) provided_by[*p] = *(new vector<string>);
+ for (ITs p = all_frequires.begin(); p != all_frequires.end(); p++) fprovided_by[*p] = *(new vector<string>);
+}
+
+void getProvides(FD_t fd) {
+ Header header;
+ while ((header=headerRead(fd, HEADER_MAGIC_YES)))
+ {
+ string s_name = get_name(header, RPMTAG_NAME);
+ string name = s_name + "-" + get_name(header, RPMTAG_VERSION) + "-" + get_name(header, RPMTAG_RELEASE);
+
+ packages.push_back(name);
+ name2fullname[s_name] = name;
+ sizes[name] = get_int(header, RPMTAG_SIZE);
+
+ if (in(s_name, provided_by)) provided_by[s_name].push_back(name);
+
+ vector<string> provides = get_info(header, RPMTAG_PROVIDES);
+ for (ITv p = provides.begin(); p != provides.end(); p++)
+ if (in(*p, provided_by)) provided_by[*p].push_back(name);
+
+ vector<string> fprovides = get_files(header);
+ for (ITv p = fprovides.begin(); p != fprovides.end(); p++)
+ if (in(*p, fprovided_by)) fprovided_by[*p].push_back(name);
+
+ headerFree(header);
+ }
+}
+
+set<string> getDep_(const string &dep, vector<string> &l) {
+ set<string> r;
+ switch (l.size())
+ {
+ case 0:
+ r.insert((string) "NOTFOUND_" + dep);
+ break;
+ case 1:
+ r.insert(l[0]);
+ break;
+ default:
+ r.insert(sum(l, (string)"|"));
+ }
+ return r;
+}
+
+set<string> getDep(const string &name) {
+ set<string> r;
+ r.insert(name);
+ for (ITv p = requires[name].begin(); p != requires[name].end(); p++) add(r, getDep_(*p, provided_by[*p]));
+ for (ITv p = frequires[name].begin(); p != frequires[name].end(); p++) add(r, getDep_(*p, fprovided_by[*p]));
+ return r;
+}
+
+map<string, set<string> > closure(const map<string, set<string> > &names) {
+ map<string, set<string> > r = names;
+
+ map<string, set<string> > reverse;
+ for (ITv i = packages.begin(); i != packages.end(); i++) reverse[*i] = *(new set<string>);
+
+ for (ITms i = r.begin(); i != r.end(); i++)
+ for (ITs j = i->second.begin(); j != i->second.end(); j++)
+ reverse[*j].insert(i->first);
+
+ for (ITms i = r.begin(); i != r.end(); i++) {
+ set<string> rev = reverse[i->first];
+ for (ITs j = rev.begin(); j != rev.end(); j++) {
+
+ for (ITs k = i->second.begin(); k != i->second.end(); k++) {
+ r[*j].insert(*k);
+ reverse[*k].insert(*j);
+ }
+
+ }
+ }
+ return r;
+}
+
+
+//struct cmp : public binary_function<string,string,bool> {
+// bool operator()(const string &a, const string &b) {
+// int na = closed[a].size();
+// int nb = closed[b].size();
+// return na < nb;
+// }
+//};
+
+void printDepslist(ofstream *out1, ofstream *out2) {
+
+ map<string, set<string> > names;
+ for (ITv p = packages.begin(); p != packages.end(); p++) {
+ set<string> s = getDep(*p);
+ s.erase(*p);
+ names[*p] = s;
+ if (out1) *out1 << *p << " " << sizes[*p] << " " << sum(s, (string) " ") << "\n";
+ }
+ if (out2 == 0) return;
+
+ map<string, set<string> > closed = closure(names);
+ for (ITms p = closed.begin(); p != closed.end(); p++) p->second.erase(p->first);
+
+ names = closed;
+ map<string,int> length;
+ for (ITms p = names.begin(); p != names.end(); p++) {
+ int l = p->second.size();
+ for (ITs q = p->second.begin(); q != p->second.end(); q++) if (q->find('|') != string::npos) l += 1000;
+ length[p->first] = l;
+ }
+
+ vector<string> put_first_ = split(' ', put_first);
+ vector<string> packages;
+ while (names.begin() != names.end()) {
+ string n;
+ unsigned int l_best = 9999;
+
+ for (ITv p = put_first_.begin(); p != put_first_.end(); p++)
+ if (in(name2fullname[*p], names)) { n = name2fullname[*p]; goto found; }
+
+ for (ITms p = names.begin(); p != names.end(); p++)
+ if (p->second.size() < l_best) {
+ l_best = p->second.size();
+ n = p->first;
+ if (l_best == 0) break;
+ }
+ found:
+ names.erase(n);
+ packages.push_back(n);
+ for (ITms p = names.begin(); p != names.end(); p++) p->second.erase(n);
+ }
+
+
+ int i = 0;
+ map<string,int> where;
+ for (ITv p = packages.begin(); p != packages.end(); p++, i++) where[*p] = i;
+
+ i = 0;
+ for (ITv p = packages.begin(); p != packages.end(); p++, i++) {
+ set<string> dep = closed[*p];
+ *out2 << *p << " " << sizes[*p];
+ for (ITs q = dep.begin(); q != dep.end(); q++) {
+ if (q->find('|') != string::npos) {
+ vector<string> l = split('|', *q);
+ for (ITv k = l.begin(); k != l.end(); k++) *out2 << " " << where[*k];
+ } else if (q->compare(0, 9, "NOTFOUND_") == 0) {
+ *out2 << " " << *q;
+ } else {
+ int j = where[*q];
+ if (j > i) cerr << *p << "\n";
+ *out2 << " " << j;
+ }
+ }
+ *out2 << "\n";
+ }
+}
+
+FD_t hdlists(const char *cmd) {
+ return fdDup(fileno(popen(cmd, "r")));
+}
+
+int main(int argc, char **argv)
+{
+ ofstream *out1 = 0, *out2 = 0;
+ if (argc > 2 && (string)argv[1] == "-o") {
+ out1 = new ofstream(argv[2]);
+ out2 = new ofstream(((string)argv[2] + ".ordered").c_str());
+ argc -= 2; argv += 2;
+ } else {
+ out1 = new ofstream(STDOUT_FILENO);
+ }
+ if (argc < 2) {
+ cerr << "usage: gendepslist2 [-o <depslist>] hdlists_cz2...\n";
+ return 1;
+ }
+ string cmd = "bzip2 -dc ";
+ for (int i = 1; i < argc; i++) cmd = cmd + argv[i] + " ";
+ cmd += "2>/dev/null";
+
+ getRequires(hdlists(cmd.c_str()));
+ cerr << "getRequires done\n";
+ getProvides(hdlists(cmd.c_str()));
+ cerr << "getProvides done\n";
+ printDepslist(out1, out2);
+ delete out1;
+ delete out2;
+}
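
gendepslist2 reads one or more hdlist .cz2 files by piping them through "bzip2 -dc" (the trailing TOC bytes are discarded as noise via 2>/dev/null) and, with -o, writes a plain depslist plus a depslist.ordered whose dependencies are indices into the ordered package list. A rough sketch of consuming the plain format, assuming output file names as in the usage line of the program:

#!/usr/bin/perl
#- Sketch (assumed file names): after something like
#-     gendepslist2 -o depslist hdlist.cz2
#- each line of "depslist" is "<name-version-release> <size> <dep> <dep|alt> ...",
#- where '|' separates alternative providers and NOTFOUND_<x> marks an
#- unresolved requirement.
use strict;

open my $list, "<", "depslist" or die "cannot open depslist: $!\n";
while (<$list>) {
    chomp;
    my ($fullname, $size, @deps) = split ' ';
    my @alternatives = grep { /\|/ } @deps;
    my @unresolved   = grep { /^NOTFOUND_/ } @deps;
    printf "%s: size %s, %d deps (%d with alternatives, %d unresolved)\n",
           $fullname, $size, scalar(@deps), scalar(@alternatives), scalar(@unresolved);
}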
diff --git a/genhdlist_cz2 b/genhdlist_cz2
new file mode 100644
index 0000000..7512d68
--- /dev/null
+++ b/genhdlist_cz2
@@ -0,0 +1,50 @@
+#!/usr/bin/perl
+
+($noclean, @ARGV) = @ARGV if $ARGV[0] eq "--noclean";
+(undef, $depslist, @ARGV) = @ARGV if $ARGV[0] eq "--ordered-depslist";
+(undef, $hdlist, @ARGV) = @ARGV if $ARGV[0] eq "-o";
+(undef, $root, @ARGV) = @ARGV if $ARGV[0] eq "--distrib";
+
+$hdlist && @ARGV == 1 || $root && @ARGV == 0 or die
+"usage: genhdlist_cz2 [--noclean] [--ordered-depslist <depslist>] -o <hdlist_cz2> <rpm dir>
+ or genhdlist_cz2 [--noclean] --distrib <root distrib>
+";
+
+if ($root) {
+ $depslist = "$root/Mandrake/base/depslist.ordered";
+ $hdlist = "$root/Mandrake/base/hdlist.cz2";
+ $dir = "$root/Mandrake/RPMS";
+ $ENV{PATH} = "$ENV{PATH}:$root/misc";
+} else {
+ ($dir) = @ARGV;
+}
+
+
+$work_dir = "/tmp/.build_hdlist";
+
+
+-e $work_dir && !-d $work_dir and unlink($work_dir) || die "cannot use $work_dir as a working directory";
+chmod 0755, $work_dir or system("rm -rf $work_dir");
+-d $work_dir or mkdir $work_dir, 0755 or die "cannot create working directory $work_dir\n";
+chdir $work_dir;
+
+my (%keys, @keys);
+
+opendir DIR, $dir or die "unable to opendir $dir: $!\n";
+while ($_ = readdir DIR) {
+ my ($key) = /(.*)\..*\.rpm$/ or next;
+ system("rpm2header $dir/$_ > $key") unless -e $key;
+ $? == 0 or unlink($key), die "bad rpm $dir/$_\n";
+ $keys{$key} = 1;
+}
+if (-e $depslist) {
+ open F, $depslist;
+ @keys = map { (split)[0] } <F>;
+}
+@keys = grep { delete $keys{$_} } @keys;
+
+open B, "| build_archive $hdlist 400000";
+print B "$_\n" foreach @keys, keys %keys;
+close B or die "build_archive failed\n";
+
+system("rm -rf $work_dir") unless $noclean;
diff --git a/hdlist2prereq.cc b/hdlist2prereq.cc
new file mode 100644
index 0000000..68434f0
--- /dev/null
+++ b/hdlist2prereq.cc
@@ -0,0 +1,53 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <rpm/rpmlib.h>
+#include <rpm/header.h>
+#include <iostream>
+
+
+char *get_name(Header header, int_32 tag) {
+ int_32 type, count;
+ char *name;
+
+ headerGetEntry(header, tag, &type, (void **) &name, &count);
+ return name;
+}
+
+int get_int(Header header, int_32 tag) {
+ int_32 type, count;
+ int *i;
+
+ headerGetEntry(header, tag, &type, (void **) &i, &count);
+ return *i;
+}
+
+int main(int argc, char **argv)
+{
+ if (argc <= 1) {
+ cerr << "usage: hdlist2prereq <hdlist> [<hdlists...>]\n";
+ exit(1);
+ }
+ for (int i = 1; i < argc; i++) {
+ FD_t fd = strcmp(argv[i], "-") == 0 ? fdDup(STDIN_FILENO) : fdOpen(argv[i], O_RDONLY, 0);
+ if (fdFileno(fd) < 0) cerr << "rpmpackdeps: cannot open file " << argv[i] << "\n";
+ else {
+ Header header;
+ int_32 type, count;
+ char **list;
+ int *flags;
+
+ while ((header=headerRead(fd, HEADER_MAGIC_YES))) {
+ char *name = get_name(header, RPMTAG_NAME);
+
+ headerGetEntry(header, RPMTAG_REQUIRENAME, &type, (void **) &list, &count);
+ headerGetEntry(header, RPMTAG_REQUIREFLAGS, &type, (void **) &flags, &count);
+
+ if (flags && list)
+ for(int j = 0; j < count; j++) // separate index: i is the argv loop variable above
+ if (flags[j] & RPMSENSE_PREREQ) printf("%s:%s\n", name, list[j]);
+ }
+ }
+ fdClose(fd);
+ }
+}
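
hdlist2prereq expects a raw header stream (a file name or '-' for stdin) and prints one "name:prereq" line per PreReq dependency. Since the hdlists built above are .cz2 archives, one plausible way to drive it, sketched here with an assumed file name, is to decompress the leading bzip2 blocks on the fly the same way gendepslist2 does:

#!/usr/bin/perl
#- Sketch (assumed hdlist.cz2 built by genhdlist_cz2): group hdlist2prereq's
#- "name:prereq" output per package.
use strict;

my %prereq;
open my $in, "bzip2 -dc hdlist.cz2 2>/dev/null | ./hdlist2prereq - |"
    or die "cannot run hdlist2prereq\n";
while (<$in>) {
    chomp;
    my ($pkg, $dep) = split /:/, $_, 2;
    push @{$prereq{$pkg}}, $dep;
}
close $in;

print "$_: @{$prereq{$_}}\n" foreach sort keys %prereq;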
diff --git a/rpmtools.spec b/rpmtools.spec
index c54fa30..8c77900 100644
--- a/rpmtools.spec
+++ b/rpmtools.spec
@@ -47,6 +47,8 @@ rm -rf $RPM_BUILD_ROOT
 /usr/bin/hdlist2names
 /usr/bin/rpm2header
 /usr/bin/genhdlist_cz2
+/usr/bin/extract_archive
+/usr/bin/build_archive
 
 %files devel
 %defattr(-,root,root)
@@ -55,7 +57,7 @@ rm -rf $RPM_BUILD_ROOT
 %changelog
 * Tue Mar 7 2000 Pixel <pixel@mandrakesoft.com> 1.1-1mdk
 - new version (gendepslist2 instead of gendepslist, hdlist2prereq)
-- missing Requires, must wait for francois to choose the names for build_archive/extract_archive :)
+- host build_archive/extract_archive until francois put them somewhere else :)
 
 * Fri Feb 18 2000 Chmouel Boudjnah <chmouel@mandrakesoft.com> 1.0-9mdk
 - Really fix with rpm-3.0.4 (Fredl).