authorBrian Harring <ferringb@gentoo.org>2005-07-10 09:21:05 +0000
committerBrian Harring <ferringb@gentoo.org>2005-07-10 09:21:05 +0000
commitce7f0c0fbaba0b37df2744e579f67388917dc671 (patch)
tree7ff8d575a1eb66d727a0cd30d410685b241935d2
parentfix gnuconfig find expression #93363 (diff)
downloadportage-cvs-ce7f0c0fbaba0b37df2744e579f67388917dc671.tar.gz
portage-cvs-ce7f0c0fbaba0b37df2744e579f67388917dc671.tar.bz2
portage-cvs-ce7f0c0fbaba0b37df2744e579f67388917dc671.zip
whee. import of embryonic portage rewrite.
config.* code works, but the path to conf_default_types is going to be screwed up; solutions welcome. Personally I like having a working portage, so we can't stomp the existing portage yet (soon, one hopes). Docs from new/* will go into rewrite/, along with other files (bin included). It ain't clean nor perfect, but it's an initial import; we'll sort it out as we go.
-rw-r--r--portage/binpkg/__init__.py0
-rw-r--r--portage/binpkg/repository.py76
-rw-r--r--portage/cache/__init__.py5
-rw-r--r--portage/cache/anydbm.py69
-rw-r--r--portage/cache/cache_errors.py41
-rw-r--r--portage/cache/flat_list.py114
-rw-r--r--portage/cache/fs_template.py74
-rw-r--r--portage/cache/metadata.py110
-rw-r--r--portage/cache/multiplex.py5
-rw-r--r--portage/cache/sql_template.py247
-rw-r--r--portage/cache/sqlite.py67
-rw-r--r--portage/cache/template.py152
-rw-r--r--portage/cache/util.py92
-rw-r--r--portage/chksum/__init__.py47
-rw-r--r--portage/chksum/md5hash.py29
-rw-r--r--portage/chksum/sha1hash.py21
-rw-r--r--portage/config/NewStyle.py182
-rw-r--r--portage/config/__init__.py19
-rw-r--r--portage/config/central.py280
-rw-r--r--portage/config/domain.py12
-rw-r--r--portage/config/errors.py72
-rw-r--r--portage/ebuild/__init__.py5
-rw-r--r--portage/ebuild/ebuild_buildable.py57
-rw-r--r--portage/ebuild/ebuild_internal.py1265
-rw-r--r--portage/ebuild/ebuild_package.py89
-rw-r--r--portage/ebuild/ebuild_repository.py65
-rw-r--r--portage/ebuild/eclass_cache.py77
-rw-r--r--portage/ebuild/processor.py455
-rw-r--r--portage/package/__init__.py6
-rw-r--r--portage/package/cpv.py328
-rw-r--r--portage/package/metadata.py81
-rw-r--r--portage/repository/__init__.py6
-rw-r--r--portage/repository/errors.py14
-rw-r--r--portage/repository/multiplex.py72
-rw-r--r--portage/repository/prototype.py96
-rw-r--r--portage/repository/visibility.py25
-rw-r--r--portage/restrictions/__init__.py5
-rw-r--r--portage/restrictions/restriction.py117
-rw-r--r--portage/restrictions/restrictionSet.py82
-rw-r--r--portage/sync/__init__.py10
-rw-r--r--portage/sync/cvs.py95
-rw-r--r--portage/sync/parseuri.py28
-rw-r--r--portage/sync/rsync.py161
-rw-r--r--portage/sync/snapshot.py147
-rw-r--r--portage/sync/syncexceptions.py11
-rw-r--r--portage/transports/__init__.py9
-rw-r--r--portage/transports/bundled_lib.py363
-rw-r--r--portage/transports/fetchcommand.py72
-rw-r--r--portage/util/IndexableSequence.py97
-rw-r--r--portage/util/__init__.py1
-rw-r--r--portage/util/currying.py9
-rw-r--r--portage/util/dicts.py158
-rw-r--r--portage/util/fs.py519
-rw-r--r--portage/util/misc.py99
-rw-r--r--portage/util/modules.py22
-rw-r--r--portage/vdb/__init__.py1
-rw-r--r--portage/vdb/repository.py76
57 files changed, 6437 insertions, 0 deletions
diff --git a/portage/binpkg/__init__.py b/portage/binpkg/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage/binpkg/__init__.py
diff --git a/portage/binpkg/repository.py b/portage/binpkg/repository.py
new file mode 100644
index 0000000..ca65cce
--- /dev/null
+++ b/portage/binpkg/repository.py
@@ -0,0 +1,76 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/binpkg/repository.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+
+#yoink when fixed
+raise Exception("sorry bub, doesn't work atm due to changes in portage namespace. plsfix kthnx")
+
+import os,stat
+import prototype, errors
+
+#needed to grab the PN
+import portage_versions
+
+class tree(prototype.PrototypeTree):
+ def __init__(self, base):
+ super(tree,self).__init__()
+ self.base = base
+ try:
+ st = os.lstat(self.base)
+ if not stat.S_ISDIR(st.st_mode):
+ raise errors.InitializationError("base not a dir: %s" % self.base)
+				elif not os.access(self.base, os.X_OK|os.R_OK):
+ raise errors.InitializationError("base lacks read/executable: %s" % self.base)
+
+ except OSError:
+ raise errors.InitializationError("lstat failed on base %s" % self.base)
+
+
+ def _get_categories(self, *optionalCategory):
+ # return if optionalCategory is passed... cause it's not yet supported
+ if len(optionalCategory):
+ return {}
+
+ try: return tuple([x for x in os.listdir(self.base) \
+ if stat.S_ISDIR(os.lstat(os.path.join(self.base,x)).st_mode) and x != "All"])
+
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching categories: %s" % str(e))
+
+ def _get_packages(self, category):
+ cpath = os.path.join(self.base,category.lstrip(os.path.sep))
+ #-5 == len(".tbz2")
+ l=[]
+ try:
+ for x in os.listdir(cpath):
+ if x.endswith(".tbz2"):
+ l.append(portage_versions.pkgsplit(x[:-5])[0])
+ return tuple(l)
+
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching packages for category %s: %s" % \
+ (os.path.join(self.base,category.lstrip(os.path.sep)), str(e)))
+
+ def _get_versions(self, catpkg):
+ pkg = catpkg.split("/")[-1]
+ l=[]
+ try:
+ for x in os.listdir(os.path.join(self.base, os.path.dirname(catpkg.lstrip("/").rstrip("/")))):
+ if x.startswith(pkg):
+ ver=portage_versions.pkgsplit(x[:-5])
+
+ #pkgsplit returns -r0, when it's not always there
+ if ver[2] == "r0":
+ if x.endswith(ver[2]+".tbz2"):
+ l.append("%s-%s" % (ver[1], ver[2]))
+ else:
+ l.append(ver[1])
+ else:
+ l.append("%s-%s" % (ver[1], ver[2]))
+ return tuple(l)
+ except (OSError, IOError), e:
+			raise KeyError("failed fetching versions for package %s: %s" % \
+ (os.path.join(self.base,catpkg.lstrip(os.path.sep)), str(e)))
+
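The -r0 juggling in _get_versions above exists because pkgsplit normalizes away a missing revision. Roughly (illustrative values matching the in-code comment, not output from a test run):

    portage_versions.pkgsplit("foo-1.0")     # -> ("foo", "1.0", "r0")
    portage_versions.pkgsplit("foo-1.0-r1")  # -> ("foo", "1.0", "r1")

so a version is only reported with its -rN suffix when the tbz2 filename actually carried one.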
diff --git a/portage/cache/__init__.py b/portage/cache/__init__.py
new file mode 100644
index 0000000..8af161e
--- /dev/null
+++ b/portage/cache/__init__.py
@@ -0,0 +1,5 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/__init__.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
diff --git a/portage/cache/anydbm.py b/portage/cache/anydbm.py
new file mode 100644
index 0000000..5f3242e
--- /dev/null
+++ b/portage/cache/anydbm.py
@@ -0,0 +1,69 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/anydbm.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+anydbm_module = __import__("anydbm")
+import cPickle, os
+import fs_template
+import cache_errors
+
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+
+ default_db = config.get("dbtype","anydbm")
+ if not default_db.startswith("."):
+ default_db = '.' + default_db
+
+ self._db_path = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+default_db)
+ print "opening self._db_path=",self._db_path
+ self.__db = None
+		try:
+			self.__db = anydbm_module.open(self._db_path, "w", self._perms)
+		except anydbm_module.error:
+			# db likely doesn't exist yet- ensure dirs/access, then create it
+			try:
+				self._ensure_dirs()
+				self._ensure_dirs(self._db_path)
+				self._ensure_access(self._db_path)
+			except (OSError, IOError), e:
+				raise cache_errors.InitializationError(self.__class__, e)
+			try:
+				self.__db = anydbm_module.open(self._db_path, "c", self._perms)
+			except anydbm_module.error, e:
+				raise cache_errors.InitializationError(self.__class__, e)
+
+
+ def __getitem__(self, cpv):
+ # we override getitem because it's just a cpickling of the data handed in.
+ return cPickle.loads(self.__db[cpv])
+
+
+ def _setitem(self, cpv, values):
+ self.__db[cpv] = cPickle.dumps(values,cPickle.HIGHEST_PROTOCOL)
+
+ def _delitem(self, cpv):
+ del self.__db[cpv]
+
+
+ def iterkeys(self):
+ return iter(self.__db)
+
+
+ def has_key(self, cpv):
+ return cpv in self.__db
+
+ def commit(self): pass
+
+ def __del__(self):
+ print "keys=",self.__db.keys()
+ self.__db.sync()
+ self.__db.close()
diff --git a/portage/cache/cache_errors.py b/portage/cache/cache_errors.py
new file mode 100644
index 0000000..8a68e31
--- /dev/null
+++ b/portage/cache/cache_errors.py
@@ -0,0 +1,41 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/cache_errors.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+class CacheError(Exception): pass
+
+class InitializationError(CacheError):
+ def __init__(self, class_name, error):
+ self.error, self.class_name = error, class_name
+ def __str__(self):
+ return "Creation of instance %s failed due to %s" % \
+ (self.class_name, str(self.error))
+
+
+class CacheCorruption(CacheError):
+ def __init__(self, key, ex):
+ self.key, self.ex = key, ex
+ def __str__(self):
+ return "%s is corrupt: %s" % (self.key, str(self.ex))
+
+
+class GeneralCacheCorruption(CacheError):
+ def __init__(self,ex): self.ex = ex
+ def __str__(self): return "corruption detected: %s" % str(self.ex)
+
+
+class InvalidRestriction(CacheError):
+ def __init__(self, key, restriction, exception=None):
+ if exception == None: exception = ''
+		self.key, self.restriction, self.ex = key, restriction, exception
+ def __str__(self):
+ return "%s:%s is not valid: %s" % \
+ (self.key, self.restriction, str(self.ex))
+
+
+class ReadOnlyRestriction(CacheError):
+ def __init__(self, info=''):
+ self.info = info
+ def __str__(self):
+ return "cache is non-modifiable"+str(self.info)
diff --git a/portage/cache/flat_list.py b/portage/cache/flat_list.py
new file mode 100644
index 0000000..81549f8
--- /dev/null
+++ b/portage/cache/flat_list.py
@@ -0,0 +1,114 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/flat_list.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import fs_template
+import cache_errors
+import os, stat
+
+# store the current key order *here*.
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ # do not screw with this ordering. _eclasses_ needs to be last
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'IUSE', 'CDEPEND',
+ 'PDEPEND', 'PROVIDE','_eclasses_')
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if len(self._known_keys) > len(self.auxdbkey_order) + 2:
+			raise Exception("fewer ordered keys than auxdbkeys")
+ if not os.path.exists(self.location):
+ self._ensure_dirs()
+
+
+ def _getitem(self, cpv):
+ d = {}
+ try:
+ myf = open(os.path.join(self.location, cpv),"r")
+ for k,v in zip(self.auxdbkey_order, myf):
+ d[k] = v.rstrip("\n")
+ except (OSError, IOError),e:
+ if isinstance(e,IOError) and e.errno == 2:
+# print "caught for %s" % cpv, e
+# l=os.listdir(os.path.dirname(os.path.join(self.location,cpv)))
+# l.sort()
+# print l
+ raise KeyError(cpv)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try: d["_mtime_"] = os.fstat(myf.fileno()).st_mtime
+ except OSError, e:
+ myf.close()
+ raise cache_errors.CacheCorruption(cpv, e)
+ myf.close()
+ return d
+
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try: myf=open(fp, "w")
+ except (OSError, IOError), e:
+ if e.errno == 2:
+ try:
+ self._ensure_dirs(cpv)
+ myf=open(fp,"w")
+ except (OSError, IOError),e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ for x in self.auxdbkey_order:
+ myf.write(values.get(x,"")+"\n")
+
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+ #update written. now we move it.
+ new_fp = os.path.join(self.location,cpv)
+ try: os.rename(fp, new_fp)
+ except (OSError, IOError), e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError, e:
+ if e.errno == 2:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def has_key(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+
+ def iterkeys(self):
+ """generator for walking the dir struct"""
+ dirs = [self.location]
+ len_base = len(self.location)
+ while len(dirs):
+ for l in os.listdir(dirs[0]):
+ if l.endswith(".cpickle"):
+ continue
+ p = os.path.join(dirs[0],l)
+ st = os.lstat(p)
+ if stat.S_ISDIR(st.st_mode):
+ dirs.append(p)
+ continue
+ yield p[len_base+1:]
+ dirs.pop(0)
+
+
+ def commit(self): pass
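To make the flat_list on-disk format concrete: each entry is a plain file at <location>/<category>/<package-version>, one value per line in auxdbkey_order, with the file's mtime doubling as _mtime_. A sketch of reading an entry by hand (hypothetical path, mirroring the zip() in _getitem):

    from portage.cache import flat_list
    entry = open("/var/cache/edb/dep/sys-apps/foo-1.0")
    metadata = dict(zip(flat_list.database.auxdbkey_order,
        [line.rstrip("\n") for line in entry]))
    entry.close()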
diff --git a/portage/cache/fs_template.py b/portage/cache/fs_template.py
new file mode 100644
index 0000000..de0e01f
--- /dev/null
+++ b/portage/cache/fs_template.py
@@ -0,0 +1,74 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/fs_template.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import os
+import template, cache_errors
+from portage.os_data import portage_gid
+
+class FsBased(template.database):
+ """template wrapping fs needed options, and providing _ensure_access as a way to
+ attempt to ensure files have the specified owners/perms"""
+
+ def __init__(self, *args, **config):
+		"""throws InitializationError if needed args aren't specified
+		gid and perms aren't listed due to an oddity in python's currying mechanism
+		gid=portage_gid
+		perms=0664"""
+
+ for x,y in (("gid",portage_gid),("perms",0664)):
+ if x in config:
+ setattr(self, "_"+x, config[x])
+ del config[x]
+ else:
+ setattr(self, "_"+x, y)
+ super(FsBased, self).__init__(*args, **config)
+
+ if self.label.startswith(os.path.sep):
+ # normpath.
+ self.label = os.path.sep + os.path.normpath(self.label).lstrip(os.path.sep)
+
+
+	def _ensure_access(self, path, mtime=-1):
+		"""returns True if it was able to ensure that path is properly chmod'd and chowned, False otherwise.
+		if mtime is specified, attempts to ensure that's correct also"""
+		try:
+			os.chown(path, -1, self._gid)
+			os.chmod(path, self._perms)
+			if mtime != -1:
+				mtime=long(mtime)
+				os.utime(path, (mtime, mtime))
+		except (OSError, IOError):
+			return False
+		return True
+
+ def _ensure_dirs(self, path=None):
+ """with path!=None, ensure beyond self.location. otherwise, ensure self.location"""
+ if path:
+ path = os.path.dirname(path)
+ base = self.location
+ else:
+ path = self.location
+ base='/'
+
+ for dir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep):
+ base = os.path.join(base,dir)
+ if not os.path.exists(base):
+ um=os.umask(0)
+ try:
+ os.mkdir(base, self._perms | 0111)
+ os.chown(base, -1, self._gid)
+ finally:
+ os.umask(um)
+
+
+def gen_label(base, label):
+ """if supplied label is a path, generate a unique label based upon label, and supplied base path"""
+ if label.find(os.path.sep) == -1:
+ return label
+ label = label.strip("\"").strip("'")
+ label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
+ tail = os.path.split(label)[1]
+ return "%s-%X" % (tail, abs(label.__hash__()))
+
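For illustration, gen_label passes plain labels through untouched and flattens path-style labels into a unique-ish name (the hex suffix comes from str.__hash__, so treat it as a placeholder):

    from portage.cache import fs_template
    fs_template.gen_label("/var/cache/edb", "metadata")
    # -> "metadata" (no path separator, returned as-is)
    fs_template.gen_label("/var/cache/edb", "/usr/portage")
    # -> "portage-<HEX>" (tail of the path plus a hash of the whole label)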
diff --git a/portage/cache/metadata.py b/portage/cache/metadata.py
new file mode 100644
index 0000000..d136bf3
--- /dev/null
+++ b/portage/cache/metadata.py
@@ -0,0 +1,110 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/metadata.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import os, stat
+import fs_template
+import cache_errors
+
+# store the current key order *here*.
+class database(fs_template.FsBased):
+ complete_eclass_entries = False
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
+ 'PDEPEND', 'PROVIDE')
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if len(self._known_keys) > len(self.auxdbkey_order):
+			raise Exception("fewer ordered keys than auxdbkeys")
+ if not os.path.exists(self.location):
+ self._ensure_dirs()
+
+
+ def __getitem__(self, cpv):
+ d = {}
+ try:
+ myf = open(os.path.join(self.location, cpv),"r")
+ for k,v in zip(self.auxdbkey_order, myf):
+ d[k] = v.rstrip("\n")
+ except (OSError, IOError),e:
+ if isinstance(e,IOError) and e.errno == 2:
+ raise KeyError(cpv)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try: d["_mtime_"] = os.lstat(os.path.join(self.location, cpv)).st_mtime
+		except OSError, e: raise cache_errors.CacheCorruption(cpv, e)
+ return d
+
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try: myf=open(fp, "w")
+ except (OSError, IOError), e:
+ if e.errno == 2:
+ try:
+ self._ensure_dirs(cpv)
+ myf=open(fp,"w")
+ except (OSError, IOError),e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+# try:
+# s = os.path.split(cpv)
+# if len(s[0]) == 0:
+# s = s[1]
+# else:
+# s = s[0]
+# os._ensure_dirs(s)
+#
+# except (OSError, IOError), e:
+
+ myf.writelines( [ values.get(x,"")+"\n" for x in self.auxdbkey_order] )
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+ #update written. now we move it.
+ new_fp = os.path.join(self.location,cpv)
+ try: os.rename(fp, new_fp)
+ except (OSError, IOError), e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError, e:
+ if e.errno == 2:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def has_key(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+
+ def iterkeys(self):
+ """generator for walking the dir struct"""
+ dirs = [self.location]
+ len_base = len(self.location)
+ while len(dirs):
+ for l in os.listdir(dirs[0]):
+ if l.endswith(".cpickle"):
+ continue
+ p = os.path.join(dirs[0],l)
+ st = os.lstat(p)
+ if stat.S_ISDIR(st.st_mode):
+ dirs.append(p)
+ continue
+ yield p[len_base+1:]
+ dirs.pop(0)
+
diff --git a/portage/cache/multiplex.py b/portage/cache/multiplex.py
new file mode 100644
index 0000000..826cf09
--- /dev/null
+++ b/portage/cache/multiplex.py
@@ -0,0 +1,5 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/multiplex.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
diff --git a/portage/cache/sql_template.py b/portage/cache/sql_template.py
new file mode 100644
index 0000000..1cb11bd
--- /dev/null
+++ b/portage/cache/sql_template.py
@@ -0,0 +1,247 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/sql_template.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import template, cache_errors
+
+class SQLDatabase(template.database):
+ """template class for RDBM based caches
+
+ This class is designed such that derivatives don't have to change much code, mostly constant strings.
+ _BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived
+ from.
+
+	SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified dependent on the RDBMS, as should SCHEMA_PACKAGE_CREATE-
+ basically you need to deal with creation of a unique pkgid. If the dbapi2 rdbms class has a method of
+ recovering that id, then modify _insert_cpv to remove the extra select.
+
+ Creation of a derived class involves supplying _initdb_con, and table_exists.
+ Additionally, the default schemas may have to be modified.
+ """
+
+ SCHEMA_PACKAGE_NAME = "package_cache"
+ SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\
+ pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
+ SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
+
+ SCHEMA_VALUES_NAME = "values_cache"
+ SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
+ key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
+ SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
+ SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
+
+ _BaseError = ()
+ _dbClass = None
+
+ autocommits = False
+
+ # boolean indicating if the derived RDBMS class supports replace syntax
+ _supports_replace = False
+
+ def __init__(self, label, auxdbkeys, **config):
+ """initialize the instance.
+ derived classes shouldn't need to override this"""
+
+ super(SQLDatabase, self).__init__(label, auxdbkeys, **config)
+
+ config.setdefault("host","127.0.0.1")
+ config.setdefault("autocommit", self.autocommits)
+ self._initdb_con(config)
+
+ self.label = self._sfilter(self.label)
+
+
+ def _dbconnect(self, config):
+ """should be overridden if the derived class needs special parameters for initializing
+ the db connection, or cursor"""
+ self.db = self._dbClass(**config)
+ self.con = self.db.cursor()
+
+
+ def _initdb_con(self,config):
+		"""ensure needed tables are in place.
+		If the derived class needs a different set of table creation commands, overload the appropriate
+		SCHEMA_ attributes.  If it needs additional execution beyond that, override this method"""
+
+ self._dbconnect(config)
+ if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_PACKAGE_NAME)
+ try: self.con.execute(self.SCHEMA_PACKAGE_CREATE)
+ except self._BaseError, e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ if not self._table_exists(self.SCHEMA_VALUES_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_VALUES_NAME)
+ try: self.con.execute(self.SCHEMA_VALUES_CREATE)
+ except self._BaseError, e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+
+ def _table_exists(self, tbl):
+ """return true if a table exists
+ derived classes must override this"""
+ raise NotImplementedError
+
+
+ def _sfilter(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
+
+
+ def _getitem(self, cpv):
+ try: self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label, self._sfilter(cpv)))
+ except self._BaseError, e:
+			raise cache_errors.CacheCorruption(cpv, e)
+
+ rows = self.con.fetchall()
+
+ if len(rows) == 0:
+ raise KeyError(cpv)
+
+ vals = dict([(k,"") for k in self._known_keys])
+ vals.update(dict(rows))
+ return vals
+
+
+ def _delitem(self, cpv):
+ """delete a cpv cache entry
+ derived RDBM classes for this *must* either support cascaded deletes, or
+ override this method"""
+ try:
+ try:
+ self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ if self.autocommits:
+ self.commit()
+ except self._BaseError, e:
+				raise cache_errors.CacheCorruption(cpv, e)
+ if self.con.rowcount <= 0:
+ raise KeyError(cpv)
+ except Exception:
+ if not self.autocommits:
+ self.db.rollback()
+			# yes, this can roll back a lot more than just the delete. deal.
+ raise
+
+ def __del__(self):
+ # just to be safe.
+ self.commit()
+ self.db.close()
+
+ def _setitem(self, cpv, values):
+
+ try:
+ # insert.
+ try: pkgid = self._insert_cpv(cpv)
+ except self._BaseError, e:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ # __getitem__ fills out missing values,
+ # so we store only what's handed to us and is a known key
+ db_values = []
+ for key in self._known_keys:
+ if values.has_key(key):
+ db_values.append({"key":key, "value":values[key]})
+
+ if len(db_values) > 0:
+ try: self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
+ (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
+ except self._BaseError, e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ if self.autocommits:
+ self.commit()
+
+ except Exception:
+ if not self.autocommits:
+ try: self.db.rollback()
+ except self._BaseError: pass
+ raise
+
+
+ def _insert_cpv(self, cpv):
+ """uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
+ doesn't support auto-increment columns for pkgid.
+		returns the cpv's new pkgid
+ note this doesn't commit the transaction. The caller is expected to."""
+
+		if self._supports_replace:
+			query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
+		else:
+			# just delete it- do so before quoting cpv, since __delitem__ quotes it again
+			try: del self[cpv]
+			except (cache_errors.CacheCorruption, KeyError): pass
+			query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
+		cpv = self._sfilter(cpv)
+ try:
+ self.con.execute(query_str % (self.label, cpv))
+ except self._BaseError:
+ self.db.rollback()
+ raise
+ self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, cpv))
+
+		if self.con.rowcount != 1:
+			raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found "
+				" %i matches upon the following select!" % self.con.rowcount)
+ return self.con.fetchone()[0]
+
+
+ def has_key(self, cpv):
+ if not self.autocommits:
+ try: self.commit()
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try: self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+ return self.con.rowcount > 0
+
+
+ def iterkeys(self):
+ if not self.autocommits:
+ try: self.commit()
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try: self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
+ (self.SCHEMA_PACKAGE_NAME, self.label))
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+# return [ row[0] for row in self.con.fetchall() ]
+ for x in self.con.fetchall():
+ yield x[0]
+
+ def commit(self):
+ self.db.commit()
+
+ def get_matches(self,match_dict):
+ query_list = []
+ for k,v in match_dict.items():
+ if k not in self._known_keys:
+ raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
+ v = v.replace("%","\\%")
+ v = v.replace(".*","%")
+ query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
+
+ if len(query_list):
+ query = " AND "+" AND ".join(query_list)
+ else:
+ query = ''
+
+ print "query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query)
+ try: self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
+ (self.label, query))
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ return [ row[0] for row in self.con.fetchall() ]
+
diff --git a/portage/cache/sqlite.py b/portage/cache/sqlite.py
new file mode 100644
index 0000000..925408f
--- /dev/null
+++ b/portage/cache/sqlite.py
@@ -0,0 +1,67 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/sqlite.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+sqlite_module =__import__("sqlite")
+import os
+import sql_template, fs_template
+import cache_errors
+
+class database(fs_template.FsBased, sql_template.SQLDatabase):
+
+ SCHEMA_DELETE_NAME = "delete_package_values"
+ SCHEMA_DELETE_TRIGGER = """CREATE TRIGGER %s AFTER DELETE on %s
+ begin
+ DELETE FROM %s WHERE pkgid=old.pkgid;
+ end;""" % (SCHEMA_DELETE_NAME, sql_template.SQLDatabase.SCHEMA_PACKAGE_NAME,
+ sql_template.SQLDatabase.SCHEMA_VALUES_NAME)
+
+ _BaseError = sqlite_module.Error
+ _dbClass = sqlite_module
+ _supports_replace = True
+
+ def _dbconnect(self, config):
+ self._dbpath = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+".sqldb")
+ try:
+ self.db = sqlite_module.connect(self._dbpath, mode=self._perms, autocommit=False)
+ if not self._ensure_access(self._dbpath):
+ raise cache_errors.InitializationError(self.__class__, "can't ensure perms on %s" % self._dbpath)
+ self.con = self.db.cursor()
+ except self._BaseError, e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+
+ def _initdb_con(self, config):
+ sql_template.SQLDatabase._initdb_con(self, config)
+ try:
+ self.con.execute("SELECT name FROM sqlite_master WHERE type=\"trigger\" AND name=%s" % \
+ self._sfilter(self.SCHEMA_DELETE_NAME))
+ if self.con.rowcount == 0:
+				self.con.execute(self.SCHEMA_DELETE_TRIGGER)
+ self.db.commit()
+ except self._BaseError, e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ def _table_exists(self, tbl):
+		"""return True/False dependent on tbl existing"""
+ try: self.con.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" %
+ self._sfilter(tbl))
+ except self._BaseError, e:
+ # XXX crappy.
+ return False
+ return len(self.con.fetchall()) == 1
+
+ # we can do it minus a query via rowid.
+ def _insert_cpv(self, cpv):
+ cpv = self._sfilter(cpv)
+ try: self.con.execute(self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1) % \
+ (self.label, cpv))
+ except self._BaseError, e:
+ raise cache_errors.CacheCorruption(cpv, "tried to insert a cpv, but failed: %s" % str(e))
+
+ # sums the delete also
+ if self.con.rowcount <= 0 or self.con.rowcount > 2:
+			raise cache_errors.CacheCorruption(cpv, "tried to insert a cpv, but failed- %i rows modified" % self.con.rowcount)
+ return self.con.lastrowid
+
diff --git a/portage/cache/template.py b/portage/cache/template.py
new file mode 100644
index 0000000..3215b4b
--- /dev/null
+++ b/portage/cache/template.py
@@ -0,0 +1,152 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/template.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import cache_errors, copy
+
+class database(object):
+ # this is for metadata/cache transfer.
+	# basically flags that the cache needs to be updated when transferring cache to cache.
+	# leave this.
+
+	complete_eclass_entries = True
+ autocommits = False
+
+ def __init__(self, location, label, auxdbkeys, readonly=False):
+ """ initialize the derived class; specifically, store label/keys"""
+ self._known_keys = auxdbkeys
+ self.location = location
+ self.label = label
+ self.readonly = readonly
+ self.sync_rate = 0
+ self.updates = 0
+
+
+ def __getitem__(self, cpv):
+		"""get a cpv's values.
+		This shouldn't be overridden in derived classes since it handles the _eclasses_ conversion.
+		that said, if the class handles it, it can override it."""
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+ d=self._getitem(cpv)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(d["_eclasses_"])
+ return d
+
+ def _getitem(self, cpv):
+ """get cpv's values.
+		override this in derived classes"""
+ raise NotImplementedError
+
+
+ def __setitem__(self, cpv, values):
+ """set a cpv to values
+		This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ d=copy.copy(values)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+ self._setitem(cpv, d)
+ if not self.autocommits:
+ self.updates += 1
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+
+ def _setitem(self, name, values):
+ """__setitem__ calls this after readonly checks. override it in derived classes
+		note the _eclasses_ key *must* be handled"""
+ raise NotImplementedError
+
+
+ def __delitem__(self, cpv):
+ """delete a key from the cache.
+		This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if not self.autocommits:
+ self.updates += 1
+ self._delitem(cpv)
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+
+ def _delitem(self,cpv):
+ """__delitem__ calls this after readonly checks. override it in derived classes"""
+ raise NotImplementedError
+
+
+ def has_key(self, cpv):
+ raise NotImplementedError
+
+
+ def keys(self):
+ return tuple(self.iterkeys())
+
+ def iterkeys(self):
+ raise NotImplementedError
+
+ def sync(self, rate=0):
+ self.sync_rate = rate
+ if(rate == 0):
+ self.commit()
+
+ def commit(self):
+ raise NotImplementedError
+
+ def get_matches(self, match_dict):
+ """generic function for walking the entire cache db, matching restrictions to
+ filter what cpv's are returned. Derived classes should override this if they
+	can implement a faster method than pulling each cpv:values, and checking it.
+
+ For example, RDBMS derived classes should push the matching logic down to the
+ actual RDBM."""
+
+ import re
+ restricts = {}
+ for key,match in match_dict.iteritems():
+ # XXX this sucks.
+ try:
+ if isinstance(match, str):
+ restricts[key] = re.compile(match).match
+ else:
+ restricts[key] = re.compile(match[0],match[1]).match
+			except re.error, e:
+				raise cache_errors.InvalidRestriction(key, match, e)
+			if key not in self._known_keys:
+				raise cache_errors.InvalidRestriction(key, match, "Key isn't valid")
+
+ for cpv in self.keys():
+ cont = True
+ vals = self[cpv]
+ for key, match in restricts.iteritems():
+ if not match(vals[key]):
+ cont = False
+ break
+ if cont:
+# yield cpv,vals
+ yield cpv
+
+
+def serialize_eclasses(eclass_dict):
+ """takes a dict, returns a string representing said dict"""
+ return "\t".join(["%s\t%s\t%s" % (k, v[0], str(v[1])) for k,v in eclass_dict.items()])
+
+def reconstruct_eclasses(eclass_string):
+ """returns a dict when handed a string generated by serialize_eclasses"""
+	eclasses = eclass_string.strip().split("\t")
+ if eclasses == [""]:
+ # occasionally this occurs in the fs backends. they suck.
+ return {}
+ if len(eclasses) % 3 != 0:
+		raise cache_errors.GeneralCacheCorruption("_eclasses_ was of invalid len %i" % len(eclasses))
+ d={}
+ for x in range(0, len(eclasses), 3):
+ d[eclasses[x]] = (eclasses[x + 1], long(eclasses[x + 2]))
+ del eclasses
+ return d
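To make the template contract concrete, a minimal sketch of a derived backend- an in-memory dict store (hypothetical, not part of this patch; it only fills in the hooks template.database leaves as NotImplementedError):

    from portage.cache import template

    class database(template.database):
        autocommits = True

        def __init__(self, location, label, auxdbkeys, readonly=False):
            super(database, self).__init__(location, label, auxdbkeys, readonly)
            self._data = {}

        # template.database's __getitem__/__setitem__/__delitem__ handle the
        # _eclasses_ conversion and readonly checks, then defer to these:
        def _getitem(self, cpv):
            return self._data[cpv].copy()
        def _setitem(self, cpv, values):
            self._data[cpv] = values.copy()
        def _delitem(self, cpv):
            del self._data[cpv]

        def has_key(self, cpv):
            return cpv in self._data
        def iterkeys(self):
            return iter(self._data.keys())
        def commit(self):
            pass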
diff --git a/portage/cache/util.py b/portage/cache/util.py
new file mode 100644
index 0000000..9aab6dd
--- /dev/null
+++ b/portage/cache/util.py
@@ -0,0 +1,92 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/cache/util.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import cache_errors
+
+def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None, verbose_instance=None):
+
+ if not src_cache.complete_eclass_entries and not eclass_cache:
+		raise Exception("eclass_cache required for caches of class %s!" % src_cache.__class__)
+
+ if verbose_instance == None:
+ noise=quiet_mirroring()
+ else:
+ noise=verbose_instance
+
+	# dict.fromkeys returns a new dict rather than updating in place
+	dead_nodes = dict.fromkeys(trg_cache.keys())
+ count=0
+
+ if not trg_cache.autocommits:
+ trg_cache.sync(100)
+
+ for x in valid_nodes_iterable:
+# print "processing x=",x
+ count+=1
+ if dead_nodes.has_key(x):
+ del dead_nodes[x]
+ try: entry = src_cache[x]
+ except KeyError, e:
+ noise.missing_entry(x)
+ del e
+ continue
+ if entry.get("INHERITED",""):
+ if src_cache.complete_eclass_entries:
+ if not "_eclasses_" in entry:
+ noise.corruption(x,"missing _eclasses_ field")
+ continue
+ if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
+ noise.eclass_stale(x)
+ continue
+ else:
+ entry["_eclasses_"] = eclass_cache.get_eclass_data(entry["INHERITED"].split(), \
+ from_master_only=True)
+ if not entry["_eclasses_"]:
+ noise.eclass_stale(x)
+ continue
+
+ # by this time, if it reaches here, the eclass has been validated, and the entry has
+ # been updated/translated (if needs be, for metadata/cache mainly)
+ try: trg_cache[x] = entry
+ except cache_errors.CacheError, ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+ if count >= noise.call_update_min:
+ noise.update(x)
+ count = 0
+
+ if not trg_cache.autocommits:
+ trg_cache.commit()
+
+ # ok. by this time, the trg_cache is up to date, and we have a dict
+ # with a crapload of cpv's. we now walk the target db, removing stuff if it's in the list.
+ for key in dead_nodes:
+ try: del trg_cache[key]
+ except cache_errors.CacheError, ce:
+ noise.exception(ce)
+ del ce
+ dead_nodes.clear()
+ del noise
+
+
+class quiet_mirroring(object):
+ # call_update_every is used by mirror_cache to determine how often to call in.
+ # quiet defaults to 2^24 -1. Don't call update, 'cept once every 16 million or so :)
+ call_update_min = 0xffffff
+ def update(self,key,*arg): pass
+ def exception(self,key,*arg): pass
+ def eclass_stale(self,*arg): pass
+ def missing_entry(self, key): pass
+ def misc(self,key,*arg): pass
+ def corruption(self, key, s): pass
+
+class non_quiet_mirroring(quiet_mirroring):
+ call_update_min=1
+ def update(self,key,*arg): print "processed",key
+ def exception(self, key, *arg): print "exec",key,arg
+	def missing_entry(self,key): print "key %s is missing" % key
+ def corruption(self,key,*arg): print "corrupt %s:" % key,arg
+ def eclass_stale(self,key,*arg):print "stale %s:"%key,arg
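Typical mirror_cache usage- walk a set of cpv's, copying entries from one backend into another (a sketch; auxdbkeys, valid_cpvs, and ec are stand-ins the caller must supply):

    from portage.cache import metadata, flat_list, util

    src = metadata.database("/usr/portage", "metadata/cache", auxdbkeys)
    trg = flat_list.database("/var/cache/edb/dep", "/usr/portage", auxdbkeys)
    util.mirror_cache(iter(valid_cpvs), src, trg, eclass_cache=ec,
        verbose_instance=util.non_quiet_mirroring())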
diff --git a/portage/chksum/__init__.py b/portage/chksum/__init__.py
new file mode 100644
index 0000000..ae15b6d
--- /dev/null
+++ b/portage/chksum/__init__.py
@@ -0,0 +1,47 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/chksum/__init__.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import os
+
+chksum_types = {}
+__inited__ = False
+
+def init(additional_handlers={}):
+ """init the chksum subsystem. scan the dir, find what handlers are available, etc.
+ if desired to register additional, or override existing, pass in a dict of type:func"""
+ import sys, os, logging
+
+ if not isinstance(additional_handlers, dict):
+ raise TypeError("additional handlers must be a dict!")
+
+	global __inited__
+	chksum_types.clear()
+	__inited__ = False
+ loc = os.path.dirname(sys.modules[__name__].__file__)
+ for f in os.listdir(loc):
+ if not f.endswith(".py") or f.startswith("__init__."):
+ continue
+ try:
+ i = f.find(".")
+ if i != -1: f = f[:i]
+ del i
+ m = __import__(f)
+ except ImportError:
+ continue
+ try:
+ types = getattr(m, "chksum_types")
+ except AttributeError:
+ # no go.
+ continue
+ try:
+ for name, chf in types:
+ chksum_types[name] = chf
+
+ except ValueError:
+ logging.warn("%s.%s invalid chksum_types, ValueError Exception" % (__name__, f))
+ continue
+
+ chksum_types.update(additional_handlers)
+
+ __inited__ = True
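Intended use of the registry (a sketch; the path and digest are placeholders):

    from portage import chksum

    chksum.init()
    # md5hash.py/sha1hash.py registered their handlers via their chksum_types tuples
    ok = chksum.chksum_types["md5"]("/path/to/distfile",
        "d41d8cd98f00b204e9800998ecf8427e")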
diff --git a/portage/chksum/md5hash.py b/portage/chksum/md5hash.py
new file mode 100644
index 0000000..3b136a0
--- /dev/null
+++ b/portage/chksum/md5hash.py
@@ -0,0 +1,29 @@
+# Copyright: 2004-2005 Gentoo Foundation
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/chksum/md5hash.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+
+# We _try_ to load this module. If it fails we do the slow fallback.
+try:
+ import fchksum
+
+ def md5hash(filename, chksum):
+ return fchksum.fmd5t(filename)[0] == chksum
+
+except ImportError:
+ import md5
+ def md5hash(filename, chksum):
+ f = open(filename, 'rb')
+ blocksize=32768
+ data = f.read(blocksize)
+ size = 0L
+ sum = md5.new()
+ while data:
+ sum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ f.close()
+
+ return sum.hexdigest() == chksum
+
+chksum_types = (("md5", md5hash),)
diff --git a/portage/chksum/sha1hash.py b/portage/chksum/sha1hash.py
new file mode 100644
index 0000000..b0fbea4
--- /dev/null
+++ b/portage/chksum/sha1hash.py
@@ -0,0 +1,21 @@
+# Copyright: 2004-2005 Gentoo Foundation
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/chksum/sha1hash.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import sha
+
+def sha1hash(filename, chksum):
+ f = open(filename, 'rb')
+ blocksize=32768
+ data = f.read(blocksize)
+ size = 0L
+ sum = sha.new()
+ while data:
+ sum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ f.close()
+
+ return sum.hexdigest() == chksum
+
+chksum_types = (("sha1", sha1hash),)
diff --git a/portage/config/NewStyle.py b/portage/config/NewStyle.py
new file mode 100644
index 0000000..24cbf5e
--- /dev/null
+++ b/portage/config/NewStyle.py
@@ -0,0 +1,182 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/config/Attic/NewStyle.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import logging
+import errors
+from ConfigParser import ConfigParser
+from portage_const import CONF_DEFAULTS
+from portage.util import modules
+
+# default class settings for types. dict, loaded when needed
+# {type:class} <-- strings.
+default_classes = None
+section_settings = None
+
+class config:
+ """Global Config representation, based around a ConfigParser. Section instantiation occurs on demand"""
+
+ # these are options available to all sections, that have special meaning to the parser, and are filtered
+ # iow, the callable doesn't see it. inherit, type, class are also filtered
+ filter_opts = ["package.keywords", "package.mask", "package.unmask", "package.use"]
+
+ def __init__(self, cparser, parser_config=None):
+ self._cparser = cparser
+ if parser_config == None:
+ parser_config = load_section_settings()
+ self._parser_defaults = parser_config
+ self.__domains = {}
+ # auto exec.
+ s = cparser.sections()
+
+		for x in s:
+			if not cparser.has_option(x, "type"):
+				continue
+			val = cparser.get(x, "type").lower()
+			if val == "exec":
+				# do something a bit more.
+				logging.error("skipping section %s of exec type, don't know how to deal with it" % x)
+			cparser.set(x, "type", val)
+
+
+ def _find_sections(self, type):
+ l = []
+ for x in self._cparser.sections():
+			if not self._cparser.has_option(x, "type"):
+ continue
+ if self._cparser.get(x, "type").lower() == type:
+ l.append(x)
+ return l
+
+ def default_domain(self):
+ l = self.domains()
+ if len(l) == 1:
+ return l[0]
+		d = self._cparser.defaults().get("domain")
+ return d
+
+ def get_domain(self, domain):
+ if domain in self.__domains:
+ return self.__domains[domain]
+
+		if not self._cparser.has_section(domain) or self._cparser.get(domain, 'type') != "domain":
+ raise KeyError("domain %s doesn't exist" % domain)
+
+
+ def load_repositories(self, repositories=None):
+ """instantiate repositories. either load a list of repository names (section titles passed
+ in via repositories=[]), or load all.
+ Chucks KeyError if a requested repository isn't in this config. or
+ portage.repository.errors.BaseException derivatives"""
+
+ repos = self.repositories()
+		if repositories == None:
+			repositories = repos
+ else:
+ # XXX note this is quadratic.
+ for x in repositories:
+ if x not in repos:
+ raise KeyError(x)
+ for x in repositories:
+ if x not in self._repo_instances:
+ self._repo_instances[x], opts = self._instantiate_section(x)
+				if opts:
+					# XXX unfinished- handle the filtered opts here
+					pass
+
+
+ def _instantiate_section(self, section, additional_filter_list=[]):
+ """handler for instantiating sections defined by class, using default if available.
+ Throws UndefinedTypeError, InstantiationError
+ returns (obj, filtered opts)"""
+ assert section in self._cparser.sections()
+		confdict = self._collapse_section(section)
+ if not "type" in confdict:
+ raise errors.UndefinedTypeError(section)
+ defaults = load_defaults()
+
+ # pull what's needed, cleaning up confdict in the process
+ type = confdict["type"]
+ del confdict["type"]
+		if "class" in confdict:
+			cls_name = confdict["class"]
+			del confdict["class"]
+		else:
+			if type not in defaults:
+				raise errors.ClassRequired(section, type)
+			cls_name = defaults[type]
+
+		removed_opts = []
+		for x in self.filter_opts + additional_filter_list:
+			if x in confdict:
+				removed_opts.append(x)
+				del confdict[x]
+		# load the callable. must actually be callable too, 'coz we check up on it. >:)
+		from inspect import isroutine, isclass
+		callable = modules.load_attribute(cls_name)
+		if not isclass(callable) and not isroutine(callable):
+			raise errors.InstantiationError(cls_name, [], confdict,
+				TypeError("%r is not a class/callable" % (callable,)))
+
+ sect_settings = load_section_settings()
+ if type in sect_settings:
+ if 'instantiate' in sect_settings[type]:
+ for x in sect_settings[type]['instantiate'].split():
+				if x in confdict:
+					# XXX unfinished- handle per-type instantiate settings here
+					pass
+
+ try: obj = callable(**confdict)
+ except Exception, e:
+ if isinstance(e, RuntimeError) or isinstance(e, SystemExit):
+ raise
+			raise errors.InstantiationError(cls_name, [], confdict, e)
+		if obj == None:
+			raise errors.InstantiationError(cls_name, [], confdict,
+				errors.NoObjectReturned(cls_name))
+
+ return obj, removed_opts
+
+ def _collapse_section(self, section, defaults={}):
+ """given a top level section, walks the section's inherit's, returning a dict.
+ defaults if set, must be dict, and are just that, defaults."""
+
+ assert isinstance(defaults, dict)
+		if len(defaults): defaults = defaults.copy()
+
+ slist = [section]
+ while self._cparser.has_option(slist[-1], "inherit"):
+ newsect = self._cparser.get(slist[-1], "inherit")
+ if not self._cparser.has_section(newsect):
+ raise errors.InheritError(slist[-1], newsect)
+ slist.append(newsect)
+
+		# walk list in reverse, pulling items while working down the list.
+		while len(slist):
+			d = dict(self._cparser.items(slist[-1]))
+			# do whatever mangling would occur, here (cleansing inherit fex)
+			defaults.update(d)
+			slist.pop(-1)
+		if "inherit" in defaults: del defaults["inherit"]
+		return defaults
+
+ def domains(self):
+ return self._find_sections("domain")
+
+ def repositories(self):
+ return self._find_sections("repo")
+
+ def configs(self):
+ return self._find_sections("config")
+
+
+def load_section_settings():
+ """load iff needed default class definitions, returning dict of type:class"""
+ global section_settings
+ if section_settings != None:
+ return section_settings
+	c=ConfigParser()
+	c.read(CONF_DEFAULTS)
+	ds = {}
+	for x in c.sections():
+		d2 = dict(c.items(x))
+		if len(d2):
+			ds[x] = d2
+	section_settings = ds
+ return section_settings
+
diff --git a/portage/config/__init__.py b/portage/config/__init__.py
new file mode 100644
index 0000000..ad7ad1d
--- /dev/null
+++ b/portage/config/__init__.py
@@ -0,0 +1,19 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/config/__init__.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import ConfigParser
+import central, os
+from portage.const import DEFAULT_CONF_FILE
+
+def load_config(file=DEFAULT_CONF_FILE):
+ c = ConfigParser.ConfigParser()
+ if os.path.isfile(file):
+ c.read(file)
+ c = central.config(c)
+ else:
+ # make.conf...
+ raise Exception("sorry, default '%s' doesn't exist, and I don't like make.conf currently (I'm working out my issues however)" %
+ file)
+ return c
diff --git a/portage/config/central.py b/portage/config/central.py
new file mode 100644
index 0000000..813b99b
--- /dev/null
+++ b/portage/config/central.py
@@ -0,0 +1,280 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/config/central.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import errors
+from portage.const import CONF_DEFAULTS
+from portage.util.modules import load_attribute
+from ConfigParser import ConfigParser
+import new
+from portage.util.dicts import LazyValDict
+from portage.util.currying import curry
+
+class config:
+ def __init__(self, cparser, conf_defaults=CONF_DEFAULTS):
+ self._cparser = cparser
+ self.type_handler = load_conf_definitions(conf_defaults)
+ self.type_conversions = {}
+ # add auto exec shit
+ # weakref .instantiated?
+ self.instantiated = {}
+ for t in self.type_handler:
+ from string import lower
+ for x in ("required", "incrementals", "defaults", "section_ref", "positional"):
+ self.type_handler[t][x] = tuple(map(lower, list_parser(self.type_handler[t].get(x,""))))
+
+ conversions = {}
+ for x,f in (("list", list_parser), ("str", str_parser), ("bool", bool_parser)):
+ if x in self.type_handler[t]:
+ for y in list_parser(self.type_handler[t][x]):
+ conversions[y.lower()] = f
+ del self.type_handler[t][x]
+
+ if "positional" in self.type_handler[t]:
+ for x in self.type_handler[t]["positional"]:
+ if x not in self.type_handler[t]["required"]:
+ raise errors.BrokenSectionDefinition(t, "position '%s' is not listed in required" % x)
+
+ conversions["_default_"] = str_parser
+ self.type_conversions[t] = conversions
+
+ # cleanup, covering ass of default conf definition author (same parsing rules applied to conf, applied to default)
+ # without this, it's possible for a section definitions conf to make errors appear via defaults in a config, if
+ # the section definition is fscked.
+	# work out a checkup of defaults + conversions for config definition; right now we allow that possibility
+ # to puke while inspecting user config (the passed in cparser)
+# for k,v in self.type_handler[t].items():
+# try:
+# self.type_handler[t][k] = str_parser(v)
+# except errors.QuoteInterpretationError, qe:
+# qe.var = v
+# raise qe
+ setattr(self, t, LazyValDict(curry(self.sections, t), self.instantiate_section))
+
+ def collapse_config(self, section, verify=True):
+ """collapse a section's config down to a dict for use in instantiating that section.
+ verify controls whether sanity checks specified by the section type are enforced.
+ required and section_ref fex, are *not* verified if this is False."""
+ if not self._cparser.has_section(section):
+ raise KeyError("%s not a valid section" % section)
+ if not self._cparser.has_option(section, "type"):
+ raise errors.UnknownTypeRequired("Not set")
+ type = str_parser(self._cparser.get(section, "type"))
+ if not self.type_handler.has_key(type):
+ raise errors.UnknownTypeRequired(type)
+
+ slist = [section]
+
+ # first map out inherits.
+ i=0;
+ while i < len(slist):
+ if self._cparser.has_option(slist[i], "inherit"):
+ slist.extend(list_parser(self._cparser.get(slist[i], "inherit")))
+ if not self._cparser.has_section(slist[i]):
+ raise errors.InheritError(slist[i-1], slist[i])
+ i+=1
+ # collapse, honoring incrementals.
+		# remember that inherits are l->r.  So the slist above works with incrementals,
+ # and default overrides (doesn't look it, but it does. tree isn't needed, list suffices)
+ incrementals = self.type_handler[type]["incrementals"]
+ conversions = self.type_conversions[type]
+
+ cleanse_inherit = len(slist) > 1
+
+ d={}
+ default_conversion = conversions["_default_"]
+ while len(slist):
+ d2 = dict(self._cparser.items(slist[-1]))
+ # conversions, baby.
+ for x in d2.keys():
+ try:
+					# note dict.get isn't a ternary op; this just uses the key's own
+					# conversion if one exists, else the default conversion.
+ d2[x] = conversions.get(x, default_conversion)(d2[x])
+ except errors.QuoteInterpretationError, qe:
+					qe.var = x
+ raise qe
+ for x in incrementals:
+ if x in d2 and x in d:
+ d2[x] = d[x] + d2[x]
+
+ d.update(d2)
+ slist.pop(-1)
+
+ if cleanse_inherit:
+ del d["inherit"]
+
+ d["type"] = type
+ default_conversion = conversions["_default_"]
+ for x in self.type_handler[type]["defaults"]:
+ if x not in d:
+ if x == "label":
+ d[x] = section
+ continue
+ # XXX yank the checks later, see __init__ for explanation of default + conversions + section conf possibility
+ try:
+ d[x] = conversions.get(x, default_conversion)(self.type_handler[type][x])
+ except errors.QuoteInterpretationError, qe:
+					qe.var = x
+ raise qe
+
+ if verify:
+ for var in self.type_handler[type]["required"]:
+ if var not in d:
+ raise errors.RequiredSetting(type, section, var)
+ for var in self.type_handler[type]["section_ref"]:
+ if var in d:
+ if isinstance(d[var], list):
+ for sect_label in d[var]:
+ if not self._cparser.has_section(sect_label):
+ raise errors.SectionNotFound(section, var, sect_label)
+ elif not self._cparser.has_section(d[var]):
+					raise errors.SectionNotFound(section, var, d[var])
+
+ return d
+
+ def instantiate_section(self, section, conf=None, allow_reuse=True):
+ """make a section config into an actual object.
+ if conf is specified, allow_reuse is forced to false.
+ if conf isn't specified, it's pulled via collapse_config
+ allow_reuse is used for controlling whether existing instantiations of that section can be reused or not."""
+ if not self._cparser.has_section(section):
+ raise KeyError("%s not a valid section" % section)
+ if conf == None:
+ if section in self.instantiated:
+ return self.instantiated[section]
+ conf = self.collapse_config(section)
+ else:
+ allow_reuse = False
+
+ if "type" not in conf:
+ raise errors.UnknownTypeRequired(section)
+ type = conf["type"]
+ del conf["type"]
+
+ if "class" not in conf:
+ raise errors.ClassRequired(section, type)
+ cls_name = conf["class"]
+ del conf["class"]
+
+ callable = load_attribute(cls_name)
+ from inspect import isclass, isroutine
+ if not (isclass(callable) or isroutine(callable)):
+ raise errors.InstantiationError(cls_name, [], conf,
+			TypeError("%r is not a class/callable" % (callable,)))
+
+ if "instantiate" in self.type_handler[type]:
+ inst = load_attribute(self.type_handler[type]["instantiate"])
+ if not (isclass(inst) or isroutine(inst)):
+ raise errors.InstantiationError(self.type_handler[type]["instantiate"], [], conf,
+				TypeError("%r is not a class/callable" % (inst,)))
+ else:
+ inst = None
+ # instantiate all section refs.
+ for var in self.type_handler[type]["section_ref"]:
+ if var in conf:
+ if isinstance(conf[var], list):
+ for x in range(0, len(conf[var])):
+ conf[var][x] = self.instantiate_section(conf[var][x])
+ else:
+ conf[var] = self.instantiate_section(conf[var])
+ pargs = []
+ for var in self.type_handler[type]["positional"]:
+ pargs.append(conf[var])
+ del conf[var]
+ try:
+ if inst != None:
+ obj=inst(self, callable, section, conf)
+ else:
+ obj=callable(*pargs, **conf)
+ except Exception, e:
+ if isinstance(e, RuntimeError) or isinstance(e, SystemExit) or isinstance(e, errors.InstantiationError):
+ raise
+ #else wrap and chuck.
+ raise errors.InstantiationError(cls_name, [], conf, e)
+
+ if obj == None:
+ raise errors.InstantiationError(cls_name, [], conf, errors.NoObjectReturned(cls_name))
+
+ if allow_reuse:
+ self.instantiated[section] = obj
+
+ return obj
+
+ def sections(self, type=None):
+ if type==None:
+ return self._cparser.sections()
+ l=[]
+ for x in self.sections():
+ if self._cparser.has_option(x, "type") and self._cparser.get(x,"type") == type:
+ l.append(x)
+ return l
+
+def load_conf_definitions(loc):
+ c = ConfigParser()
+ c.read(loc)
+ d = {}
+ for x in c.sections():
+ d[x] = dict(c.items(x))
+ del c
+ return d
+
+
+def list_parser(s):
+ """split on whitespace honoring quoting for new tokens"""
+ l=[]
+ i = 0
+ e = len(s)
+ while i < e:
+ if not s[i].isspace():
+ if s[i] in ("'", '"'):
+ q = i
+ i += 1
+ while i < e and s[i] != s[q]:
+ if s[i] == '\\':
+ i+=2
+ else:
+ i+=1
+ if i >= e:
+ raise errors.QuoteInterpretationError(s)
+ l.append(s[q+1:i])
+ else:
+ start = i
+ while i < e and not (s[i].isspace() or s[i] in ("'", '"')):
+ if s[i] == '\\':
+ i+=2
+ else:
+ i+=1
+ if i < e and s[i] in ("'", '"'):
+ raise errors.QuoteInterpretationError(s)
+ l.append(s[start:i])
+ i+=1
+ return l
+
+def str_parser(s):
+ """yank leading/trailing whitespace and quotation, along with newlines"""
+ i=0
+ l = len(s)
+ while i < l and s[i].isspace():
+ i+=1
+ if i < l and s[i] in ("'",'"'):
+ i+=1
+ e=l
+ while e > i and s[e - 1].isspace():
+ e-=1
+ if e > i and s[e - 1] in ("'", '"'):
+ e-=1
+	if e > i:
+		s=s[i:e]
+		# strings are immutable; swap embedded newlines/tabs for spaces via replace
+		return s.replace("\n", " ").replace("\t", " ")
+	else:
+		return ''
+
+def bool_parser(s):
+	# bool() of any non-empty string is True; interpret common false spellings
+	return str_parser(s).lower() not in ("", "0", "no", "false", "off")
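For illustration, what the three parsers above yield (expected results read off the code, not test output):

    list_parser("foo 'bar baz' quux")  # -> ["foo", "bar baz", "quux"]
    str_parser("  'some value'\n")     # -> "some value"
    bool_parser("yes")                 # -> True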
diff --git a/portage/config/domain.py b/portage/config/domain.py
new file mode 100644
index 0000000..0fa62d4
--- /dev/null
+++ b/portage/config/domain.py
@@ -0,0 +1,12 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/config/domain.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+
+class domain:
+ def __init__(self, config):
+ self.__master = config
+
+ def load_all_repositories(self):
+ pass # stub; repository loading isn't written yet
diff --git a/portage/config/errors.py b/portage/config/errors.py
new file mode 100644
index 0000000..caaec19
--- /dev/null
+++ b/portage/config/errors.py
@@ -0,0 +1,72 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/config/errors.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+# potentially use an intermediate base for user config errors, separate base for instantiation?
+class BaseException(Exception):
+ pass
+
+class InheritError(BaseException):
+ """Inherit target was not found"""
+ def __init__(self, baseSect, trgSect):
+ self.base, self.trg = baseSect, trgSect
+ def __str__(self):
+ return "Section %s inherits %s, but %s wasn't found" % (self.file, self.base, self.trg)
+
+class ClassRequired(BaseException):
+ """Section type requires a class, but one wasn't specified"""
+ def __init__(self, sectname, type):
+ self.name, self.type = sectname, type
+ def __str__(self):
+ return "Section %s doesn't define a class setting, but type '%s' requires it" % (self.name, self.type)
+
+class UnknownTypeRequired(BaseException):
+ """Section was requested it be instantiated, but lacked a known type (type required for everything but conf grouppings)"""
+ def __init__(self, sectname):
+ self.name = sectname
+ def __str__(self):
+ return "Section %s cannot be instantiated, since it lacks a type setting" % self.name
+
+class InstantiationError(BaseException):
+ """Exception occured during instantiation. Actual exception is stored in instance.exc"""
+ def __init__(self, callablename, pargs, kwargs, exception):
+ self.callable, self.pargs, self.kwargs, self.exc = callablename, pargs, kwargs, exception
+ def __str__(self):
+ return "Caught exception '%s' instantiating %s" % (str(self.exc), self.callable)
+
+class NoObjectReturned(BaseException):
+ """instantiating a callable, but either None or nothing was returned"""
+ def __init__(self, callable):
+ self.callable = callable
+ def __str__(self):
+ return "No object was returned from callable '%s'" % self.callable
+
+class QuoteInterpretationError(BaseException):
+ """Quoting of a var was screwed up."""
+ def __init__(self, s, v=None):
+ self.str, self.var = s, v
+ def __str__(self):
+ return "Parsing of var '%s' \n%s\n failed" % (str(self.var), s)
+
+class RequiredSetting(BaseException):
+ """A setting is required for this type, but not set in the config"""
+ def __init__(self, type, section, setting):
+ self.type, self.section, self.setting = type, section, setting
+ def __str__(self):
+ return "Type %s requires '%s' to be defined, but no setting found in '%s'" % (self.type, self.setting, self.section)
+
+class SectionNotFound(BaseException):
+ """A specified section label was not found"""
+ def __init__(self, section, var, requested):
+ self.section, self.var, self.requested = section, var, requested
+ def __str__(self):
+ return "Section %s references section '%s' in setting '%s', but it doesn't (yet?) exist" % \
+ (self.section, self.requested, self.var)
+
+class BrokenSectionDefinition(BaseException):
+ """The conf that defines sections is invalid in some respect"""
+ def __init__(self, section, errormsg):
+ self.section, self.errmsg = section, errormsg
+ def __str__(self):
+ return "Section '%s' definition: error %s" % (self.section, self.errmsg)
diff --git a/portage/ebuild/__init__.py b/portage/ebuild/__init__.py
new file mode 100644
index 0000000..e527a65
--- /dev/null
+++ b/portage/ebuild/__init__.py
@@ -0,0 +1,5 @@
+import ebuild_package
+import ebuild_repository
+repository = ebuild_repository.tree
+package = ebuild_package.ebuild_package
+package_factory = ebuild_package.ebuild_factory
diff --git a/portage/ebuild/ebuild_buildable.py b/portage/ebuild/ebuild_buildable.py
new file mode 100644
index 0000000..cb9331f
--- /dev/null
+++ b/portage/ebuild/ebuild_buildable.py
@@ -0,0 +1,57 @@
+import package.buildable
+import os
+
+import portage_dep, portage_util
+
+from ebuild_internal import ebuild_handler
+
+class ebuild_buildable(package.buildable.package):
+ _metadata_pulls=("DEPEND",)
+
+ def __init__(self, *args, **kwargs):
+ super(ebuild_buildable, self).__init__(*args, **kwargs)
+ self.root = self._config["ROOT"]
+ self._build_env = {
+ "ROOT":self._config["ROOT"], "EBUILD":self.path,
+ "CATEGORY":self.category, "PF":self.cpvstr,
+ "P":"%s-%s" % (self.package, self.version),
+ "PN":self.package, "PV":self.version,
+ "PR":"-r%i" % self.revision, "PVR":self.fullver,
+ "FILESDIR":os.path.join(os.path.dirname(self.path),"files"),
+ "BUILD_PREFIX":os.path.join(self._config["PORTAGE_TMPDIR"],"portage"),
+ }
+
+ self._build_env["BUILDDIR"] = os.path.join(self._build_env["BUILD_PREFIX"], self._build_env["PF"])
+ for x,y in (("T","temp"),("WORKDIR","work"), ("D","image")):
+ self._build_env[x] = os.path.join(self._build_env["BUILDDIR"], y)
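+ # e.g. w/ PORTAGE_TMPDIR=/var/tmp and PF=foo-1.0, BUILDDIR becomes
+ # /var/tmp/portage/foo-1.0, holding temp/, work/, and image/ beneath it.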
+
+ self.URI = portage_dep.paren_reduce(self.SRC_URI)
+ uf = self._config["USE"].split()
+ self.ALL_URI = portage_util.flatten(portage_dep.use_reduce(self.URI,uselist=uf,matchall=True))
+ self.URI = portage_util.flatten(portage_dep.use_reduce(self.URI,uselist=uf,matchall=False))
+
+
+ def _setup(self):
+ self.ebd = ebuild_handler(self._config)
+ return self.ebd.process_phase("setup", self.path, self.root, self)
+
+ def _fetch(self):
+ return self.ebd.process_phase("fetch", self.path, self.root, self)
+
+ def _unpack(self):
+ return self.ebd.process_phase("unpack", self.path, self.root, self)
+
+ def _configure(self):
+ return True
+
+ def _compile(self):
+ return self.ebd.process_phase("compile", self.path, self.root, self)
+
+ def _install(self):
+ return self.ebd.process_phase("install", self.path, self.root, self)
+
+ def _clean(self):
+ return self.ebd.process_phase("clean", self.path, self.root, self)
+
diff --git a/portage/ebuild/ebuild_internal.py b/portage/ebuild/ebuild_internal.py
new file mode 100644
index 0000000..7f86430
--- /dev/null
+++ b/portage/ebuild/ebuild_internal.py
@@ -0,0 +1,1265 @@
+#!/usr/bin/python
+# ebuild.py; Ebuild classes/abstraction of phase processing, and communicating with an ebuild-daemon.sh instance
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/ebuild/ebuild_internal.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+
+import os,sys,traceback,time
+import portage_const,types
+#still needed?
+from portage_const import *
+import portage_locks, portage_util
+import portage_exec
+import portage_versions
+import shutil, anydbm
+import stat
+import string
+
+def shutdown_all_processors():
+ """kill off all known processors"""
+ global active_ebp_list, inactive_ebp_list
+ if type(active_ebp_list) != types.ListType:
+ print "warning, ebuild.active_ebp_list wasn't a list."
+ active_ebp_list = []
+ if type(inactive_ebp_list) != types.ListType:
+ print "warning, ebuild.inactive_ebp_list wasn't a list."
+ inactive_ebp_list = []
+ while len(active_ebp_list) > 0:
+ try: active_ebp_list[0].shutdown_processor()
+ except (IOError,OSError):
+ active_ebp_list.pop(0)
+ continue
+ try: active_ebp_list.pop(0)
+ except IndexError: pass
+ while len(inactive_ebp_list) > 0:
+ try:
+ inactive_ebp_list[0].shutdown_processor()
+ except (IOError,OSError):
+ inactive_ebp_list.pop(0)
+ continue
+ try: inactive_ebp_list.pop(0)
+ except IndexError: pass
+
+
+inactive_ebp_list = []
+active_ebp_list = []
+
+def request_ebuild_processor(config,ebuild_daemon_path=portage_const.EBUILD_DAEMON_PATH,userpriv=False, \
+ sandbox=None,fakeroot=False,save_file=None):
+ """request an ebuild_processor instance from the pool, or create a new one
+ this walks through the requirements, matching a inactive processor if one exists
+ note fakerooted processors are never reused, do to the nature of fakeroot"""
+
+ if sandbox == None:
+ sandbox = portage_exec.sandbox_capable
+
+ global inactive_ebp_list, active_ebp_list
+ if not fakeroot:
+ for x in inactive_ebp_list:
+ if not x.locked() and x.ebd == ebuild_daemon_path and \
+ x.userprived() == userpriv and (x.sandboxed() or not sandbox):
+ inactive_ebp_list.remove(x)
+ active_ebp_list.append(x)
+ return x
+ active_ebp_list.append(ebuild_processor(config, userpriv=userpriv,sandbox=sandbox,fakeroot=fakeroot,save_file=save_file))
+ return active_ebp_list[-1]
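+# usage sketch- every request should be paired w/ a release (cfg being a
+# portage.config instance; the write() payload is hypothetical):
+#	ebp = request_ebuild_processor(cfg, userpriv=portage_exec.userpriv_capable)
+#	try: ebp.write("dude?")
+#	finally: release_ebuild_processor(ebp)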
+
+def release_ebuild_processor(ebp):
+ """the inverse of request_ebuild_processor. Any processor requested via request_ebuild_processor
+ _must_ be released via this function once it's no longer in use.
+ this includes fakerooted processors.
+ Returns True exempting when the processor requested to be released isn't marked as active"""
+
+ global inactive_ebp_list, active_ebp_list
+ try: active_ebp_list.remove(ebp)
+ except ValueError: return False
+
+ try: inactive_ebp_list.index(ebp)
+ except ValueError:
+ # if it's a fakeroot'd process, we throw it away. it's not useful outside of a chain of calls
+ if not ebp.onetime():
+ inactive_ebp_list.append(ebp)
+ else:
+ del ebp
+ return True
+
+ # if it makes it this far, that means ebp was already in the inactive list.
+ # which is indicative of an internal fsck up.
+ import traceback
+ print "ebp was requested to be free'd, yet it already is claimed inactive _and_ was in the active list"
+ print "this means somethings horked, badly"
+ traceback.print_stack()
+ return False
+
+
+
+class ebuild_processor:
+ """abstraction of a running ebuild.sh instance- the env, functions, etc that ebuilds expect."""
+ def __init__(self, config, ebuild_daemon_path=portage_const.EBUILD_DAEMON_PATH,userpriv=False, sandbox=True, \
+ fakeroot=False,save_file=None):
+ """ebuild_daemon_path shouldn't be fooled with unless the caller knows what they're doing.
+ sandbox enables a sandboxed processor
+ userpriv enables a userpriv'd processor
+ fakeroot enables a fakeroot'd processor- this is a mutually exclusive option to sandbox, and
+ requires userpriv to be enabled. Violating this will result in nastiness"""
+
+ self._config = config
+ self.ebd = ebuild_daemon_path
+ from portage_data import portage_uid, portage_gid
+ spawn_opts = {}
+
+ if fakeroot and (sandbox or not userpriv):
+ import traceback
+ traceback.print_stack()
+ print "warning, was asking to enable fakeroot but-"
+ print "sandbox",sandbox,"userpriv",userpriv
+ print "this isn't valid. bailing"
+ raise Exception,"cannot initialize with sandbox and fakeroot"
+
+ if userpriv:
+ self.__userpriv = True
+ spawn_opts.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
+ else:
+ if portage_exec.userpriv_capable:
+ spawn_opts.update({"gid":portage_gid,"groups":[0,portage_gid]})
+ self.__userpriv = False
+
+ # open the pipes to be used for chatting with the new daemon
+ cread, cwrite = os.pipe()
+ dread, dwrite = os.pipe()
+ self.__sandbox = False
+ self.__fakeroot = False
+
+ # since it's questionable which spawn method we'll use (if sandbox or fakeroot fex),
+ # we ensure the bashrc is invalid.
+ env={"BASHRC":"/etc/portage/spork/not/valid/ha/ha"}
+ args = []
+ if sandbox:
+ if fakeroot:
+ print "!!! ERROR: fakeroot was on, but sandbox was also on"
+ sys.exit(1)
+ self.__sandbox = True
+ spawn_func = portage_exec.spawn_sandbox
+ env.update({"SANDBOX_DEBUG":"1","SANDBOX_DEBUG_LOG":"/var/tmp/test"})
+
+ elif fakeroot:
+ self.__fakeroot = True
+ spawn_func = portage_exec.spawn_fakeroot
+ args.append(save_file)
+ else:
+ spawn_func = portage_exec.spawn
+
+ self.pid = spawn_func(self.ebd+" daemonize", fd_pipes={0:0, 1:1, 2:2, 3:cread, 4:dwrite},
+ returnpid=True,env=env, *args, **spawn_opts)[0]
+
+ os.close(cread)
+ os.close(dwrite)
+ self.ebd_write = os.fdopen(cwrite,"w")
+ self.ebd_read = os.fdopen(dread,"r")
+
+ # basically a quick "yo" to the daemon
+ self.write("dude?")
+ if not self.expect("dude!"):
+ print "error in server coms, bailing."
+ raise Exception("expected 'dude!' response from ebd, which wasn't received. likely a bug")
+ if self.__sandbox:
+ self.write("sandbox_log?")
+ self.__sandbox_log = self.read().split()[0]
+ self.dont_export_vars=self.read().split()
+ # locking isn't used much, but w/ threading this will matter
+ self.unlock()
+
+ def sandboxed(self):
+ """is this instance sandboxed?"""
+ return self.__sandbox
+
+ def userprived(self):
+ """is this instance userprived?"""
+ return self.__userpriv
+
+ def fakerooted(self):
+ """is this instance fakerooted?"""
+ return self.__fakeroot
+
+ def onetime(self):
+ """is this instance going to be discarded after usage; eg is it fakerooted?"""
+ return self.__fakeroot
+
+ def write(self, string,flush=True):
+ """talk to running daemon. Disabling flush is useful when dumping large amounts of data
+ all strings written are automatically \\n terminated"""
+ if string[-1] == "\n":
+ self.ebd_write.write(string)
+ else:
+ self.ebd_write.write(string +"\n")
+ if flush:
+ self.ebd_write.flush()
+
+ def expect(self, want):
+ """read from the daemon, and return true or false if the returned string is what is expected"""
+ got=self.ebd_read.readline()
+ return want==got[:-1]
+
+ def read(self,lines=1):
+ """read data from the daemon. Shouldn't be called except internally"""
+ mydata=''
+ while lines > 0:
+ mydata += self.ebd_read.readline()
+ lines -= 1
+ return mydata
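+ # the protocol is plain line-oriented text; __init__'s handshake is the
+ # simplest example: self.write("dude?") followed by self.expect("dude!").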
+
+ def sandbox_summary(self, move_log=False):
+ """if the instance is sandboxed, print the sandbox access summary"""
+ if not os.path.exists(self.__sandbox_log):
+ self.write("end_sandbox_summary")
+ return 0
+ violations=portage_util.grabfile(self.__sandbox_log)
+ if len(violations)==0:
+ self.write("end_sandbox_summary")
+ return 0
+ if not move_log:
+ move_log=self.__sandbox_log
+ elif move_log != self.__sandbox_log:
+ myf=open(move_log,"a")
+ for x in violations:
+ myf.write(x+"\n")
+ myf.close()
+ from output import red
+ self.ebd_write.write(red("--------------------------- ACCESS VIOLATION SUMMARY ---------------------------")+"\n")
+ self.ebd_write.write(red("LOG FILE = \"%s\"" % move_log)+"\n\n")
+ for x in violations:
+ self.ebd_write.write(x+"\n")
+ self.write(red("--------------------------------------------------------------------------------")+"\n")
+ self.write("end_sandbox_summary")
+ try:
+ os.remove(self.__sandbox_log)
+ except (IOError, OSError), e:
+ print "exception caught when cleansing sandbox_log=%s" % str(e)
+ return 1
+
+ def preload_eclasses(self, ec_file):
+ """this preloades eclasses into a function, thus avoiding the cost of going to disk.
+ preloading eutils (which is heaviliy inherited) speeds up regen times fex"""
+ if not os.path.exists(ec_file):
+ return 1
+ self.write("preload_eclass %s" % ec_file)
+ if self.expect("preload_eclass succeeded"):
+ self.preloaded_eclasses=True
+ return True
+ return False
+
+ def lock(self):
+ """lock the processor. Currently doesn't block any access, but will"""
+ self.processing_lock = True
+
+ def unlock(self):
+ """unlock the processor"""
+ self.processing_lock = False
+
+ def locked(self):
+ """is the processor locked?"""
+ return self.processing_lock
+
+ def is_alive(self):
+ """returns if it's known if the processor has been shutdown.
+ Currently doesn't check to ensure the pid is still running, yet it should"""
+ return self.pid != None
+
+ def shutdown_processor(self):
+ """tell the daemon to shut itself down, and mark this instance as dead"""
+ try:
+ if self.is_alive():
+ self.write("shutdown_daemon")
+ self.ebd_write.close()
+ self.ebd_read.close()
+
+ # now we wait.
+ os.waitpid(self.pid,0)
+ except (IOError,OSError,ValueError):
+ pass
+
+ # we *really* ought to modify portageatexit so that we can set limits for waitpid.
+ # currently, this assumes all went well.
+ # which isn't always true.
+ self.pid = None
+
+ def set_sandbox_state(self,state):
+ """tell the daemon whether to enable the sandbox, or disable it"""
+ if state:
+ self.write("set_sandbox_state 1")
+ else:
+ self.write("set_sandbox_state 0")
+
+ def send_env(self):
+ """essentially transfer the ebuild's desired env to the running daemon
+ accepts a portage.config instance, although it will accept dicts at some point"""
+ be=self._config.bash_environ()
+ self.write("start_receiving_env\n")
+ exported_keys = ''
+ for x in be.keys():
+ if x not in self.dont_export_vars:
+ self.write("%s=%s\n" % (x,be[x]), flush=False)
+ exported_keys += x+' '
+ self.write("export "+exported_keys,flush=False)
+ self.write("end_receiving_env")
+ return self.expect("env_received")
+
+ def set_logfile(self,logfile=''):
+ """relevant only when the daemon is sandbox'd, set the logfile"""
+ self.write("logging %s" % logfile)
+ return self.expect("logging_ack")
+
+
+ def __del__(self):
+ """simply attempts to notify the daemon to die"""
+ # for this to be reached means we ain't in a list no more.
+ if self.pid:
+ self.shutdown_processor()
+
+
+class ebuild_handler:
+ """abstraction of ebuild phases, fetching exported keys, fetching srcs, etc"""
+ import portageq
+ def __init__(self, config, process_limit=5):
+ """process_limit is currently ignored"""
+ self.processed = 0
+ self.__process_limit = process_limit
+ self.preloaded_eclasses = False
+ self._config = config
+ self.__ebp = None
+
+ def __del__(self):
+ """only ensures any processors this handler has claimed are released"""
+ if self.__ebp:
+ release_ebuild_processor(self.__ebp)
+
+ # this is an implementation of stuart's confcache/sandbox trickery, basically the file/md5 stuff implemented in
+ # python with a basic bash wrapper that calls back to this.
+ # all credit for the approach goes to him, as stated, this is just an implementation of it.
+ # bugs should be thrown at ferringb.
+ def load_confcache(self,transfer_to,confcache=portage_const.CONFCACHE_FILE,
+ confcache_list=portage_const.CONFCACHE_LIST):
+ """verifys a requested conf cache, removing the global cache if it's stale.
+ The handler should be the only one to call this"""
+ from portage_checksum import perform_md5
+ from output import red
+ if not self.__ebp:
+ import traceback
+ traceback.print_stack()
+ print "err... no ebp, yet load_confcache called. invalid"
+ raise Exception,"load_confcache called yet no running processor. bug?"
+
+ valid=True
+ lock=None
+ if not os.path.exists(confcache_list):
+ print "confcache file listing doesn't exist"
+ valid=False
+ elif not os.path.exists(confcache):
+ print "confcache doesn't exist"
+ valid=False
+ else:
+ lock=portage_locks.lockfile(confcache_list,wantnewlockfile=1)
+ try:
+ myf=anydbm.open(confcache_list, "r", 0664)
+ for l in myf.keys():
+ # file, md5
+ if perform_md5(l,calc_prelink=1) != myf[l]:
+ print red("***")+" confcache is stale: %s: recorded md5: %s: actual: %s:" % (l,myf[l],perform_md5(l,calc_prelink=1))
+ raise Exception("md5 didn't match")
+ myf.close()
+ # verify env now.
+ new_cache=[]
+ env_vars=[]
+
+ # guessing on THOST. I'm sure it's wrong...
+
+ env_translate={"build_alias":"CBUILD","host_alias":"CHOST","target_alias":"THOST"}
+ cache=portage_util.grabfile(confcache)
+
+ x=0
+ while x < len(cache):
+ #ac_cv_env
+ if cache[x][0:10] == "ac_cv_env_":
+ f=cache[x][10:].find("_set")
+ if f == -1 or f==11:
+ cache.pop(x)
+ continue
+ env_vars.append(cache[x][10:10 + f])
+ x += 1
+ else:
+ new_cache.append(cache[x])
+ x += 1
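+ # config.cache tracks env vars as paired lines, e.g.
+ #	ac_cv_env_CC_set=set
+ #	ac_cv_env_CC_value=gcc
+ # the pairs are stripped above and regenerated below from the daemon's live env.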
+
+ for x in env_vars:
+ self.__ebp.write("request %s" % env_translate.get(x,x))
+ line=self.__ebp.read()
+ if line[-1] == "\n":
+ line=line[:-1]
+ new_cache.append("ac_cv_env_%s_set=%s" % (x, line))
+ if line == "unset":
+ new_cache.append("ac_cv_env_%s_value=" % x)
+ else:
+ line=self.__ebp.read()
+ if line[-1] == "\n":
+ line=line[:-1]
+ if line.split()[0] != line:
+ #quoting... XXX
+ new_cache.append("ac_cv_env_%s_value='%s'" % (x,line))
+ else:
+ new_cache.append("ac_cv_env_%s_value=%s" % (x,line))
+
+ myf=open(confcache,"w")
+ for x in new_cache:
+ myf.write(x+"\n")
+ myf.close()
+
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ print "caught exception: %s" % str(e)
+ try: myf.close()
+ except (IOError, OSError): pass
+ valid=False
+
+ if not valid:
+ print "\nconfcache is invalid\n"
+ try: os.remove(confcache_list)
+ except OSError: pass
+ try: os.remove(confcache)
+ except OSError: pass
+ self.__ebp.write("empty")
+ valid=0
+ else:
+ self.__ebp.write("location: %s" % confcache)
+ valid=1
+ if lock:
+ portage_locks.unlockfile(lock)
+ return valid
+
+ def update_confcache(self,settings,logfile,new_confcache, confcache=portage_const.CONFCACHE_FILE, \
+ confcache_list=portage_const.CONFCACHE_LIST):
+ """internal function called when a processor has finished a configure, and wishes its cache
+ be transferred to the global cache
+ This runs through the sandbox log, storing the md5 of files along with the list of files to check.
+ Finally, it transfers the cache to the global location."""
+
+ if not self.__ebp:
+ import traceback
+ traceback.print_stack()
+ print "err... no ebp, yet load_confcache called. invalid"
+ sys.exit(1)
+
+ import re
+ from portage_checksum import perform_md5
+ if not (os.path.exists(logfile) and os.path.exists(new_confcache)) :
+ # eh? wth?
+ self.__ebp.write("failed")
+ return 0
+ myfiles=portage_util.grabfile(logfile)
+ filter=re.compile('^(%s|/tmp|/dev|.*/\.ccache)/' % os.path.normpath(settings["PORTAGE_TMPDIR"]))
+ l=[]
+ for x in myfiles:
+ # get only read syscalls...
+ if x[0:8] == "open_rd:":
+ l.append(x.split()[1])
+
+ myfiles = portage_util.unique_array(l)
+ l=[]
+ for x in myfiles:
+ if not os.path.exists(x):
+ continue
+ if not filter.match(x):
+ l.append(x)
+ del myfiles
+
+ if not len(l):
+ self.__ebp.write("updated")
+ return 0
+
+ lock=portage_locks.lockfile(confcache_list,wantnewlockfile=1)
+ # update phase.
+ if not os.path.exists(confcache_list):
+ prevmask=os.umask(0)
+ myf=anydbm.open(confcache_list,"n",0664)
+ os.umask(prevmask)
+ else:
+ myf=anydbm.open(confcache_list,"w",0664)
+
+ for x in l:
+ try:
+ if not stat.S_ISDIR(os.stat(x).st_mode) and not myf.has_key(x):
+ myf[x]=str(perform_md5(x,calc_prelink=1))
+ except (IOError, OSError):
+ # exceptions are only possibly (ignoring anydbm horkage) from os.stat
+ pass
+ myf.close()
+ from portage_data import portage_gid
+ os.chown(confcache_list, -1, portage_gid)
+ shutil.move(new_confcache, confcache)
+ os.chown(confcache, -1, portage_gid)
+ m=os.umask(0)
+ os.chmod(confcache, 0664)
+ os.chmod(confcache_list, 0664)
+ os.umask(m)
+ portage_locks.unlockfile(lock)
+ self.__ebp.write("updated")
+ return 0
+
+ def get_keys(self,myebuild,myroot="/"):
+ """request the auxdbkeys from an ebuild
+ returns a dict when successful, None (or an empty dict) when failed"""
+# print "getting keys for %s" % myebuild
+ # normally,
+ # userpriv'd, minus sandbox. which is odd.
+ # I say both, personally (and I'm writing it, so live with it)
+ if self.__ebp:
+ import traceback
+ traceback.print_stack()
+ print "self.__ebp exists. it shouldn't. this indicates a handler w/ an active ebp never"
+ print "released it, or a bug in the calls"
+ sys.exit(1)
+
+
+ self.__ebp = request_ebuild_processor(self._config, userpriv=portage_exec.userpriv_capable)
+
+ if self.__adjust_env("depend",myebuild,myroot):
+ return {}
+
+ self.__ebp.write("process_ebuild depend")
+ self.__ebp.send_env()
+ self.__ebp.set_sandbox_state(True)
+ self.__ebp.write("start_processing")
+ line=self.__generic_phase(["sending_keys"], interpret_results=False)
+ if line != "sending_keys":
+ return None
+ mykeys={}
+ while line != "end_keys":
+ line=self.__ebp.read()
+ line=line[:-1]
+ if line == "failed":
+ self.__ebp.unlock()
+ return {}
+ if line == "end_keys" or not len(line):
+ continue
+ pair = line.split('=',1)
+ mykeys[pair[0]]=pair[1]
+ self.__ebp.expect("phases succeeded")
+ if not release_ebuild_processor(self.__ebp):
+ self.__ebp = None
+ raise Exception,"crud"
+ self.__ebp = None
+ return mykeys
+
+ def __adjust_env(self,mydo,myebuild,myroot,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,\
+ use_cache=1,fetchall=0,tree="porttree",use_info_env=True,verbosity=0):
+ """formerly portage.doebuild, since it's specific to ebuilds, it's now a method of ebuild handling.
+ severely gutted, and in need of cleansing/exorcism"""
+ from portage import db,ExtractKernelVersion,fetch,features, \
+ digestgen,digestcheck,root,flatten, digestParseFile
+ from portage_data import portage_uid,portage_gid,secpass
+ import portage_dep
+ from portage_util import writemsg
+
+ ebuild_path = os.path.abspath(myebuild)
+ pkg_dir = os.path.dirname(ebuild_path)
+
+ if self._config.configdict["pkg"].has_key("CATEGORY"):
+ cat = self._config.configdict["pkg"]["CATEGORY"]
+ else:
+ cat = os.path.basename(os.path.normpath(pkg_dir+"/.."))
+ mypv = os.path.basename(ebuild_path)[:-7]
+ mycpv = cat+"/"+mypv
+
+ mysplit=portage_versions.pkgsplit(mypv,silent=0)
+ if mysplit==None:
+ writemsg("!!! Error: PF is null '%s'; exiting.\n" % mypv)
+ return 1
+
+ if mydo == "clean":
+ cleanup=True
+
+ if mydo != "depend":
+ # XXX: We're doing a little hack here to curtail the gvisible locking
+ # XXX: that creates a deadlock... Really need to isolate that.
+ self._config.reset(use_cache=use_cache)
+
+ self._config.setcpv(mycpv,use_cache=use_cache)
+
+ if not os.path.exists(myebuild):
+ writemsg("!!! doebuild: "+str(myebuild)+" not found for "+str(mydo)+"\n")
+ return 1
+
+ if debug: # Otherwise it overrides emerge's settings.
+ # We have no other way to set debug... debug can't be passed in
+ # due to how it's coded... Don't overwrite this so we can use it.
+ self._config["PORTAGE_DEBUG"]=str(debug)
+
+ self._config["ROOT"] = myroot
+
+ self._config["EBUILD"] = ebuild_path
+ self._config["O"] = pkg_dir
+ self._config["CATEGORY"] = cat
+ self._config["FILESDIR"] = pkg_dir+"/files"
+ self._config["PF"] = mypv
+
+ self._config["ECLASSDIR"] = self._config["PORTDIR"]+"/eclass"
+
+ self._config["PROFILE_PATHS"] = PROFILE_PATH+"\n"+CUSTOM_PROFILE_PATH
+ self._config["P"] = mysplit[0]+"-"+mysplit[1]
+ self._config["PN"] = mysplit[0]
+ self._config["PV"] = mysplit[1]
+ self._config["PR"] = mysplit[2]
+
+ # ensure this is set for all phases, setup included.
+ # Should be ok again to set $T, as sandbox does not depend on it
+ self._config["BUILD_PREFIX"] = self._config["PORTAGE_TMPDIR"]+"/portage"
+ self._config["BUILDDIR"] = self._config["BUILD_PREFIX"]+"/"+self._config["PF"]
+ self._config["T"] = self._config["BUILDDIR"]+"/temp"
+ self._config["WORKDIR"] = self._config["BUILDDIR"]+"/work"
+ self._config["D"] = self._config["BUILDDIR"]+"/image/"
+
+
+ # bailing now, probably horks a few things up, but neh.
+ # got to break a few eggs to make an omelette, after all :)
+ if mydo=="unmerge":
+ return 0
+
+ if mydo!="depend":
+ try:
+ self._config["INHERITED"],self._config["RESTRICT"] = db[root][tree].dbapi.aux_get(
+ mycpv,["INHERITED","RESTRICT"])
+
+ self._config["PORTAGE_RESTRICT"]=string.join(flatten(portage_dep.use_reduce(
+ portage_dep.paren_reduce(self._config["RESTRICT"]),
+ uselist=self._config["USE"].split() )),' ')
+
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "caught exception %s in ebd_proc:doebuild" % str(e)
+ self._config["RESTRICT"] = self._config["PORTAGE_RESTRICT"] = ""
+ pass
+
+
+ if mysplit[2] == "r0":
+ self._config["PVR"]=mysplit[1]
+ else:
+ self._config["PVR"]=mysplit[1]+"-"+mysplit[2]
+
+ self._config["SLOT"]=""
+
+ if self._config.has_key("PATH"):
+ mysplit=string.split(self._config["PATH"],":")
+ else:
+ mysplit=[]
+
+ if PORTAGE_BIN_PATH not in mysplit:
+ if mysplit:
+ self._config["PATH"]=PORTAGE_BIN_PATH+":"+self._config["PATH"]
+ else:
+ self._config["PATH"]=PORTAGE_BIN_PATH
+
+ if tree=="bintree":
+ self._config["BUILD_PREFIX"] += "-pkg"
+
+ self._config["HOME"] = self._config["BUILD_PREFIX"]+"/homedir"
+ self._config["PKG_TMPDIR"] = self._config["PORTAGE_TMPDIR"]+"/portage-pkg"
+
+ if cleanup and os.path.exists(self._config["BUILDDIR"]):
+ print "cleansing builddir"+self._config["BUILDDIR"]
+ shutil.rmtree(self._config["BUILDDIR"])
+
+ if mydo=="clean":
+ # if clean, just flat out skip the rest of this crap.
+ return 0
+
+ self._config["PORTAGE_BASHRC"] = EBUILD_SH_ENV_FILE
+
+ #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
+
+ if mydo not in ["depend","fetch","digest","manifest"]:
+ if not self._config.has_key("KV"):
+ mykv,err1=ExtractKernelVersion(root+"usr/src/linux")
+ if mykv:
+ # Regular source tree
+ self._config["KV"]=mykv
+ else:
+ self._config["KV"]=""
+
+ if (mydo!="depend") or not self._config.has_key("KVERS"):
+ myso=os.uname()[2]
+ self._config["KVERS"]=myso[1]
+
+
+ # get possible slot information from the deps file
+ if mydo=="depend":
+ if self._config.has_key("PORTAGE_DEBUG") and self._config["PORTAGE_DEBUG"]=="1":
+ # XXX: This needs to use a FD for saving the output into a file.
+ # XXX: Set this up through spawn
+ pass
+ writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey),2)
+ if dbkey:
+ self._config["dbkey"] = dbkey
+ else:
+ self._config["dbkey"] = self._config.depcachedir+"/aux_db_key_temp"
+
+ return 0
+
+ self._config["PORTAGE_LOGFILE"]=''
+ logfile=None
+
+
+ #fetch/digest crap
+ if mydo not in ["prerm","postrm","preinst","postinst","config","help","setup","unmerge"]:
+
+ newuris, alist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=self._config)
+ alluris, aalist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=self._config,all=1)
+ self._config["A"]=string.join(alist," ")
+ self._config["AA"]=string.join(aalist," ")
+ if ("mirror" in features) or fetchall:
+ fetchme=alluris[:]
+ checkme=aalist[:]
+ elif mydo=="digest":
+ fetchme=alluris[:]
+ checkme=aalist[:]
+ digestfn=self._config["FILESDIR"]+"/digest-"+self._config["PF"]
+ if os.path.exists(digestfn):
+ mydigests=digestParseFile(digestfn)
+ if mydigests:
+ for x in mydigests:
+ while x in checkme:
+ i = checkme.index(x)
+ del fetchme[i]
+ del checkme[i]
+ else:
+ fetchme=newuris[:]
+ checkme=alist[:]
+
+ try:
+ if not os.path.exists(self._config["DISTDIR"]):
+ os.makedirs(self._config["DISTDIR"])
+ if not os.path.exists(self._config["DISTDIR"]+"/cvs-src"):
+ os.makedirs(self._config["DISTDIR"]+"/cvs-src")
+ except OSError, e:
+ print "!!! File system problem. (Bad Symlink?)"
+ print "!!! Fetching may fail:",str(e)
+
+ try:
+ mystat=os.stat(self._config["DISTDIR"]+"/cvs-src")
+ if ((mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&00775)!=00775)) and not listonly:
+ print "*** Adjusting cvs-src permissions for portage user..."
+ os.chown(self._config["DISTDIR"]+"/cvs-src",0,portage_gid)
+ os.chmod(self._config["DISTDIR"]+"/cvs-src",00775)
+ portage_exec.spawn("chgrp -R "+str(portage_gid)+" "+self._config["DISTDIR"]+"/cvs-src")
+ portage_exec.spawn("chmod -R g+rw "+self._config["DISTDIR"]+"/cvs-src")
+ except (IOError, OSError):
+ pass
+
+ if not fetch(fetchme, self._config, listonly=listonly, fetchonly=fetchonly,verbosity=verbosity):
+ return 1
+
+ if mydo=="fetch" and listonly:
+ return 0
+
+ if "digest" in features:
+ #generate digest if it doesn't exist.
+ if mydo=="digest":
+ # exemption to the return rule
+ return (not digestgen(aalist,self._config,overwrite=1,verbosity=verbosity))
+ else:
+ digestgen(aalist,self._config,overwrite=0,verbosity=verbosity)
+
+ elif mydo=="digest":
+ #since we are calling "digest" directly, recreate the digest even if it already exists
+ return (not digestgen(checkme,self._config,overwrite=1,verbosity=verbosity))
+ if mydo=="manifest":
+ return (not digestgen(checkme,self._config,overwrite=1,manifestonly=1,verbosity=verbosity))
+
+ if not digestcheck(checkme, self._config, ("strict" in features),verbosity=verbosity):
+ return 1
+
+ if mydo=="fetch":
+ return 0
+
+ if not os.path.exists(self._config["BUILD_PREFIX"]):
+ os.makedirs(self._config["BUILD_PREFIX"])
+ os.chown(self._config["BUILD_PREFIX"],portage_uid,portage_gid)
+ os.chmod(self._config["BUILD_PREFIX"],00775)
+
+ if not os.path.exists(self._config["T"]):
+ print "creating temp dir"
+ os.makedirs(self._config["T"])
+ os.chown(self._config["T"],portage_uid,portage_gid)
+ os.chmod(self._config["T"],0770)
+
+ logdir = self._config["T"]+"/logging"
+ if not os.path.exists(logdir):
+ os.makedirs(logdir)
+ os.chown(logdir, portage_uid, portage_gid)
+ os.chmod(logdir, 0770)
+
+ try:
+ #XXX: negative restrict
+ myrestrict = self._config["PORTAGE_RESTRICT"].split()
+ if ("nouserpriv" not in myrestrict and "userpriv" not in myrestrict):
+ if ("userpriv" in self._config.features) and (portage_uid and portage_gid):
+ if (secpass==2):
+ if os.path.exists(self._config["HOME"]):
+ # XXX: Potentially bad, but held down by HOME replacement above.
+ portage_exec.spawn("rm -Rf "+self._config["HOME"])
+ if not os.path.exists(self._config["HOME"]):
+ os.makedirs(self._config["HOME"])
+ elif ("userpriv" in features):
+ print "!!! Disabling userpriv from features... Portage UID/GID not valid."
+ del features[features.index("userpriv")]
+ except (IOError, OSError), e:
+ print "!!! Couldn't empty HOME:",self._config["HOME"]
+ print "!!!",e
+
+
+ try:
+ # no reason to check for depend since depend returns above.
+ if not os.path.exists(self._config["BUILD_PREFIX"]):
+ os.makedirs(self._config["BUILD_PREFIX"])
+ os.chown(self._config["BUILD_PREFIX"],portage_uid,portage_gid)
+ if not os.path.exists(self._config["BUILDDIR"]):
+ os.makedirs(self._config["BUILDDIR"])
+ os.chown(self._config["BUILDDIR"],portage_uid,portage_gid)
+
+
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Perhaps: rm -Rf",self._config["BUILD_PREFIX"]
+ print "!!!",str(e)
+ return 1
+
+ try:
+ if not os.path.exists(self._config["HOME"]):
+ os.makedirs(self._config["HOME"])
+ os.chown(self._config["HOME"],portage_uid,portage_gid)
+ os.chmod(self._config["HOME"],02770)
+
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Failed to create fake home directory in BUILDDIR"
+ print "!!!",str(e)
+ return 1
+
+ try:
+ if ("userpriv" in features) and ("ccache" in features):
+ if (not self._config.has_key("CCACHE_DIR")) or (self._config["CCACHE_DIR"]==""):
+ self._config["CCACHE_DIR"]=self._config["PORTAGE_TMPDIR"]+"/ccache"
+ if not os.path.exists(self._config["CCACHE_DIR"]):
+ os.makedirs(self._config["CCACHE_DIR"])
+ os.chown(self._config["CCACHE_DIR"],portage_uid,portage_gid)
+ os.chmod(self._config["CCACHE_DIR"],0775)
+ except OSError, e:
+ print "!!! File system problem. (ReadOnly? Out of space?)"
+ print "!!! Perhaps: rm -Rf",self._config["BUILD_PREFIX"]
+ print "!!!",str(e)
+ return 1
+
+ try:
+ mystat=os.stat(self._config["CCACHE_DIR"])
+ if (mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02070)!=02070):
+ print "*** Adjusting ccache permissions for portage user..."
+ os.chown(self._config["CCACHE_DIR"],portage_uid,portage_gid)
+ os.chmod(self._config["CCACHE_DIR"],02770)
+ portage_exec.spawn("chown -R "+str(portage_uid)+":"+str(portage_gid)+" "+self._config["CCACHE_DIR"])
+ portage_exec.spawn("chmod -R g+rw "+self._config["CCACHE_DIR"])
+ except (OSError, IOError):
+ pass
+
+ if "distcc" in features:
+ try:
+ if (not self._config.has_key("DISTCC_DIR")) or (self._config["DISTCC_DIR"]==""):
+ self._config["DISTCC_DIR"]=self._config["PORTAGE_TMPDIR"]+"/portage/.distcc"
+ if not os.path.exists(self._config["DISTCC_DIR"]):
+ os.makedirs(self._config["DISTCC_DIR"])
+ os.chown(self._config["DISTCC_DIR"],portage_uid,portage_gid)
+ os.chmod(self._config["DISTCC_DIR"],02775)
+ for x in ("/lock", "/state"):
+ if not os.path.exists(self._config["DISTCC_DIR"]+x):
+ os.mkdir(self._config["DISTCC_DIR"]+x)
+ os.chown(self._config["DISTCC_DIR"]+x,portage_uid,portage_gid)
+ os.chmod(self._config["DISTCC_DIR"]+x,02775)
+ except OSError, e:
+ writemsg("\n!!! File system problem when setting DISTCC_DIR directory permissions.\n")
+ writemsg( "!!! DISTCC_DIR="+str(self._config["DISTCC_DIR"]+"\n"))
+ writemsg( "!!! "+str(e)+"\n\n")
+ time.sleep(5)
+ features.remove("distcc")
+ self._config["DISTCC_DIR"]=""
+
+ # break off into process_phase
+ if self._config.has_key("PORT_LOGDIR"):
+ try:
+ st=os.stat(self._config["PORT_LOGDIR"])
+ if not st.st_gid == portage_gid:
+ os.chown(self._config["PORT_LOGDIR"], -1, portage_gid)
+ if not st.st_mode & (os.W_OK << 3):
+ os.chmod(self._config["PORT_LOGDIR"], st.st_mode | (os.W_OK << 3))
+ # by this time, we have write access to the logdir. or it's bailed.
+ try:
+ os.chown(self._config["BUILD_PREFIX"],portage_uid,portage_gid)
+ os.chmod(self._config["PORT_LOGDIR"],00770)
+ if not self._config.has_key("LOG_PF") or (self._config["LOG_PF"] != self._config["PF"]):
+ self._config["LOG_PF"]=self._config["PF"]
+ self._config["LOG_COUNTER"]=str(db[myroot]["vartree"].dbapi.get_counter_tick_core("/"))
+ self._config["PORTAGE_LOGFILE"]="%s/%s-%s.log" % (self._config["PORT_LOGDIR"],self._config["LOG_COUNTER"],self._config["LOG_PF"])
+ if os.path.exists(self._config["PORTAGE_LOGFILE"]):
+ os.chmod(self._config["PORTAGE_LOGFILE"], 0664)
+ os.chown(self._config["PORTAGE_LOGFILE"], -1,portage_gid)
+ except ValueError, e:
+ self._config["PORT_LOGDIR"]=""
+ print "!!! Unable to chown/chmod PORT_LOGDIR. Disabling logging."
+ print "!!!",e
+ except (OSError, IOError):
+ print "!!! Cannot create log... No write access / Does not exist"
+ print "!!! PORT_LOGDIR:",self._config["PORT_LOGDIR"]
+ self._config["PORT_LOGDIR"]=""
+
+ # if any of these are being called, handle them -- running them out of the sandbox -- and stop now.
+ if mydo in ["help","setup"]:
+ return 0
+# return spawn(EBUILD_SH_BINARY+" "+mydo,self._config,debug=debug,free=1,logfile=logfile)
+ elif mydo in ["prerm","postrm","preinst","postinst","config"]:
+ self._config.load_infodir(pkg_dir)
+ if not use_info_env:
+ print "overloading port_env_file setting to %s" % self._config["T"]+"/environment"
+ self._config["PORT_ENV_FILE"] = self._config["T"] + "/environment"
+ if not os.path.exists(self._config["PORT_ENV_FILE"]):
+ from output import red
+ print red("!!!")+" err.. it doesn't exist. that's bad."
+ sys.exit(1)
+ return 0
+# return spawn(EBUILD_SH_BINARY+" "+mydo,self._config,debug=debug,free=1,logfile=logfile)
+
+ try:
+ self._config["SLOT"], self._config["RESTRICT"] = db["/"]["porttree"].dbapi.aux_get(mycpv,["SLOT","RESTRICT"])
+ except (IOError,KeyError):
+ from output import red
+ print red("doebuild():")+" aux_get() error reading "+mycpv+"; aborting."
+ sys.exit(1)
+
+ #initial dep checks complete; time to process main commands
+
+ nosandbox=(("userpriv" in features) and ("usersandbox" not in features))
+ actionmap={
+ "depend": { "args":(0,1)}, # sandbox / portage
+ "setup": { "args":(1,0)}, # without / root
+ "unpack": {"dep":"setup", "args":(0,1)}, # sandbox / portage
+ "compile": {"dep":"unpack", "args":(nosandbox,1)}, # optional / portage
+ "test": {"dep":"compile", "args":(nosandbox,1)}, # optional / portage
+ "install": {"dep":"test", "args":(0,0)}, # sandbox / root
+ "rpm": {"dep":"install", "args":(0,0)}, # sandbox / root
+ "package": {"dep":"install", "args":(0,0)}, # sandbox / root
+ }
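+ # the args tuples decode as (nosandbox, run-as-portage-user); w/ the
+ # spawnebuild call commented out below, only the keys/dep chain matter atm.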
+
+ if mydo in actionmap.keys():
+ if mydo=="package":
+ for x in ["","/"+self._config["CATEGORY"],"/All"]:
+ if not os.path.exists(self._config["PKGDIR"]+x):
+ os.makedirs(self._config["PKGDIR"]+x)
+ # REBUILD CODE FOR TBZ2 --- XXXX
+ return 0
+# return spawnebuild(mydo,actionmap,self._config,debug,logfile=logfile)
+ elif mydo=="qmerge":
+ #check to ensure install was run. this *only* pops up when users forget it and are using ebuild
+ bail=False
+ if not os.path.exists(self._config["BUILDDIR"]+"/.completed_stages"):
+ bail=True
+ else:
+ myf=open(self._config["BUILDDIR"]+"/.completed_stages")
+ myd=myf.readlines()
+ myf.close()
+ if len(myd) == 0:
+ bail = True
+ else:
+ bail = ("install" not in myd[0].split())
+ if bail:
+ print "!!! mydo=qmerge, but install phase hasn't been ran"
+ sys.exit(1)
+
+ #qmerge is specifically not supposed to do a runtime dep check
+ return 0
+# return merge(self._config["CATEGORY"],self._config["PF"],self._config["D"],self._config["BUILDDIR"]+"/build-info",myroot,self._config)
+ elif mydo=="merge":
+ return 0
+# retval=spawnebuild("install",actionmap,self._config,debug,alwaysdep=1,logfile=logfile)
+#			if retval:
+#				return retval
+
+# return merge(self._config["CATEGORY"],self._config["PF"],self._config["D"],self._config["BUILDDIR"]+"/build-info",myroot,self._config,myebuild=self._config["EBUILD"])
+ else:
+ print "!!! Unknown mydo:",mydo
+ sys.exit(1)
+
+ # phases
+ # my... god... this... is... ugly.
+ # we're talking red headed step child of medusa ugly here.
+
+ def process_phase(self,phase,myebuild,myroot,allstages=False,**keywords):
+ """the public 'doebuild' interface- all phases are called here, along w/ a valid config
+ allstages is the equivalent of 'do merge, and all needed phases to get to it'
+ **keywords is options passed on to __adjust_env. It will be removed as __adjust_env is digested"""
+ from portage import merge,unmerge,features
+ from portage_util import writemsg
+ from output import red
+
+ validcommands = ["help","clean","prerm","postrm","preinst","postinst",
+ "config","setup","depend","fetch","digest",
+ "unpack","compile","test","install","rpm","qmerge","merge",
+ "package","unmerge", "manifest"]
+
+ if phase not in validcommands:
+ validcommands.sort()
+ writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % phase)
+ for vcount in range(len(validcommands)):
+ if vcount%6 == 0:
+ writemsg("\n!!! ")
+ writemsg(string.ljust(validcommands[vcount], 11))
+ writemsg("\n")
+ return 1
+
+ retval=self.__adjust_env(phase,myebuild,myroot,**keywords)
+ if retval:
+ return retval
+
+ if "userpriv" in features:
+ sandbox = ("usersandbox" in features)
+ else:
+ sandbox = ("sandbox" in features)
+
+ droppriv=(("userpriv" in features) and \
+ ("nouserpriv" not in string.split(self._config["PORTAGE_RESTRICT"])) and portage_exec.userpriv_capable)
+ use_fakeroot=(("userpriv_fakeroot" in features) and droppriv and portage_exec.fakeroot_capable)
+
+ # basically a nasty graph of 'w/ this phase, have it userprived/sandboxed/fakeroot', and run
+ # these phases prior
+ actionmap={
+ "depend": { "sandbox":False, "userpriv":True, "fakeroot":False},
+ "setup": { "sandbox":True, "userpriv":False, "fakeroot":False},
+ "unpack": {"dep":"setup", "sandbox":sandbox, "userpriv":True, "fakeroot":False},
+ "compile": {"dep":"unpack", "sandbox":sandbox,"userpriv":True, "fakeroot":False},
+ "test": {"dep":"compile","sandbox":sandbox,"userpriv":True, "fakeroot":False},
+ "install": {"dep":"test", "sandbox":(not use_fakeroot or (not use_fakeroot and sandbox)),
+ "userpriv":use_fakeroot,"fakeroot":use_fakeroot},
+ "rpm": {"dep":"install","sandbox":False, "userpriv":use_fakeroot, "fakeroot":use_fakeroot},
+ "package": {"dep":"install", "sandbox":False, "userpriv":use_fakeroot, "fakeroot":use_fakeroot},
+ "merge" : {"dep":"install", "sandbox":True, "userpriv":False, "fakeroot":False}
+ }
+
+ merging=False
+ # this shouldn't technically ever be called, get_keys exists for this.
+ # left in for compatibility while portage.doebuild still exists
+ if phase=="depend":
+ return retval
+ elif phase=="unmerge":
+ return unmerge(self._config["CATEGORY"],self._config["PF"],myroot,self._config)
+ elif phase in ["fetch","digest","manifest","clean"]:
+ return retval
+ elif phase=="merge":
+ merging=True
+ elif phase=="qmerge":
+ #no phases ran.
+ phase="merge"
+ merging=True
+# return merge(self._config["CATEGORY"],self._config["PF"],self._config["D"],self._config["BUILDDIR"]+"/build-info",myroot,\
+# self._config)
+
+ elif phase in ["help","clean","prerm","postrm","preinst","postinst","config"]:
+ self.__ebp = request_ebuild_processor(self._config, userpriv=False)
+ self.__ebp.write("process_ebuild %s" % phase)
+ self.__ebp.send_env()
+ self.__ebp.set_sandbox_state(phase in ["help","clean"])
+ self.__ebp.write("start_processing")
+ retval = self.__generic_phase([])
+ release_ebuild_processor(self.__ebp)
+ self.__ebp = None
+ return not retval
+
+ k=phase
+ # represent the phases to run, grouping each phase based upon if it's sandboxed, fakerooted, and userpriv'd
+ # ugly at a glance, but remember a processor can run multiple phases now.
+ # best to not be wasteful in terms of env saving/restoring, and just run all applicable phases in one shot
+ phases=[[[phase]]]
+ sandboxed=[[actionmap[phase]["sandbox"]]]
+ privs=[(actionmap[phase]["userpriv"],actionmap[phase]["fakeroot"])]
+
+ if allstages:
+ while actionmap[k].has_key("dep"):
+ k=actionmap[k]["dep"]
+ if actionmap[k]["userpriv"] != privs[-1][0] or actionmap[k]["fakeroot"] != privs[-1][1]:
+ phases.append([[k]])
+ sandboxed.append([actionmap[k]["sandbox"]])
+ privs.append((actionmap[k]["userpriv"],actionmap[k]["fakeroot"]))
+ elif actionmap[k]["sandbox"] != sandboxed[-1][-1]:
+ phases[-1].append([k])
+ sandboxed[-1].extend([actionmap[k]["sandbox"]])
+ else:
+ phases[-1][-1].append(k)
+ privs.reverse()
+ phases.reverse()
+ sandboxed.reverse()
+ for x in phases:
+ for y in x:
+ y.reverse()
+ x.reverse()
+ # and now we have our phases grouped in parallel to the sandbox/userpriv/fakeroot state.
+
+ all_phases = portage_util.flatten(phases)
+
+# print "all_phases=",all_phases
+# print "phases=",phases
+# print "sandbox=",sandboxed
+# print "privs=",privs
+# sys.exit(1)
+# print "\n\ndroppriv=",droppriv,"use_fakeroot=",use_fakeroot,"\n\n"
+
+ #temporary hack until sandbox + fakeroot (if ever) play nice.
+ while privs:
+ if self.__ebp == None or (droppriv and self.__ebp.userprived() != privs[0][0]) or \
+ (use_fakeroot and self.__ebp.fakerooted() != privs[0][1]):
+ if self.__ebp != None:
+ print "swapping processors for",phases[0][0]
+ release_ebuild_processor(self.__ebp)
+ self.__ebp = None
+ opts={}
+
+ #only engage fakeroot when userpriv'd
+ if use_fakeroot and privs[0][1]:
+ opts["save_file"] = self._config["T"]+"/fakeroot_db"
+
+ self.__ebp = request_ebuild_processor(self._config, userpriv=(privs[0][0] and droppriv), \
+ fakeroot=(privs[0][1] and use_fakeroot), \
+ sandbox=(not (privs[0][1] and use_fakeroot) and portage_exec.sandbox_capable),**opts)
+
+ #loop through the instances where the processor must have the same sandboxed state-
+ #note a sandbox'd process can have its sandbox disabled.
+ #this separation is needed since you can't mix sandbox and fakeroot atm.
+ for sandbox in sandboxed[0]:
+ if "merge" in phases[0][0]:
+ if len(phases[0][0]) == 1:
+ print "skipping this phase, it's just merge"
+ continue
+ phases[0][0].remove("merge")
+
+ self.__ebp.write("process_ebuild %s" % string.join(phases[0][0]," "))
+ self.__ebp.send_env()
+ self.__ebp.set_sandbox_state(sandbox)
+ self.__ebp.write("start_processing")
+ phases[0].pop(0)
+ retval = not self.__generic_phase([])
+ if retval:
+ release_ebuild_processor(self.__ebp)
+ self.__ebp = None
+ return retval
+ sandboxed.pop(0)
+ privs.pop(0)
+ phases.pop(0)
+ # hey hey. we're done. Now give it back.
+ release_ebuild_processor(self.__ebp)
+ self.__ebp = None
+
+ # packaging moved out of ebuild.sh, and into this code.
+ # makes it so ebuild.sh no longer must run as root for the package phase.
+ if "package" in all_phases:
+ print "processing package"
+ #mv "${PF}.tbz2" "${PKGDIR}/All"
+ if not os.path.exists(self._config["PKGDIR"]+"/All"):
+ os.makedirs(self._config["PKGDIR"]+"/All")
+ if not os.path.exists(self._config["PKGDIR"]+"/"+self._config["CATEGORY"]):
+ os.makedirs(self._config["PKGDIR"]+"/"+self._config["CATEGORY"])
+ if os.path.exists("%s/All/%s.tbz2" % (self._config["PKGDIR"],self._config["PF"])):
+ os.remove("%s/All/%s.tbz2" % (self._config["PKGDIR"],self._config["PF"]))
+ retval = not portage_util.movefile("%s/%s.tbz2" % (self._config["BUILDDIR"],self._config["PF"]),
+ self._config["PKGDIR"]+"/All/"+self._config["PF"]+".tbz2") > 0
+ if retval: return False
+ if os.path.exists("%s/%s/%s.tbz2" % (self._config["PKGDIR"],self._config["CATEGORY"],self._config["PF"])):
+ os.remove("%s/%s/%s.tbz2" % (self._config["PKGDIR"],self._config["CATEGORY"],self._config["PF"]))
+ os.symlink("%s/All/%s.tbz2" % (self._config["PKGDIR"],self._config["PF"]),
+ "%s/%s/%s.tbz2" % (self._config["PKGDIR"],self._config["CATEGORY"],self._config["PF"]))
+
+ #same as the package phase above, removes the root requirement for the rpm phase.
+ if "rpm" in all_phases:
+ rpm_name="%s-%s-%s" % (self._config["PN"],self._config["PV"],self._config["PR"])
+
+ retval = not portage_util.movefile("%s/%s.tar.gz" % (self._config["T"],self._config["PF"]),
+ "/usr/src/redhat/SOURCES/%s.tar.gz" % self._config["PF"]) > 0
+ if retval:
+ print "moving src for rpm failed, retval=",retval
+ return False
+
+ retval=portage_exec.spawn(("rpmbuild","-bb","%s/%s.spec" % \
+ (self._config["BUILDDIR"],self._config["PF"])))
+ if retval:
+ print "Failed to integrate rpm spec file"
+ return retval
+
+ if not os.path.exists(self._config["RPMDIR"]+"/"+self._config["CATEGORY"]):
+ os.makedirs(self._config["RPMDIR"]+"/"+self._config["CATEGORY"])
+
+ retval = not portage_util.movefile("/usr/src/redhat/RPMS/i386/%s.i386.rpm" % rpm_name,
+ "%s/%s/%s.rpm" % (self._config["RPMDIR"],self._config["CATEGORY"],rpm_name)) > 0
+ if retval:
+ print "rpm failed"
+ return retval
+
+
+ # not great check, but it works.
+ # basically, if FEATURES="-buildpkg" emerge package was called, the files in the current
+ # image directory don't have their actual perms. so we use an ugly bit of bash
+ # to make the fakeroot (claimed) permissions/owners a reality.
+ if use_fakeroot and os.path.exists(self._config["T"]+"/fakeroot_db") and merging:
+ print "correcting fakeroot privs"
+ retval=portage_exec.spawn(("/usr/lib/portage/bin/affect-fakeroot-perms.sh", \
+ self._config["T"]+"/fakeroot_db", \
+ self._config["D"]),env={"BASHRC":portage_const.INVALID_ENV_FILE})
+ if retval or retval == None:
+ print red("!!!")+"affecting fakeroot perms after the fact failed"
+ return retval
+
+ if merging:
+ print "processing merge"
+ retval = merge(self._config["CATEGORY"],self._config["PF"],self._config["D"],self._config["BUILDDIR"]+"/build-info",myroot,\
+ self._config,myebuild=self._config["EBUILD"])
+ return retval
+
+ # this basically handles all hijacks from the daemon, whether confcache or portageq.
+ def __generic_phase(self,breakers,interpret_results=True):
+ """internal function that responds to the running ebuild processor's requests
+ this enables portageq hijack, sandbox summaries, confcache among other things
+ interpret_results controls whether this returns true/false, or the string the
+ processor spoke that caused this to release control
+ breaks is list of strings that cause this loop/interpretter to relinquish control"""
+ b = breakers[:]
+ b.extend(["prob","phases failed","phases succeeded","env_receiving_failed"])
+ line=''
+ while line not in b:
+ line=self.__ebp.read()
+ line=line[:-1]
+
+ if line[0:23] == "request_sandbox_summary":
+ self.__ebp.sandbox_summary(line[24:])
+ elif line[0:17] == "request_confcache":
+ self.load_confcache(line[18:])
+ elif line[0:16] == "update_confcache":
+ k=line[17:].split()
+ # sandbox_debug_log, local_cache
+ self.update_confcache(self._config,k[0],k[1])
+ elif line[0:8] == "portageq":
+ keys=line[8:].split()
+ try:
+ e,s=getattr(self.portageq,keys[0])(keys[1:])
+ except SystemExit, e:
+ raise
+ except Exception, ex:
+ sys.stderr.write("caught exception %s\n" % str(ex))
+ e=2
+ s="ERROR: insufficient paramters!"
+ self.__ebp.write("return_code="+str(e))
+ if len(s):
+ self.__ebp.write(s)
+ self.__ebp.write("stop_text")
+ self.processed += 1
+ if interpret_results:
+ return (line=="phases succeeded")
+ return line
+
diff --git a/portage/ebuild/ebuild_package.py b/portage/ebuild/ebuild_package.py
new file mode 100644
index 0000000..1273533
--- /dev/null
+++ b/portage/ebuild/ebuild_package.py
@@ -0,0 +1,89 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/ebuild/ebuild_package.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import os
+from portage import package
+
+class ebuild_package(package.metadata.package):
+
+ def __getattr__(self, key):
+ if key == "path":
+ return self.__dict__.setdefault("path", os.path.join(self.__dict__["_parent"].base, \
+ self.category, self.package, "%s-%s.ebuild" % (self.package, self.fullver)))
+
+ if key == "_mtime_":
+ #XXX wrap this.
+ return self.__dict__.setdefault("_mtime_",long(os.stat(self.path).st_mtime))
+ elif key == "P":
+ return self.__dict__.setdefault("P", self.package + "-" + self.version)
+ elif key == "PN":
+ return self.__dict__.setdefault("PN", self.package)
+ elif key == "PR":
+ return self.__dict__.setdefault("PR", "-r"+str(self.revision))
+
+ return super(ebuild_package, self).__getattr__(key)
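+ # e.g. for dev-util/foo-1.2-r3 these derive P="foo-1.2", PN="foo", PR="-r3",
+ # and path=<repo base>/dev-util/foo/foo-1.2-r3.ebuild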
+
+
+ def _fetch_metadata(self):
+# import pdb;pdb.set_trace()
+ data = self._parent._get_metadata(self)
+ doregen = False
+ if data == None:
+ doregen = True
+
+ # got us a dict. yay.
+ if not doregen:
+ if self._mtime_ != data.get("_mtime_"):
+ doregen = True
+ elif data.get("_eclasses_") != None and not self._parent._ecache.is_eclass_data_valid(data["_eclasses_"]):
+ doregen = True
+
+ if doregen:
+ # ah hell.
+ data = self._parent._update_metadata(self)
+
+ for k,v in data.items():
+ self.__dict__[k] = v
+
+ self.__dict__["_finalized"] = True
+ return
+
+
+class ebuild_factory(package.metadata.factory):
+ child_class = ebuild_package
+
+ def __init__(self, parent, cachedb, eclass_cache, *args,**kwargs):
+ super(ebuild_factory, self).__init__(parent, *args,**kwargs)
+ self._cache = cachedb
+ self._ecache = eclass_cache
+ self.base = self._parent_repo.base
+
+ def _get_metadata(self, pkg):
+ if self._cache != None:
+ try:
+ return self._cache[pkg.cpvstr]
+ except KeyError:
+ pass
+ return None
+
+ def _update_metadata(self, pkg):
+
+ import processor
+ ebp=processor.request_ebuild_processor()
+ mydata = ebp.get_keys(pkg, self._ecache)
+ processor.release_ebuild_processor(ebp)
+
+ mydata["_mtime_"] = pkg._mtime_
+ if mydata.get("INHERITED", False):
+ mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split() )
+ del mydata["INHERITED"]
+ else:
+ mydata["_eclasses_"] = {}
+
+ if self._cache != None:
+ self._cache[pkg.cpvstr] = mydata
+
+ return mydata
+
diff --git a/portage/ebuild/ebuild_repository.py b/portage/ebuild/ebuild_repository.py
new file mode 100644
index 0000000..130e9fb
--- /dev/null
+++ b/portage/ebuild/ebuild_repository.py
@@ -0,0 +1,65 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/ebuild/ebuild_repository.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import os, stat
+from portage.repository import prototype, errors
+#import ebuild_internal
+import ebuild_package
+
+class tree(prototype.tree):
+ false_categories = ("eclass","profiles","packages","distfiles","licenses","scripts")
+
+ def __init__(self, location, cache=None, eclass_cache=None):
+ super(tree, self).__init__()
+ self.base = location
+ try:
+ st = os.lstat(self.base)
+ if not stat.S_ISDIR(st.st_mode):
+ raise errors.InitializationError("base not a dir: %s" % self.base)
+ elif not os.access(self.base, os.R_OK|os.X_OK):
+ raise errors.InitializationError("base lacks read/executable: %s" % self.base)
+
+ except OSError:
+ raise errors.InitializationError("lstat failed on base %s" % self.base)
+ if eclass_cache == None:
+ import eclass_cache
+ eclass_cache = eclass_cache.cache(self.base)
+ self.metadata = ebuild_package.ebuild_factory(self, cache, eclass_cache)
+
+ def _get_categories(self, *optionalCategory):
+ # why the auto return? current porttrees don't allow/support categories deeper than one dir.
+ if len(optionalCategory):
+ #raise KeyError
+ return ()
+
+ try: return tuple([x for x in os.listdir(self.base) \
+ if stat.S_ISDIR(os.lstat(os.path.join(self.base,x)).st_mode) and x not in self.false_categories])
+
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching categories: %s" % str(e))
+
+ def _get_packages(self, category):
+
+ cpath = os.path.join(self.base,category.lstrip(os.path.sep))
+ try: return tuple([x for x in os.listdir(cpath) \
+ if stat.S_ISDIR(os.lstat(os.path.join(cpath,x)).st_mode)])
+
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching packages for category %s: %s" % \
+ (os.path.join(self.base,category.lstrip(os.path.sep)), str(e)))
+
+ def _get_versions(self, catpkg):
+
+ pkg = catpkg.split("/")[-1]
+ cppath = os.path.join(self.base, catpkg.lstrip(os.path.sep))
+ # 7 == len(".ebuild")
+ try: return tuple([x[len(pkg):-7].lstrip("-") for x in os.listdir(cppath) \
+ if x.endswith(".ebuild") and x.startswith(pkg) and \
+ stat.S_ISREG(os.lstat(os.path.join(cppath,x)).st_mode)])
+
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching versions for package %s: %s" % \
+ (os.path.join(self.base,catpkg.lstrip(os.path.sep)), str(e)))
+
diff --git a/portage/ebuild/eclass_cache.py b/portage/ebuild/eclass_cache.py
new file mode 100644
index 0000000..c8efc1d
--- /dev/null
+++ b/portage/ebuild/eclass_cache.py
@@ -0,0 +1,77 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/ebuild/eclass_cache.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+from portage.util.fs import normpath
+import os, sys
+
+class cache:
+	"""
+	Maintains the cache information about eclasses used in ebuilds.
+	get_eclass_path and get_eclass_data are special- one (and only one) may be set to None.
+	Any code trying to get eclass data/path will choose whichever method it prefers,
+	falling back to what's available if only one option exists.
+
+	get_eclass_path should be defined when a local path is possible/preferable.
+	get_eclass_data should be defined when dumping the eclass down the pipe is preferable/required (think remote tree).
+
+	The base class defaults to having both set (it's local). Override as needed;
+	see the commented sketch at the end of this module for a remote variant.
+	"""
+ def __init__(self, porttree, *additional_porttrees):
+ self.eclasses = {} # {"Name": ("location","_mtime_")}
+
+ self.porttrees = tuple(map(normpath, [porttree] + list(additional_porttrees)))
+ self._master_eclass_root = os.path.join(self.porttrees[0],"eclass")
+ self.update_eclasses()
+
+
+ def update_eclasses(self):
+ self.eclasses = {}
+ eclass_len = len(".eclass")
+ for x in [normpath(os.path.join(y,"eclass")) for y in self.porttrees]:
+ if not os.path.isdir(x):
+ continue
+ for y in [y for y in os.listdir(x) if y.endswith(".eclass")]:
+ try:
+ mtime=os.stat(x+"/"+y).st_mtime
+ except OSError:
+ continue
+ ys=y[:-eclass_len]
+ self.eclasses[ys] = (x, long(mtime))
+
+
+ def is_eclass_data_valid(self, ec_dict):
+ if not isinstance(ec_dict, dict):
+ return False
+ for eclass, tup in ec_dict.iteritems():
+ if eclass not in self.eclasses or tuple(tup) != self.eclasses[eclass]:
+ return False
+
+ return True
+
+
+ def get_eclass_data(self, inherits, from_master_only=False):
+ ec_dict = {}
+ for x in inherits:
+ try:
+ ec_dict[x] = self.eclasses[x]
+			except KeyError:
+ print "ec=",ec_dict
+ print "inherits=",inherits
+ raise
+ if from_master_only and self.eclasses[x][0] != self._master_eclass_root:
+ return None
+
+ return ec_dict
+
+ def get_eclass_path(self, eclass):
+ """get on disk eclass path. remote implementations need a way to say 'piss off tool' if this is called..."""
+ return os.path.join(self.eclasses[eclass][0],eclass+".eclass")
+
+ def get_eclass_contents(self, eclass):
+ """Get the actual contents of the eclass. This should be overridden for remote implementations"""
+ f=file(os.path.join(self.eclasses[eclass][0], eclass+".eclass"),"r")
+ l=f.read()
+ f.close()
+ return l
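+
+# A commented sketch of the remote variant the class docstring alludes to-
+# illustrative only; fetch_from_server() is a hypothetical transport.
+#
+#	class remote_cache(cache):
+#		get_eclass_path = None	# per the contract above, disable path access
+#
+#		def get_eclass_contents(self, eclass):
+#			return fetch_from_server(eclass + ".eclass")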
diff --git a/portage/ebuild/processor.py b/portage/ebuild/processor.py
new file mode 100644
index 0000000..93c7517
--- /dev/null
+++ b/portage/ebuild/processor.py
@@ -0,0 +1,455 @@
+# Copyright: 2004-2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/ebuild/processor.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+inactive_ebp_list = []
+active_ebp_list = []
+
+import portage.spawn, os, sys, types, logging
+from inspect import isroutine, isclass
+
+def shutdown_all_processors():
+ """kill off all known processors"""
+ global active_ebp_list, inactive_ebp_list
+ if type(active_ebp_list) != types.ListType:
+ print "warning, ebuild.active_ebp_list wasn't a list."
+ active_ebp_list = []
+
+ if type(inactive_ebp_list) != types.ListType:
+ print "warning, ebuild.inactive_ebp_list wasn't a list."
+ inactive_ebp_list = []
+
+ while len(active_ebp_list) > 0:
+ try: active_ebp_list[0].shutdown_processor()
+ except (IOError,OSError):
+ active_ebp_list.pop(0)
+ continue
+ try: active_ebp_list.pop(0)
+ except IndexError: pass
+
+ while len(inactive_ebp_list) > 0:
+ try:
+ inactive_ebp_list[0].shutdown_processor()
+ except (IOError,OSError):
+ inactive_ebp_list.pop(0)
+ continue
+ try: inactive_ebp_list.pop(0)
+ except IndexError: pass
+
+
+def request_ebuild_processor(userpriv=False, sandbox=None, fakeroot=False, save_file=None):
+ """request an ebuild_processor instance from the pool, or create a new one
+	this walks through the requirements, matching an inactive processor if one exists
+	note fakerooted processors are never reused, due to the nature of fakeroot"""
+
+ if sandbox == None:
+ sandbox = portage.spawn.sandbox_capable
+
+ global inactive_ebp_list, active_ebp_list
+ if not fakeroot:
+ for x in inactive_ebp_list:
+ if x.userprived() == userpriv and (x.sandboxed() or not sandbox):
+ inactive_ebp_list.remove(x)
+ active_ebp_list.append(x)
+ return x
+ e=ebuild_processor(userpriv, sandbox, fakeroot, save_file)
+ active_ebp_list.append(e)
+ return e
+
+
+def release_ebuild_processor(ebp):
+ """the inverse of request_ebuild_processor. Any processor requested via request_ebuild_processor
+ _must_ be released via this function once it's no longer in use.
+ this includes fakerooted processors.
+	Returns True, except when the processor requested to be released isn't marked as active"""
+
+ global inactive_ebp_list, active_ebp_list
+ try: active_ebp_list.remove(ebp)
+ except ValueError: return False
+
+ try: inactive_ebp_list.index(ebp)
+ except ValueError:
+ # if it's a fakeroot'd process, we throw it away. it's not useful outside of a chain of calls
+ if not ebp.onetime():
+ inactive_ebp_list.append(ebp)
+ else:
+ del ebp
+ return True
+
+ # if it makes it this far, that means ebp was already in the inactive list.
+ # which is indicative of an internal fsck up.
+ import traceback
+ print "ebp was requested to be free'd, yet it already is claimed inactive _and_ was in the active list"
+ print "this means somethings horked, badly"
+ traceback.print_stack()
+ return False
+
+
+class ebuild_processor:
+ """abstraction of a running ebuild.sh instance- the env, functions, etc that ebuilds expect."""
+ def __init__(self, userpriv, sandbox, fakeroot, save_file):
+ """ebuild_daemon_path shouldn't be fooled with unless the caller knows what they're doing.
+ sandbox enables a sandboxed processor
+ userpriv enables a userpriv'd processor
+		fakeroot enables a fakeroot'd processor- this is a mutually exclusive option to sandbox, and
+		requires userpriv to be enabled. Violating this will result in nastiness"""
+
+ from portage.const import EBUILD_DAEMON_PATH, PORTAGE_BIN_PATH
+
+ self.ebd = EBUILD_DAEMON_PATH
+ self.ebd_libs = PORTAGE_BIN_PATH
+ from portage_data import portage_uid, portage_gid
+ spawn_opts = {}
+
+ if fakeroot and (sandbox or not userpriv):
+ import traceback
+ traceback.print_stack()
+ print "warning, was asking to enable fakeroot but-"
+ print "sandbox",sandbox,"userpriv",userpriv
+ print "this isn't valid. bailing"
+ raise Exception,"cannot initialize with sandbox and fakeroot"
+
+ if userpriv:
+ self.__userpriv = True
+ spawn_opts.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
+ else:
+ if portage.spawn.userpriv_capable:
+ spawn_opts.update({"gid":portage_gid,"groups":[0,portage_gid]})
+ self.__userpriv = False
+
+ # open the pipes to be used for chatting with the new daemon
+ cread, cwrite = os.pipe()
+ dread, dwrite = os.pipe()
+ self.__sandbox = False
+ self.__fakeroot = False
+
+ # since it's questionable which spawn method we'll use (if sandbox or fakeroot fex),
+ # we ensure the bashrc is invalid.
+		env={"BASHRC":"/etc/portage/spork/not/valid/ha/ha", "PORTAGE_BIN_PATH":PORTAGE_BIN_PATH}
+ args = []
+ if sandbox:
+ if fakeroot:
+ print "!!! ERROR: fakeroot was on, but sandbox was also on"
+ sys.exit(1)
+ self.__sandbox = True
+ spawn_func = portage.spawn.spawn_sandbox
+ env.update({"SANDBOX_DEBUG":"1","SANDBOX_DEBUG_LOG":"/var/tmp/test"})
+
+ elif fakeroot:
+ self.__fakeroot = True
+ spawn_func = portage.spawn.spawn_fakeroot
+ args.append(save_file)
+ else:
+ spawn_func = portage.spawn.spawn
+
+ self.pid = spawn_func(self.ebd+" daemonize", fd_pipes={0:0, 1:1, 2:2, 3:cread, 4:dwrite},
+ returnpid=True, env=env, *args, **spawn_opts)[0]
+
+ os.close(cread)
+ os.close(dwrite)
+ self.ebd_write = os.fdopen(cwrite,"w")
+ self.ebd_read = os.fdopen(dread,"r")
+
+ # basically a quick "yo" to the daemon
+ self.write("dude?")
+ if not self.expect("dude!"):
+			print "error in server comms, bailing."
+ raise Exception("expected 'dude!' response from ebd, which wasn't received. likely a bug")
+ self.write(PORTAGE_BIN_PATH)
+ if self.__sandbox:
+ self.write("sandbox_log?")
+ self.__sandbox_log = self.read().split()[0]
+ self.dont_export_vars=self.read().split()
+ # locking isn't used much, but w/ threading this will matter
+
+
+ def sandboxed(self):
+ """is this instance sandboxed?"""
+ return self.__sandbox
+
+
+ def userprived(self):
+ """is this instance userprived?"""
+ return self.__userpriv
+
+
+ def fakerooted(self):
+ """is this instance fakerooted?"""
+ return self.__fakeroot
+
+
+ def onetime(self):
+ """is this instance going to be discarded after usage; eg is it fakerooted?"""
+ return self.__fakeroot
+
+
+ def write(self, string,flush=True):
+ """talk to running daemon. Disabling flush is useful when dumping large amounts of data
+ all strings written are automatically \\n terminated"""
+ if string[-1] == "\n":
+ self.ebd_write.write(string)
+ else:
+ self.ebd_write.write(string +"\n")
+ if flush:
+ self.ebd_write.flush()
+
+
+ def expect(self, want):
+ """read from the daemon, and return true or false if the returned string is what is expected"""
+ got=self.ebd_read.readline()
+ return want==got[:-1]
+
+
+ def read(self,lines=1):
+ """read data from the daemon. Shouldn't be called except internally"""
+ mydata=''
+ while lines > 0:
+ mydata += self.ebd_read.readline()
+ lines -= 1
+ return mydata
+
+
+ def sandbox_summary(self, move_log=False):
+ """if the instance is sandboxed, print the sandbox access summary"""
+ if not os.path.exists(self.__sandbox_log):
+ self.write("end_sandbox_summary")
+ return 0
+		import portage_util
+		violations=portage_util.grabfile(self.__sandbox_log)
+ if len(violations)==0:
+ self.write("end_sandbox_summary")
+ return 0
+ if not move_log:
+ move_log=self.__sandbox_log
+ elif move_log != self.__sandbox_log:
+			myf=open(move_log,"w")
+ for x in violations:
+ myf.write(x+"\n")
+ myf.close()
+ from output import red
+ self.ebd_write.write(red("--------------------------- ACCESS VIOLATION SUMMARY ---------------------------")+"\n")
+ self.ebd_write.write(red("LOG FILE = \"%s\"" % move_log)+"\n\n")
+ for x in violations:
+ self.ebd_write.write(x+"\n")
+ self.write(red("--------------------------------------------------------------------------------")+"\n")
+ self.write("end_sandbox_summary")
+ try:
+ os.remove(self.__sandbox_log)
+ except (IOError, OSError), e:
+ print "exception caught when cleansing sandbox_log=%s" % str(e)
+ return 1
+
+
+ def preload_eclasses(self, ec_file):
+		"""this preloads eclasses into a function, thus avoiding the cost of going to disk.
+		preloading eutils (which is heavily inherited) speeds up regen times fex"""
+ if not os.path.exists(ec_file):
+ return 1
+ self.write("preload_eclass %s" % ec_file)
+ if self.expect("preload_eclass succeeded"):
+ self.preloaded_eclasses=True
+ return True
+ return False
+
+
+ def lock(self):
+ """lock the processor. Currently doesn't block any access, but will"""
+ self.processing_lock = True
+
+
+ def unlock(self):
+ """unlock the processor"""
+ self.processing_lock = False
+
+
+ def locked(self):
+ """is the processor locked?"""
+ return self.processing_lock
+
+
+ def is_alive(self):
+		"""returns whether the processor is still alive, eg hasn't been explicitly shut down.
+		Currently doesn't check to ensure the pid is still running, yet it should"""
+ return self.pid != None
+
+
+ def shutdown_processor(self):
+ """tell the daemon to shut itself down, and mark this instance as dead"""
+ try:
+ if self.is_alive():
+ self.write("shutdown_daemon")
+ self.ebd_write.close()
+ self.ebd_read.close()
+
+ # now we wait.
+ os.waitpid(self.pid,0)
+ except (IOError,OSError,ValueError):
+ pass
+
+ # currently, this assumes all went well.
+ # which isn't always true.
+ self.pid = None
+
+
+ def set_sandbox_state(self,state):
+ """tell the daemon whether to enable the sandbox, or disable it"""
+ if state:
+ self.write("set_sandbox_state 1")
+ else:
+ self.write("set_sandbox_state 0")
+
+
+ def send_env(self, env_dict):
+ """transfer the ebuild's desired env (env_dict) to the running daemon"""
+
+ self.write("start_receiving_env\n")
+ exported_keys = ''
+ for x in env_dict.keys():
+ if x not in self.dont_export_vars:
+ self.write("%s=%s\n" % (x, env_dict[x]), flush=False)
+ exported_keys += x+' '
+ self.write("export "+exported_keys,flush=False)
+ self.write("end_receiving_env")
+ return self.expect("env_received")
+
+
+ def set_logfile(self,logfile=''):
+ """relevant only when the daemon is sandbox'd, set the logfile"""
+ self.write("logging %s" % logfile)
+ return self.expect("logging_ack")
+
+
+ def __del__(self):
+ """simply attempts to notify the daemon to die"""
+ # for this to be reached means we ain't in a list no more.
+ if self.pid:
+ self.shutdown_processor()
+
+
+ def get_keys(self, package_inst, eclass_cache):
+ """request the auxdbkeys from an ebuild
+ returns a dict when successful, None when failed"""
+
+ env={}
+ for x in ("P", "PN", "PR"):
+ env[x] = getattr(package_inst, x)
+
+ env["EBUILD"] = package_inst.path
+ env["CATEGORY"] = package_inst.category
+ self.write("process_ebuild depend")
+ self.send_env(env)
+ self.set_sandbox_state(True)
+ self.write("start_processing")
+
+ self.expect("starting depend")
+ metadata_keys = {}
+ val=self.generic_handler(additional_commands={ \
+ "inherit":(self.__class__._inherit, [eclass_cache],{}), \
+ "key":(self.__class__._receive_key, [metadata_keys], {})} )
+
+ if not val:
+ logging.error("returned val from get_keys was '%s'" % str(val))
+ raise Exception(val)
+
+ return metadata_keys
+
+ def _receive_key(self, line, keys_dict):
+ line=line.split("=",1)
+ l=len(line)
+ if l != 2:
+ raise FinishedProcessing(True)
+ else:
+ keys_dict[line[0]] = line[1]
+
+
+ def _inherit(self, line, ecache):
+ """callback for implementing inherit digging into eclass_cache. not for normal consumption."""
+ if line == None:
+ self.write("failed")
+ raise UnhandledCommand("inherit requires an eclass specified, none specified")
+
+ line=line.strip()
+ if ecache.get_eclass_path != None:
+ value = ecache.get_eclass_path(line)
+ self.write("path")
+ self.write(value)
+		elif ecache.get_eclass_contents != None:
+			# dump the eclass itself down the pipe- get_eclass_data only carries
+			# the location/mtime validation tuples, not the contents
+			value = ecache.get_eclass_contents(line)
+			self.write("transfer")
+			self.write(value)
+ else:
+ raise AttributeError("neither get_eclass_data nor get_eclass_path is usable on ecache!")
+
+
+ # this basically handles all hijacks from the daemon, whether confcache or portageq.
+ def generic_handler(self, additional_commands={}):
+ """internal function that responds to the running ebuild processor's requests
+
+ additional_commands is a dict of command:callable, or command:(callable,args,kwargs)
+ Note that the processor still is inserted as the first arg for positional, with line as second.
+
+ commands cannot have spaces. the callable is called with the processor as first arg, and
+ remaining string (None if no remaining fragment) as second arg
+ (if you need to split the args to command, whitespace splitting falls to your func.)
+
+ Chucks an UnhandledCommand exception when an unknown command is encountered.
+ """
+
+ # note that self is passed in. so... we just pass in the unbound instance. Specifically, via digging through __class__
+		# if you don't do it, sandbox_summary (fex) cannot be overridden, this func will just use this class's version.
+ # so dig through self.__class__ for it. :P
+
+ handlers = {"request_sandbox_summary":(self.__class__.sandbox_summary,[],{})}
+ for x in ("prob", "env_receiving_failed"):
+ handlers[x] = (chuck_UnhandledCommand, [False], {})
+ handlers["phases"] = (chuck_StoppingCommand, [lambda f: f.lower().strip()=="succeeded"], {})
+
+ handlers.update(additional_commands)
+
+ for x in handlers.keys():
+ if isroutine(handlers[x]):
+ handlers[x] = (handlers[x], [], {})
+ # if it's a list, must be len 3 and isroutine==true for [0]
+ elif not (isinstance(handlers[x], list) or isinstance(handlers[x], tuple)) or \
+ not (len(handlers[x]) == 3 and isroutine(handlers[x][0])):
+ raise TypeError(handlers[x])
+
+ try:
+ while True:
+ line=self.read().rstrip()
+ # split on first whitespace.
+
+ s=line.split(None,1)
+ if s[0] in handlers:
+ if len(s) == 1:
+ s.append(None)
+ # looks nasty, but isn't. just inserts self + line for positional expansion
+ handlers[s[0]][0](self, s[1], *handlers[s[0]][1], **handlers[s[0]][2])
+ else:
+					print "chucking unhandled command, s=",s
+ raise UnhandledCommand(s[0])
+
+ except FinishedProcessing, fp:
+ v = fp.val; del fp
+ return v
+
+def chuck_UnhandledCommand(processor, line):
+ print "chucking unhandled"
+ raise UnhandledCommand(line)
+
+def chuck_StoppingCommand(processor, val, *args):
+	# when a converter routine was supplied (fex the phases handler's lambda),
+	# run the daemon's response line through it to get the actual return value
+	if len(args) and (isroutine(args[0]) or isclass(args[0])):
+		raise FinishedProcessing(args[0](val))
+	raise FinishedProcessing(val)
+
+class ProcessingInterruption(Exception):
+ pass
+
+class FinishedProcessing(ProcessingInterruption):
+ def __init__(self, val, msg=None): self.val, self.msg = val, msg
+ def __str__(self): return "Finished processing with val, %s" % str(self.val)
+
+class UnhandledCommand(ProcessingInterruption):
+ def __init__(self, line=None): self.line=line
+ def __str__(self): return "unhandled command, %s" % self.line
+
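+# Typical lifecycle, as a commented sketch- running it requires a live ebd
+# daemon plus package/eclass-cache instances, hence illustration only:
+#
+#	import processor
+#	ebp = processor.request_ebuild_processor()	# reuse a pooled one if possible
+#	try:
+#		keys = ebp.get_keys(pkg, ecache)	# pkg, ecache: hypothetical instances
+#	finally:
+#		processor.release_ebuild_processor(ebp)	# mandatory, returns it to the pool
+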
diff --git a/portage/package/__init__.py b/portage/package/__init__.py
new file mode 100644
index 0000000..bcd3699
--- /dev/null
+++ b/portage/package/__init__.py
@@ -0,0 +1,6 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/package/__init__.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import metadata
diff --git a/portage/package/cpv.py b/portage/package/cpv.py
new file mode 100644
index 0000000..d417643
--- /dev/null
+++ b/portage/package/cpv.py
@@ -0,0 +1,328 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Jason Stubbs (jstubbs@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/package/cpv.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import re
+
+
+pkg_regexp = re.compile("^[a-zA-Z0-9]([-_+a-zA-Z0-9]*[+a-zA-Z0-9])?$")
+ver_regexp = re.compile("^(cvs\\.)?(\\d+)((\\.\\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\\d*)*)(-r(\\d+))?$")
+suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
+suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
+
+class CPV(object):
+
+ """
+ Attributes
+
+ str category
+ str package
+ str key (cat/pkg)
+ str version
+ int revision
+
+ Methods
+
+ int __hash__()
+ str __repr__()
+ int __cmp__(CPV)
+ """
+
+ def __init__(self, cpvstr):
+ if not isinstance(cpvstr, str):
+ raise ValueError(cpvstr)
+ self.__dict__["cpvstr"] = cpvstr
+ self.__dict__["hash"] = hash(cpvstr)
+
+ def __hash__(self):
+ return self.hash
+
+ def __repr__(self):
+ return self.cpvstr
+
+ def __setattr__(self, name, value):
+ raise Exception()
+
+ def __getattr__(self, name):
+
+ if name == "category":
+ myparts = self.cpvstr.split("/")
+ if len(myparts) >= 2:
+ if not pkg_regexp.match(myparts[0]):
+ raise ValueError(self.cpvstr)
+ self.__dict__["category"] = myparts[0]
+ else:
+ self.__dict__["category"] = None
+
+ if name == "package":
+ if self.category:
+ myparts = self.cpvstr[len(self.category)+1:].split("-")
+ else:
+ myparts = self.cpvstr.split("-")
+ if ver_regexp.match(myparts[0]):
+ raise ValueError(self.cpvstr)
+ pos = 1
+ while pos < len(myparts) and not ver_regexp.match(myparts[pos]):
+ pos += 1
+ pkgname = "-".join(myparts[:pos])
+ if not pkg_regexp.match(pkgname):
+ raise ValueError(self.cpvstr)
+ self.__dict__["package"] = pkgname
+
+ if name == "key":
+ if self.category:
+ self.__dict__["key"] = self.category +"/"+ self.package
+ else:
+ self.__dict__["key"] = self.package
+
+ if name in ("version","revision","fullver"):
+ if self.category:
+ myparts = self.cpvstr[len(self.category+self.package)+2:].split("-")
+ else:
+ myparts = self.cpvstr[len(self.package)+1:].split("-")
+
+ if not myparts[0]:
+ self.__dict__["version"] = None
+ self.__dict__["revision"] = None
+
+ else:
+ if myparts[-1][0] == "r" and myparts[-1][1:].isdigit():
+ self.__dict__["revision"] = int(myparts[-1][1:])
+ myparts = myparts[:-1]
+ else:
+# self.__dict__["revision"] = 0 # harring changed this
+ self.__dict__["revision"] = None
+
+ for x in myparts:
+ if not ver_regexp.match(x):
+						raise ValueError(self.cpvstr)
+
+ self.__dict__["version"] = "-".join(myparts)
+ if name == "fullver":
+ if self.version == None:
+ self.__dict__["fullver"] = None
+ elif self.revision == None:
+ self.__dict__["fullver"] = self.version
+ else:
+ self.__dict__["fullver"] = "%s-r%i" % (self.version,self.revision)
+
+ if name in self.__dict__:
+ return self.__dict__[name]
+ raise AttributeError,name
+
+ def __cmp__(self, other):
+
+ if self.cpvstr == other.cpvstr:
+ return 0
+
+ if self.category and other.category and self.category != other.category:
+ return cmp(self.category, other.category)
+
+ if self.package and other.package and self.package != other.package:
+ return cmp(self.package, other.package)
+
+ if self.version != other.version:
+
+ if self.version is None:
+ raise ValueError(self)
+
+ if other.version is None:
+ raise ValueError(other)
+
+ match1 = ver_regexp.match(self.version)
+ match2 = ver_regexp.match(other.version)
+
+ # shortcut for cvs ebuilds (new style)
+ if match1.group(1) and not match2.group(1):
+ return 1
+ elif match2.group(1) and not match1.group(1):
+ return -1
+
+ # building lists of the version parts before the suffix
+ # first part is simple
+ list1 = [int(match1.group(2))]
+ list2 = [int(match2.group(2))]
+
+ # this part would greatly benefit from a fixed-length version pattern
+ if len(match1.group(3)) or len(match2.group(3)):
+ vlist1 = match1.group(3)[1:].split(".")
+ vlist2 = match2.group(3)[1:].split(".")
+ for i in range(0, max(len(vlist1), len(vlist2))):
+ if len(vlist1) <= i or len(vlist1[i]) == 0:
+ list1.append(0)
+ list2.append(int(vlist2[i]))
+ elif len(vlist2) <= i or len(vlist2[i]) == 0:
+ list1.append(int(vlist1[i]))
+ list2.append(0)
+ # Let's make life easy and use integers unless we're forced to use floats
+ elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
+ list1.append(int(vlist1[i]))
+ list2.append(int(vlist2[i]))
+ # now we have to use floats so 1.02 compares correctly against 1.1
+ else:
+ list1.append(float("0."+vlist1[i]))
+ list2.append(float("0."+vlist2[i]))
+
+ # and now the final letter
+ if len(match1.group(5)):
+ list1.append(ord(match1.group(5)))
+ if len(match2.group(5)):
+ list2.append(ord(match2.group(5)))
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ return -1
+ elif len(list2) <= i:
+ return 1
+ elif list1[i] != list2[i]:
+ return list1[i] - list2[i]
+
+ # main version is equal, so now compare the _suffix part
+ list1 = match1.group(6).split("_")[1:]
+ list2 = match2.group(6).split("_")[1:]
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ s1 = ("p","0")
+ else:
+ s1 = suffix_regexp.match(list1[i]).groups()
+ if len(list2) <= i:
+ s2 = ("p","0")
+ else:
+ s2 = suffix_regexp.match(list2[i]).groups()
+ if s1[0] != s2[0]:
+ return suffix_value[s1[0]] - suffix_value[s2[0]]
+ if s1[1] != s2[1]:
+ # it's possible that the s(1|2)[1] == ''
+ # in such a case, fudge it.
+ try: r1 = int(s1[1])
+ except ValueError: r1 = 0
+ try: r2 = int(s2[1])
+ except ValueError: r2 = 0
+ return r1 - r2
+
+ return cmp(self.revision, other.revision)
+
+
+class Atom(object):
+
+ """
+ Attributes
+
+ bool blocks
+ str operator
+ bool glob_match
+ CPV cpv
+
+ Methods
+ int __hash__()
+ str __repr__()
+ bool match(CPV)
+ """
+
+ def __init__(self, atomstr):
+ if not isinstance(atomstr, str):
+ raise ValueError(atomstr)
+ self.__dict__["atomstr"] = atomstr
+ self.__dict__["hash"] = hash(atomstr)
+
+ def __hash__(self):
+ return self.hash
+
+ def __repr__(self):
+ return self.atomstr
+
+ def __setattr__(self, name, value):
+ raise Exception()
+
+ def __getattr__(self, name):
+
+		if not self.__dict__.has_key("cpv"):
+
+ myatom = self.atomstr
+
+ if myatom[0] == "!":
+ self.__dict__["blocks"] = True
+ myatom = myatom[1:]
+ else:
+ self.__dict__["blocks"] = False
+
+ if myatom[0:2] in ["<=", ">="]:
+ self.__dict__["operator"] = myatom[0:2]
+ myatom = myatom[2:]
+ elif myatom[0] in ["<", ">", "=", "~"]:
+ self.__dict__["operator"] = myatom[0]
+ myatom = myatom[1:]
+ else:
+ self.__dict__["operator"] = None
+
+ if myatom[-1] == "*":
+ self.__dict__["glob_match"] = True
+ myatom = myatom[:-1]
+ else:
+ self.__dict__["glob_match"] = False
+
+ self.__dict__["cpv"] = CPV(myatom)
+
+ if self.operator != "=" and self.glob_match:
+ raise ValueError(self.atomstr)
+
+ if self.operator and not self.cpv.version:
+ raise ValueError(self.atomstr)
+
+ if not self.operator and self.cpv.version:
+ raise ValueError(self.atomstr)
+
+ if self.operator == "~" and self.cpv.revision:
+ raise ValueError(self.atomstr)
+
+ if self.glob_match and self.cpv.revision:
+ raise ValueError(self.atomstr)
+
+ if not self.__dict__.has_key(name):
+ raise AttributeError(name)
+
+ return self.__dict__[name]
+
+ def match(self, cpv):
+
+ if self.cpv.category and cpv.category and self.cpv.category != cpv.category:
+ return False
+
+ if self.cpv.package and cpv.package and self.cpv.package != cpv.package:
+ return False
+
+ if not self.operator:
+ return True
+
+		if self.operator == "=":
+			if self.glob_match and cpv.version.startswith(self.cpv.version):
+				return True
+			if self.cpv.version != cpv.version:
+				return False
+			if self.cpv.revision != cpv.revision:
+				return False
+			# version and revision are identical- an exact match.
+			return True
+
+ if self.operator == "~" and self.cpv.version == cpv.version:
+ return True
+
+ diff = cmp(self.cpv, cpv)
+
+ if not diff:
+ if self.operator == "<=" or self.operator == ">=":
+ return True
+ else:
+ return False
+
+ if diff > 0:
+ if self.operator[0] == "<":
+ return True
+ else:
+ return False
+
+ #if diff < 0:
+ if self.operator[0] == ">":
+ return True
+ #else:
+ return False
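+
+if __name__ == "__main__":
+	# quick illustrative self-test; the cpvs below are made up examples
+	a = CPV("dev-util/diffball-0.7.1")
+	b = CPV("dev-util/diffball-0.7.2")
+	print a.category, a.package, a.fullver		# dev-util diffball 0.7.1
+	print a < b					# True- 0.7.1 sorts before 0.7.2
+	print Atom(">=dev-util/diffball-0.6").match(b)	# True
+	print Atom("!dev-util/diffball").blocks		# True- leading ! marks a blocker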
diff --git a/portage/package/metadata.py b/portage/package/metadata.py
new file mode 100644
index 0000000..e4d3ace
--- /dev/null
+++ b/portage/package/metadata.py
@@ -0,0 +1,81 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/package/metadata.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import weakref
+from cpv import CPV
+
+class package(CPV):
+ def __init__(self, cpv, parent_repository):
+ super(package,self).__init__(cpv)
+ self.__dict__["_cpv_finalized"] = False
+ self.__dict__["_finalized"] = False
+ self.__dict__["_parent"] = parent_repository
+
+ def __setattr__(self, *args, **kwargs):
+ raise AttributeError
+
+ def __delattr__(self, *args, **kwargs):
+ raise AttributeError
+
+ def __getitem__(self, key):
+ try: return getattr(self,key)
+ except AttributeError:
+ raise KeyError(key)
+
+ def __getattr__(self, attr):
+ if not self._cpv_finalized:
+ try: return super(package,self).__getattr__(attr)
+ except AttributeError:
+ #enable this when CPV does it.
+ #self.__cpv_finalized = True
+ pass
+ if self._finalized:
+ raise AttributeError, attr
+
+ # if we've made it here, then more is needed.
+ self._fetch_metadata()
+ self.__dict__["_finalized"] = True
+ if attr in self.__dict__:
+ return self.__dict__[attr]
+ raise AttributeError,attr
+
+ def _fetch_metadata(self):
+ raise NotImplementedError
+
+
+class factory(object):
+ child_class = package
+ def __init__(self, parent_repo):
+ self._parent_repo = parent_repo
+ self._cached_instances = weakref.WeakValueDictionary()
+
+ def new_package(self, cpv):
+ if cpv in self._cached_instances:
+ return self._cached_instances[cpv]
+ d = self._get_new_child_data(cpv)
+ m = self.child_class(cpv, self, *d[0], **d[1])
+ self._cached_instances[cpv] = m
+ return m
+
+ def clear(self):
+ self._cached_instances.clear()
+
+ def _load_package_metadata(self, inst):
+ raise NotImplementedError
+
+ def __del__(self):
+ self.clear()
+
+ def _get_metadata(self, *args):
+ raise NotImplementedError
+
+ def _update_metadata(self, *args):
+ raise NotImplementedError
+
+ def _get_new_child_data(self, cpv):
+ """return pargs,kwargs for any new children generated by this factory.
+ defaults to [], {}
+ Probably will be rolled into a class/instance attribute whenever someone cleans this up"""
+ return ([],{})
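+
+if __name__ == "__main__":
+	# minimal illustrative subclassing (hypothetical names), showing the
+	# _fetch_metadata contract and the weakref based instance caching
+	class demo_package(package):
+		def _fetch_metadata(self):
+			# real implementations pull this from a cache or the ebuild itself
+			self.__dict__["DESCRIPTION"] = "stub metadata for %s" % self.cpvstr
+
+	class demo_factory(factory):
+		child_class = demo_package
+
+	f = demo_factory(None)
+	p = f.new_package("dev-util/demo-1.0")
+	print p.DESCRIPTION				# triggers _fetch_metadata lazily
+	print f.new_package("dev-util/demo-1.0") is p	# True- same cached instance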
diff --git a/portage/repository/__init__.py b/portage/repository/__init__.py
new file mode 100644
index 0000000..42e0d16
--- /dev/null
+++ b/portage/repository/__init__.py
@@ -0,0 +1,6 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/repository/__init__.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import prototype
diff --git a/portage/repository/errors.py b/portage/repository/errors.py
new file mode 100644
index 0000000..8a30b71
--- /dev/null
+++ b/portage/repository/errors.py
@@ -0,0 +1,14 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/repository/errors.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+class TreeCorruption(Exception):
+ def __init__(self, err):
+ self.err = err
+ def __str__(self):
+ return "unexpected tree corruption: %s" % str(self.err)
+
+class InitializationError(TreeCorruption):
+ def __str__(self):
+ return "initialization failed: %s" % str(self.err)
diff --git a/portage/repository/multiplex.py b/portage/repository/multiplex.py
new file mode 100644
index 0000000..b575ff5
--- /dev/null
+++ b/portage/repository/multiplex.py
@@ -0,0 +1,72 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/repository/multiplex.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import prototype, errors
+
+class tree(prototype.tree):
+ def __init__(self, *trees):
+ super(tree,self).__init__()
+ for x in trees:
+ if not isinstance(x, prototype.tree):
+ raise errors.InitializationError("%s is not a repository tree derivative" % str(x))
+ self.trees=trees
+
+ def _get_categories(self, *optionalCategory):
+ d={}
+ failures=0
+ if len(optionalCategory):
+ optionalCategory=optionalCategory[0]
+ for x in self.trees:
+ try:
+ for y in x.categories[optionalCategory]:
+ d[y] = None
+ except KeyError:
+ failures+=1
+ else:
+ for x in self.trees:
+ try:
+ for y in x.categories:
+ d[y] = None
+ except (errors.TreeCorruption, KeyError):
+ failures+=1
+ if failures == len(self.trees):
+ if optionalCategory:
+ raise KeyError("category base '%s' not found" % str(optionalCategory))
+ raise KeyError("failed getting categories")
+ return tuple(d.keys())
+
+ def _get_packages(self, category):
+ d={}
+ failures=0
+ for x in self.trees:
+ try:
+ for y in x.packages[category]:
+ d[y] = None
+ except (errors.TreeCorruption, KeyError):
+ failures+=1
+ if failures == len(self.trees):
+ raise KeyError("category '%s' not found" % category)
+ return tuple(d.keys())
+
+ def _get_versions(self,package):
+ d={}
+ failures=0
+ for x in self.trees:
+ try:
+ for y in x.versions[package]:
+ d[y] = None
+ except (errors.TreeCorruption, KeyError):
+ failures+=1
+
+ if failures == len(self.trees):
+			raise KeyError("package '%s' not found" % package)
+ return tuple(d.keys())
+
+ def itermatch(self, atom):
+ d={}
+ for t in self.trees:
+ for m in t.match(atom):
+ d[m] = None
+ return d.keys()
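+
+# Illustrative sketch (hypothetical trees): lookups succeed so long as at
+# least one member tree can answer; KeyError propagates only once all fail.
+#
+#	combined = tree(ebuild_tree, binpkg_tree)	# any prototype.tree derivatives
+#	combined.categories		# union of every member's categories
+#	combined.match(some_atom)	# deduped matches across every member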
diff --git a/portage/repository/prototype.py b/portage/repository/prototype.py
new file mode 100644
index 0000000..26da8c9
--- /dev/null
+++ b/portage/repository/prototype.py
@@ -0,0 +1,96 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/repository/prototype.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+from portage.util.dicts import IndexableSequence
+from weakref import proxy
+
+def ix_cat_callable(*cat):
+ return "/".join(cat)
+
+class tree(object):
+ package_class = None
+ def __init__(self, frozen=True):
+ self.categories = IndexableSequence(self._get_categories, self._get_categories,
+ returnIterFunc=ix_cat_callable, returnEmpty=True, modifiable=(not frozen))
+ self.packages = IndexableSequence(self.categories.iterkeys, self._get_packages, \
+ returnIterFunc=lambda x,y: str(x)+"/"+str(y), modifiable=(not frozen))
+ self.versions = IndexableSequence(self.packages.__iter__, self._get_versions, \
+ returnIterFunc=lambda x,y: str(x)+"-"+str(y), modifiable=(not frozen))
+ self.raw_repo = proxy(self)
+ self.frozen = frozen
+
+ def _get_categories(self, *arg):
+ raise NotImplementedError
+
+ def _get_packages(self, category):
+ raise NotImplementedError
+
+ def _get_versions(self, package):
+ raise NotImplementedError
+
+ def __getitem__(self, cpv):
+ cpv_inst = self.metadata.new_package(cpv)
+ if cpv_inst.fullver not in self.versions[cpv_inst.key]:
+ del cpv_inst
+ raise KeyError(cpv)
+ return cpv_inst
+
+ def __setitem__(self, *values):
+ raise AttributeError
+
+ def __delitem__(self, cpv):
+ raise AttributeError
+
+ def __iter__(self):
+ for cpv in self.versions:
+ yield self.metadata.new_package(cpv)
+ return
+
+ def match(self, atom):
+ return list(self.itermatch(atom))
+
+ def itermatch(self, atom):
+ if atom.cpv.category == None:
+ candidates = self.packages
+ else:
+ if atom.cpv.package == None:
+ try: candidates = self.packages[atom.cpv.category]
+ except KeyError:
+ # just stop now. no category matches == no yielded cpvs.
+ return
+ else:
+ try:
+ if atom.cpv.package not in self.packages[atom.cpv.category]:
+ # no matches possible
+ return
+ candidates = [atom.cpv.key]
+
+ except KeyError:
+ # atom.cpv.category wasn't valid. no matches possible.
+ return
+
+ #actual matching.
+ for catpkg in candidates:
+ for ver in self.versions[catpkg]:
+ if atom.match(self.package_class(catpkg+"-"+ver)):
+ yield self[catpkg+"-"+ver]
+ return
+
+
+ def add_package(self, pkg):
+ if self.frozen:
+ raise AttributeError,"repo is frozen"
+		return self._add_new_package(pkg)
+
+ def _add_new_package(self, pkg):
+ raise NotImplementedError
+
+ def del_package(self, key):
+ if self.frozen:
+ raise AttributeError,"repo is frozen"
+		return self._del_package(key)
+
+ def _del_package(self,pkg):
+ raise NotImplementedError
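+
+# The concrete-repository contract, sketched- a subclass supplies the three
+# _get_* hooks plus a metadata factory, and the IndexableSequences and
+# itermatch above handle the rest. ebuild_repository.tree is the in-tree
+# example; the values here are purely illustrative:
+#
+#	class mytree(tree):
+#		def _get_categories(self, *cat):
+#			return ("dev-util",)
+#		def _get_packages(self, category):
+#			return ("diffball",)
+#		def _get_versions(self, catpkg):
+#			return ("0.7.1",)
+#	# plus self.metadata (a package.metadata.factory) for __getitem__/__iter__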
diff --git a/portage/repository/visibility.py b/portage/repository/visibility.py
new file mode 100644
index 0000000..a1fdc5f
--- /dev/null
+++ b/portage/repository/visibility.py
@@ -0,0 +1,25 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/repository/visibility.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+# icky.
+# ~harring
+import prototype, errors
+
+class tree(prototype.tree):
+
+ """wrap an existing repository filtering results based upon passed in restrictions"""
+ def __init__(self, repo, restrictions):
+	def __init__(self, repo, restrictions):
+		if not isinstance(repo, prototype.tree):
+			raise errors.InitializationError("%s is not a repository tree derivative" % str(repo))
+		self.raw_repo = repo
+		if not isinstance(restrictions, list):
+			restrictions = [restrictions]
+		self._restrictions = restrictions
+
+ def itermatch(self, atom):
+ for cpv in self.raw_repo.itermatch(atom):
+ for r in self._restrictions:
+ if r.match(cpv):
+ yield cpv
diff --git a/portage/restrictions/__init__.py b/portage/restrictions/__init__.py
new file mode 100644
index 0000000..f2569d3
--- /dev/null
+++ b/portage/restrictions/__init__.py
@@ -0,0 +1,5 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/restrictions/__init__.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
diff --git a/portage/restrictions/restriction.py b/portage/restrictions/restriction.py
new file mode 100644
index 0000000..25ff2ef
--- /dev/null
+++ b/portage/restrictions/restriction.py
@@ -0,0 +1,117 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/restrictions/restriction.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import re
+
+class Restriction(object):
+
+ def match(self, *arg, **kwargs):
+ raise NotImplementedError
+
+
+class RestrictionClause(object):
+	"""base restriction matching object; overrides setattr to provide the usual write once trickery
+	all derivatives *must* be __slots__ based"""
+
+ def __setattr__(self, name, value):
+ try: getattr(self, name)
+
+ except AttributeError:
+ object.__setattr__(self, name, value)
+ else: raise AttributeError
+
+class VersionRestriction(RestrictionClause):
+ pass
+
+
+class StrMatch(RestrictionClause):
+ """ Base string matching restriction. all derivatives must be __slot__ based classes"""
+ pass
+
+
+class StrRegexMatch(StrMatch):
+ #potentially redesign this to jit the compiled_re object
+ __slots__ = ("regex", "compiled_re", "flags")
+
+ def __init__(self, regex, CaseSensitive=True):
+ self.regex = regex
+ flags = 0
+ if not CaseSensitive:
+ flags = re.I
+ self.flags = flags
+ self.compiled_re = re.compile(regex, flags)
+
+
+ def match(self, value):
+ return self.compiled_re.match(str(value)) != None
+
+
+class StrExactMatch(StrMatch):
+ __slots__ = ("exact", "flags")
+
+ def __init__(self, exact, CaseSensitive=True):
+ if not CaseSensitive:
+ self.flags = re.I
+ self.exact = str(exact).lower()
+ else:
+ self.flags = 0
+ self.exact = str(exact)
+
+
+ def match(self, value):
+ if self.flags & re.I: return self.exact == str(value).lower()
+ else: return self.exact == str(value)
+
+
+class StrSubstringMatch(StrMatch):
+	__slots__ = ("substr", "flags")
+
+ def __init__(self, substr, CaseSensitive=True):
+ if not CaseSensitive:
+ self.flags = re.I
+ substr = str(substr).lower()
+ else:
+ self.flags = 0
+ substr = str(substr)
+		self.substr = substr
+
+
+ def match(self, value):
+ if self.flags & re.I: value = str(value).lower()
+ else: value = str(value)
+ return value.find(self.substr) != -1
+
+
+class PackageDataRestriction(Restriction):
+ __slots__ = ("metadata_key", "strmatch")
+
+ def __init__(self, metadata_key, StrMatchInstance):
+ self.metadata_key = metadata_key
+ self.strmatch = StrMatchInstance
+
+
+ def pmatch(self, packageinstance):
+		try: return self.match(getattr(packageinstance.data, self.metadata_key))
+
+ except AttributeError:
+ return False
+
+
+ def match(self, value):
+ return self.strmatch.match(value)
+
+
+ def __setattr__(self, name, value):
+ try: getattr(self, name)
+
+ except AttributeError:
+ object.__setattr__(self, name, value)
+
+ else: raise AttributeError
+
+
+#cough. yeah. somebody fill this out please :)
+class ConfigRestriction(Restriction):
+ pass
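+
+if __name__ == "__main__":
+	# illustrative self-test of the StrMatch family; sample values are made up
+	print StrRegexMatch("^dev-").match("dev-util")				# True
+	print StrExactMatch("DiffBall", CaseSensitive=False).match("diffball")	# True
+	print StrSubstringMatch("ball").match("dev-util/diffball")		# True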
diff --git a/portage/restrictions/restrictionSet.py b/portage/restrictions/restrictionSet.py
new file mode 100644
index 0000000..0cdf1b9
--- /dev/null
+++ b/portage/restrictions/restrictionSet.py
@@ -0,0 +1,82 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/restrictions/Attic/restrictionSet.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import restriction
+
+class RestrictionSet(restriction.Restriction):
+	__slots__ = ("restrictions",)
+
+ def __init__(self, initialRestrictions=[]):
+ for x in initialRestrictions:
+			if not isinstance(x, restriction.Restriction):
+ #bad monkey.
+ raise TypeError, x
+ self.restrictions = list(initialRestrictions)
+
+
+ def addRestriction(self, NewRestriction):
+		if not isinstance(NewRestriction, restriction.Restriction):
+ raise TypeError, NewRestriction
+
+ self.restrictions.append(NewRestriction)
+
+
+ def get_tree_restrictions(self):
+ l = []
+ for x in self.restrictions:
+			if isinstance(x, RestrictionSet):
+ l2 = x.get_tree_restrictions()
+ if len(l2):
+ l.append(l2)
+ elif not isinstance(x, restriction.ConfigRestriction):
+ l.append(x)
+ return self.__class__(l)
+
+
+ def get_conditionals(self):
+ l = []
+ for x in self.restrictions:
+			if isinstance(x, RestrictionSet):
+ l2 = x.get_conditionals()
+ if len(l2):
+ l.append(l2)
+ elif isinstance(x, restriction.ConfigRestriction):
+ l.append(x)
+ return self.__class__(l)
+
+
+ def pmatch(self, packagedataInstance):
+ raise NotImplementedError
+
+
+ def finalize(self):
+ self.restrictions = tuple(self.restrictions)
+
+
+class AndRestrictionSet(RestrictionSet):
+ __slots__ = tuple(RestrictionSet.__slots__)
+
+ def match(self, packagedataInstance):
+ for rest in self.restrictions:
+ if not rest.pmatch(packagedataInstance):
+ return False
+ return True
+
+
+class OrRestrictionSet(RestrictionSet):
+ __slots__ = tuple(RestrictionSet.__slots__)
+
+ def match(self, packagedataInstance):
+ for rest in self.restrictions:
+ if rest.pmatch(packagedataInstance):
+ return True
+ return False
+
+
+# this may not be used. intended as a way to identify a restrictionSet as specifically identifying a package.
+# resolver shouldn't need it anymore
+class PackageRestriction(AndRestrictionSet):
+ pass
+
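+# Composition sketch (pkg is a hypothetical package instance)- boolean sets
+# nest, and matching delegates to each member's pmatch:
+#
+#	desc = restriction.PackageDataRestriction("DESCRIPTION", restriction.StrSubstringMatch("delta"))
+#	lic = restriction.PackageDataRestriction("LICENSE", restriction.StrExactMatch("GPL-2"))
+#	AndRestrictionSet([desc, lic]).match(pkg)	# pkg needs a .data attribute
+#	OrRestrictionSet([desc, lic]).match(pkg)
+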
diff --git a/portage/sync/__init__.py b/portage/sync/__init__.py
new file mode 100644
index 0000000..8638d13
--- /dev/null
+++ b/portage/sync/__init__.py
@@ -0,0 +1,10 @@
+# sync/__init__.py; sync module namespace initialization
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/sync/__init__.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import rsync
+import cvs
+# note: snapshot.py currently raises on import (portage_checksum cleanups pending);
+# importing it here would break the whole sync namespace, so skip it for now.
+#import snapshot
+from parseuri import parseSyncUri
+
diff --git a/portage/sync/cvs.py b/portage/sync/cvs.py
new file mode 100644
index 0000000..196f860
--- /dev/null
+++ b/portage/sync/cvs.py
@@ -0,0 +1,95 @@
+# cvs.py; provides cvs sync capabilities, encapsulates the necessary cvs binary calls
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/sync/cvs.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import os, stat
+from portage.spawn import spawn, spawn_bash, CommandNotFound
+#import sync
+from portage.const import CVS_BIN
+
+import sync.syncexceptions
+class CVSIOError(sync.syncexceptions.SyncException):
+ def __init__(self,errmsg,command):
+ self.errmsg=errmsg
+ self.command=command
+ def __str__(self):
+ return "cvs error: command %s, %s" % (self.command, self.errmsg)
+
+class CvsHost:
+ def __init__(self,host_uri,cvs_binary=CVS_BIN):
+ if not os.access(cvs_binary, os.X_OK):
+ raise CommandNotFound(cvs_binary)
+ self.__binary=cvs_binary
+ #parse the bugger.
+ #new format.
+ #cvs://[CVS_RSH binary:]user@host:cvs_root:module
+ #example
+ #cvs://ssh:ferringb@dev.gentoo.org:/var/cvsroot:gentoo-x86
+ #old format
+ #cvs://user@host:cvsroot
+ #implicit gentoo-x86 module, and ext w/ ssh.
+ #here we go. :/
+
+ if host_uri.count(":") >= 2:
+ self.__init_new_host_uri(host_uri)
+ else:
+ self.__init_deprecated_uri(host_uri)
+
+ def __init_new_host_uri(self,host):
+ #cvs://ssh:ferringb@dev.gentoo.org:/var/cvsroot:gentoo-x86
+ s=host.split(":")
+ if len(s) == 4:
+ self.__ext=s.pop(0)
+ s[0] = ":ext:" + s[0]
+ else:
+ self.__ext=None
+ self.__cvsroot=s[0]+":"+s[1]
+ self.__cvsmodule=s[2]
+
+ def __init_deprecated_uri(self,host):
+ self.__ext="ssh"
+ self.__cvsmodule="gentoo-x86"
+ self.__cvsroot=host
+
+ def sync(self,local_path,verbosity=1,compress=False):
+ while local_path[-1] == "/":
+ local_path = local_path[:-1]
+ if compress:
+ c_arg='-z9'
+ else:
+ c_arg=''
+
+ env={}
+ if self.__ext:
+ env = {"CVS_RSH":self.__ext}
+
+ l=len(self.__cvsmodule)
+ if not os.path.exists(local_path):
+ newdir=os.path.basename(local_path)
+ basedir=local_path[:-len(newdir)]
+ if os.path.exists(basedir+"/"+self.__cvsmodule):
+ raise Exception("unable to checkout to %s, module directory %s exists already" % \
+ (basedir, self.__cvsmodule))
+ elif os.path.exists(basedir+"/CVS"):
+ raise Exception("unable to checkout to %s, a CVS directory exists w/in already" % basedir)
+ command="cd '%s' ; %s %s -d %s co -P %s" % \
+ (basedir, self.__binary, c_arg, self.__cvsroot, self.__cvsmodule)
+
+ ret=spawn_bash(command,env=env,opt_name="cvs co")
+ if ret:
+ raise CVSIOError("failed checkout",command)
+ if newdir != self.__cvsmodule:
+ ret = spawn(('mv','%s/%s' % (basedir,self.__cvsmodule),local_path))
+ if ret:
+ raise Exception("failed moving %s/%s to %s" % (basedir,self.__cvsmodule,local_path))
+ elif stat.S_ISDIR(os.stat(local_path).st_mode):
+
+ command="cd '%s'; %s %s -d %s up" % (local_path, self.__binary, c_arg,self.__cvsroot)
+ ret = spawn_bash(command, env=env,opt_name="cvs up")
+ if ret:
+				raise CVSIOError("failed update", command)
+ else:
+ raise Exception("%s exists, and is not a directory. rectify please" % local_path)
+ return True
+
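+# How the two accepted host uri forms decompose, fex (values are examples):
+#
+#	ssh:user@host:/var/cvsroot:gentoo-x86	(new format)
+#	  -> CVS_RSH=ssh, cvsroot=:ext:user@host:/var/cvsroot, module=gentoo-x86
+#	user@host:/var/cvsroot			(deprecated format)
+#	  -> CVS_RSH=ssh, cvsroot=user@host:/var/cvsroot, module=gentoo-x86
+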
diff --git a/portage/sync/parseuri.py b/portage/sync/parseuri.py
new file mode 100644
index 0000000..06b0a88
--- /dev/null
+++ b/portage/sync/parseuri.py
@@ -0,0 +1,28 @@
+# parseuri.py; parses a SYNC uri, returning protocol/host_uri
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/sync/parseuri.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+
+#sanitize this to use listdir
+#~harring
+
+import portage_const
+
+def parseSyncUri(uri):
+ """parse a SYNC uri, returning a tuple of protocol,host_uri"""
+	u=uri.lower()
+	# test against the lowercased copy, but return slices of the original uri-
+	# remote paths can be case sensitive.
+	if u.startswith("rsync") or len(u) == 0:
+		if len(u) <= 5:
+			return ('rsync',portage_const.RSYNC_HOST)
+		return ('rsync',uri[8:])
+	elif u.startswith("cvs://"):
+		return ('cvs',uri[6:])
+	elif u.startswith("snapshot"):
+		if len(u)==8:
+			# the caller gets to randomly crapshoot a mirror for it.
+			return ('snapshot',None)
+		return ('snapshot',uri[9:])
+	else:
+		return (None,None)
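+
+if __name__ == "__main__":
+	# illustrative only- the hosts/paths below are example values
+	print parseSyncUri("rsync://rsync.gentoo.org/gentoo-portage")
+	# -> ('rsync', 'rsync.gentoo.org/gentoo-portage')
+	print parseSyncUri("cvs://ssh:user@host:/var/cvsroot:gentoo-x86")
+	# -> ('cvs', 'ssh:user@host:/var/cvsroot:gentoo-x86')
+	print parseSyncUri("snapshot")
+	# -> ('snapshot', None)- caller picks a mirror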
diff --git a/portage/sync/rsync.py b/portage/sync/rsync.py
new file mode 100644
index 0000000..7503c9b
--- /dev/null
+++ b/portage/sync/rsync.py
@@ -0,0 +1,161 @@
+# rsync.py; module providing an abstraction over the rsync binary
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/sync/rsync.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+from portage_const import RSYNC_BIN, RSYNC_HOST
+import os, portage_exec
+import portage_exception,socket
+
+import sync.syncexceptions
+
+class RSyncSyntaxError(sync.syncexceptions.SyncException):
+ """Exception thrown when generated rsync syntax is invalid"""
+ def __init__(self,command):
+ self.command=command
+ def __str__(self):
+ return "Invalid rsync syntax: %s" % str(self.command)
+
+class RsyncHost:
+ """abstraction over the rsync binary"""
+
+ def __init__(self,host_uri,local_host=False,rsync_binary=RSYNC_BIN):
+ """self explanatory"""
+ if not os.access(rsync_binary,os.X_OK):
+			raise portage_exception.CommandNotFound(rsync_binary)
+
+ self.__binary = rsync_binary
+ self.__host = host_uri
+ self.__ips = []
+
+ self.__local = local_host
+ if self.__local:
+ self.__host_name=''
+ self.__remote_path = host_uri
+ self.__host_uri=''
+ return
+
+ f=host_uri.split("/",1)
+ if len(f) == 1:
+ #deprecated, assume /gentoo-portage
+ self.__remote_path = "/gentoo-portage"
+ else:
+ self.__remote_path = "/"+f[1]
+ host_uri = f[0]
+
+ f=host_uri.find("@")
+ if f != -1:
+ host_uri=host_uri[f+1:]
+ f=host_uri.find(":")
+ if f != -1:
+ host_uri=host_uri[:f]
+
+ self.__host_name = host_uri
+
+ def get_remote_path(self):
+ return self.__remote_path
+
+ def get_ips(self):
+ if self.__local:
+ return None
+ try:
+ self.__ips = socket.gethostbyname_ex(self.__host_name)[2]
+ except socket.error,e:
+ print "caught exception for %s" % self.__host_name,e
+ return None
+ return self.__ips
+
+ def sync(self,settings, local_path,remote_path=None,verbosity=1,excludes=[],compress=True, \
+ timeout=180,ip=None,cleanup=True):
+ """sync up local_path with remote_path on host
+ settings is a portage.config, at some point hopefully removed and specific options
+ passed in instead.
+		verbosity ranges 0-3:
+		0 is absolutely quiet, 1 is quiet, 2 is normal, 3 is noisy.
+ ip is used to control which ip of the host is used.
+ cleanup controls deletion."""
+
+ args=[self.__binary,
+ "--recursive", # Recurse directories
+ "--links", # Consider symlinks
+ "--safe-links", # Ignore links outside of tree
+ "--perms", # Preserve permissions
+			"--times",	# Preserve mod times
+			"--force",	# Force deletion on non-empty dirs
+			"--whole-file",	# Don't do block transfers, only entire files
+			"--stats",	# Show final statistics about what was transferred
+ "--timeout="+str(timeout), # IO timeout if not done in X seconds
+ ]
+
+ if cleanup:
+			args.append("--delete")	# Delete files that aren't in the master tree
+ args.append("--delete-after") # Delete only after everything else is done
+
+ if compress:
+ args.append("--compress")
+ for x in excludes:
+ args.append("--exclude=%s" % str(x))
+ if verbosity >=3:
+ args.append("--progress")
+ args.append("--verbose")
+ elif verbosity == 2:
+ args.append("--progress")
+ elif verbosity == 1:
+ args.append("--quiet")
+ else:
+ args.append("--quiet")
+ args.remove("--stats")
+
+ if verbosity:
+ fd_pipes={1:1,2:2}
+ else:
+ fd_pipes={}
+
+ #why do this if has_key crap? cause portage.config lacks a get function
+ #this allows dicts to be passed in and used.
+ if settings.has_key("RSYNC_INCLUDE"):
+ for x in settings["RSYNC_INCLUDE"].split():
+ args.append("--include=%s" % x)
+ if settings.has_key("RSYNC_INCLUDEFROM"):
+ for x in settings["RSYNC_INCLUDEFROM"].split():
+ args.append("--include-from=%s" % x)
+ if settings.has_key("RSYNC_EXCLUDE"):
+ for x in settings["RSYNC_EXCLUDE"].split():
+ args.append("--exclude=%s" % x)
+ if settings.has_key("RSYNC_EXCLUDEFROM"):
+ for x in settings["RSYNC_EXCLUDEFROM"].split():
+ args.append("--exclude-from=%s" % x)
+
+ if settings.has_key("RSYNC_RATELIMIT"):
+ args.append("--bwlimit=%s" % settings["RSYNC_RATELIMIT"])
+
+ prefix="rsync://"
+ if remote_path == None:
+ if self.__local:
+ host=self.__remote_path
+ prefix=''
+ else:
+ host=self.__host
+ else:
+ if remote_path[0] != "/":
+ host = self.__host_name + '/' + remote_path
+ else:
+ host = self.__host_name + remote_path
+
+ if ip:
+ args.append("%s%s" % (prefix,host.replace(self.__host_name,ip)))
+ else:
+ args.append("%s%s" % (prefix,host))
+ args.append(local_path)
+
+ # tie a debug option into this
+ #print "options are",args
+
+ ret=portage_exec.spawn(args,fd_pipes=fd_pipes)
+ if ret == 0:
+ return True
+ elif ret == 1:
+ raise RSyncSyntaxError(args)
+ elif ret == 11:
+ raise IOError("Rsync returned exit code 11; disk space remaining?")
+ return ret
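+
+# sync() only pokes at a handful of optional keys on `settings`, so a plain
+# dict stands in fine for a portage.config- a commented sketch (host, paths
+# and values illustrative):
+#
+#	host = RsyncHost("rsync.gentoo.org/gentoo-portage")
+#	host.sync({"RSYNC_RATELIMIT":"512"}, "/usr/portage",
+#		excludes=("/distfiles","/packages"), verbosity=2)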
diff --git a/portage/sync/snapshot.py b/portage/sync/snapshot.py
new file mode 100644
index 0000000..02346d0
--- /dev/null
+++ b/portage/sync/snapshot.py
@@ -0,0 +1,147 @@
+# snapshot.py; provides the capability of fetching a portage tree snapshot, and syncing a tree with it.
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/sync/snapshot.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+
+raise Exception("won't work. portage_checksum clean ups needed")
+
+import os
+import time
+import portage_checksum
+from portage.spawn import spawn
+import shutil
+import sync.rsync
+
+"""snapshot-http://gentoo.chem.wisc.edu/gentoo"""
+class SnapshotHost:
+ def __init__(self,host_uri,snapshots_dir,tmp_dir,fetcher=None,use_md5=True):
+ if fetcher==None:
+ import transports.bundled_lib
+ fetcher=transports.bundled_lib.BundledConnection()
+ self.__fetcher=fetcher
+ self.__host_uri=host_uri
+ self.__tmpdir = tmp_dir
+ self.__snapshots_dir = snapshots_dir
+ self.__use_md5 = use_md5
+
+ def sync(self, local_path,verbosity=1):
+ attempts=0
+ downloaded=False
+ while attempts < 40 and not downloaded:
+ file="portage-%s.tar.bz2" % time.strftime("%Y%m%d",
+ time.localtime(time.time() - (attempts*24*60*60)))
+ loc=self.__snapshots_dir+"/"+file
+ rem=self.__host_uri+"/"+file
+ downloaded=self.__fetch_snapshot(file,loc,rem,verbosity)
+ attempts += 1
+
+ if not downloaded:
+ # no snapshot, no syncy-poo.
+ return False
+
+ return self.__apply_snapshot(loc,local_path,verbosity)
+
+ def __apply_snapshot(self,snapshot,local_tree,verbosity):
+ """apply the actual snapshot. for different methods of this, inherit this class
+ and overload this function
+ current it untars to a temp location, and rsync's it over to local_path."""
+
+ #this should be checked
+ spawn(("tar","-jxf",snapshot,"-C",self.__tmpdir))
+ syncer=sync.rsync.RsyncHost("%s/portage/" % self.__tmpdir,local_host=True)
+ try:
+ ret = syncer.sync({},local_tree,excludes=("/distfiles","/local","/packages"),verbosity=verbosity)
+ except sync.rsync.RSyncSyntaxError,e:
+ print "caught rsync syntax exception:",e
+ return False
+ except IOError, ie:
+ print "issue: ",ie
+ return False
+ if verbosity:
+			print "cleaning temporary snapshot directory- %s/portage" % self.__tmpdir
+ shutil.rmtree(self.__tmpdir+"/portage")
+
+ #nuke all other return codes.
+ if ret != True:
+ return False
+ return ret
+
+ def __fetch_snapshot(self,file,loc,rem,verbosity):
+ grab_md5=True
+ hash=None
+ md5=None
+ md5_existed=False
+ ret=False
+ if self.__use_md5 and os.path.exists(loc+".md5sum"):
+ hash=self.__read_md5sum(loc+".md5sum")
+ if hash==None:
+ os.remove(loc+".md5sum")
+ else:
+ md5_existed=True
+ grab_md5=False
+
+ if self.__use_md5 and grab_md5:
+ ret=self.__fetcher.fetch(rem+".md5sum",file_name=loc+".md5sum",verbose=(verbosity==1))
+ if not ret:
+ hash=self.__read_md5sum(loc+".md5sum")
+
+ if ret:
+ if verbosity:
+ print "!!! failed to fetch md5 for %s" % file
+ return False
+
+ # at this point we have the md5, and know the image *should* exist.
+ ret = False
+ if os.path.exists(loc):
+ if self.__use_md5:
+ md5=portage_checksum.perform_md5(loc)
+ if hash == md5 or not self.__use_md5:
+ if verbosity:
+ print ">>> reusing %s" % loc
+ return True
+ else:
+ if verbosity:
+ print ">>> resuming %s" % rem
+ ret=self.__fetcher.resume(rem,file_name=loc,verbose=(verbosity==1))
+ else:
+ if verbosity:
+ print ">>> fetching %s" % rem
+ ret=self.__fetcher.fetch(rem,file_name=loc,verbose=(verbosity==1))
+
+ if ret:
+ if verbosity:
+ print "!!! failed %s" % file
+ return False
+
+ if self.__use_md5 and md5==None:
+ md5=portage_checksum.perform_md5(loc)
+
+ if self.__use_md5 and md5_existed and md5!= hash:
+ print ">>> re-grabbing the hash"
+ # grab the md5 anew to be safe.
+ os.remove(loc+".md5sum")
+ if not self.__fetcher.fetch(rem+".md5sum",file_name=loc+".md5sum",verbose=True):
+ hash=self.__read_md5sum(loc+".md5sum")
+
+ if md5!=hash and self.__use_md5:
+ if verbosity:
+				print "!!! snapshot's correct md5: %s" % hash
+				print "!!! snapshot's actual md5 : %s" % md5
+ print "!!! cycling to next candidate."
+ print
+ return False
+ # if this point has been reached, things are sane.
+ return True
+
+
+ def __read_md5sum(self,file):
+ try:
+ myf=open(file,"r")
+ hash=myf.readline().split()[0]
+ if len(hash)!=32:
+ return None
+ return hash
+ except (OSError,IOError,IndexError),e:
+ print e
+ return None
diff --git a/portage/sync/syncexceptions.py b/portage/sync/syncexceptions.py
new file mode 100644
index 0000000..da7f842
--- /dev/null
+++ b/portage/sync/syncexceptions.py
@@ -0,0 +1,11 @@
+# syncexceptions.py: base sync exception class. not used currently (should be though)
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/sync/syncexceptions.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+class SyncException(Exception):
+ """base sync exception"""
+ def __init__(self,value):
+ self.value=value
+ def __str__(self):
+		return self.value
diff --git a/portage/transports/__init__.py b/portage/transports/__init__.py
new file mode 100644
index 0000000..037b136
--- /dev/null
+++ b/portage/transports/__init__.py
@@ -0,0 +1,9 @@
+# transports/__init__.py; worthless file to keep python happy.
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/transports/__init__.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+# worthless comment so diff creates this file.
+# yay.
+# this will ultimately hold the 'preferred fetcher' in the transport namespace,
+# along with default options for fetchers (chunk size, for example)
diff --git a/portage/transports/bundled_lib.py b/portage/transports/bundled_lib.py
new file mode 100644
index 0000000..37634fc
--- /dev/null
+++ b/portage/transports/bundled_lib.py
@@ -0,0 +1,363 @@
+# bundled_lib.py; implementation of a fetcher class using httplib and ftplib.
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/transports/bundled_lib.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import httplib, ftplib, urlparse, base64, re, sys, os
+
+class BundledConnection:
+	"""a fetcher abstraction using httplib and ftplib.
+	offers api access for requesting a specific window/chunk of a uri"""
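+	# hypothetical usage sketch (the uri is made up):
+	#   conn = BundledConnection(persistant=True, verbose=False)
+	#   failed = conn.fetch("http://example.org/snapshot.tar.bz2")
+	#   if failed:
+	#       failed = conn.resume("http://example.org/snapshot.tar.bz2")
+	# note fetch/resume return True on *failure*- falsy means success, matching
+	# CustomConnection's spawn-derived convention.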
+ def __init__(self, persistant=False,chunk_size=8192,verbose=True):
+ self.__persistant = persistant
+ self.__chunk_size = chunk_size
+ self.__verbose = verbose
+
+ # lifting the connection caching from check_src_uri's code- combine them if possible
+ self._http_conn = {}
+ self._https_conn = {}
+ self._ftp_conn = {}
+
+ def __get_connection(self, uri):
+		"""internal function to pull a connection from the instance's cache, or create a new one"""
+ proto,host,con_dict,url,hash = self.__process_uri(uri)
+ cons = getattr(self,"_%s_conn" % proto)
+ if not self.__persistant or not cons.has_key(hash):
+ if proto in ('http','https'):
+ con = httpConnection(host,**con_dict)
+ else:
+ con = ftpConnection(host,chunk_size=self.__chunk_size,**con_dict)
+ if self.__persistant:
+ cons[hash] = con
+ else:
+ con = cons[hash]
+ return proto, con, url
+
+ def fetch(self,uri, file_name=None,verbose=None):
+ """fetch uri, storing it in file_name"""
+ if verbose==None:
+ verbose=self.__verbose
+ proto, con, url = self.__get_connection(uri)
+ if not file_name:
+			x = url.rfind("/")
+			if x == -1:
+				raise Exception,"Unable to determine file_name from %s" % uri
+ file_name = url[x+1:]
+ try:
+ myf=open(file_name,"w",0)
+
+ except (IOError, OSError), e:
+ sys.stderr.write("Failed opening file %s for saving:\n%s\n" % (file_name,str(e)))
+ return True
+ if verbose:
+ print "fetching '%s' -> '%s'" % (uri, file_name)
+ if proto in ('http','https'):
+ try:
+ ret= self.__execute_transfer(con, url, myf, 0)
+ except UnableToAccess,ua:
+ if verbose:
+ print ua
+ myf.close()
+ os.remove(file_name)
+ return True
+ else:
+ ret = con.request(url, callback=myf.write)
+ myf.close()
+ return not ret
+
+ def resume(self, uri, file_name=None,verbose=None):
+ """resume uri into file_name"""
+ if verbose==None:
+ verbose=self.__verbose
+ proto, con, url = self.__get_connection(uri)
+ if not file_name:
+			x = url.rfind("/")
+			if x == -1:
+				raise Exception,"Unable to determine file_name from %s" % uri
+ file_name = url[x+1:]
+ try:
+ pos = os.stat(file_name).st_size
+
+ # open it manually, since open(file_name,"a") has the lovely feature of
+ # ignoring _all_ previous seek requests the minute a write is issued.
+ fd=os.open(file_name, os.O_WRONLY)
+ myf = os.fdopen(fd,"w",0)
+ if pos > self.__chunk_size:
+ pos -= self.__chunk_size
+ myf.seek(pos, 0)
+ else:
+ pos=0
+
+ except OSError, e:
+ if e.errno == 2:
+ # file not found
+ pos = 0
+ myf = open(file_name, "w",0)
+ else:
+ sys.stderr.write("Failed opening file %s for saving:\n%s\n" % (file_name,str(e)))
+ return True
+
+ if verbose:
+ print "resuming '%s' -> '%s'" % (uri, file_name)
+ if proto in ('http','https'):
+ try:
+ ret = self.__execute_transfer(con, url, myf, pos)
+ except UnableToAccess,ua:
+ if verbose:
+ print ua
+ myf.close()
+ return True
+ else:
+ ret = con.request(url, callback=myf.write, start=pos)
+ myf.close()
+ return not ret
+
+ def __execute_transfer(self, con, url, fd, pos):
+ """internal http(|s) function for looping over requests storing to fd"""
+ rc=1
+ redirect_max = 10
+ while rc:
+# print "fetching %i-%i" % (pos, pos + (self.__chunk_size *8) -1)
+ try:
+ data,rc=con.request(url,start=pos, end=(pos+(self.__chunk_size*8) -1))
+ except MovedLocation, ml:
+ sys.stderr.write(str(ml)+"\n")
+ url = ml.location
+ redirect_max -= 1
+ if redirect_max <= 0:
+ print "we've been redirected too many times- bailing"
+ return False
+ else:
+ continue
+ except UnhandledError, ue:
+ print ue
+ return False
+ fd.write(data)
+ pos += len(data)
+ return True
+
+ default_ports = {'http':80, 'https':443, 'ftp':21}
+
+ def __process_uri(self, uri):
+ """internal function to determine the proto, host, uri, options for
+ __get_connection, and a hash representing this host's specific options.
+ username, password, port, ssl, etc"""
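+		# e.g. (made-up uri) "ftp://user:pass@host:2121/pub/file" comes back as
+		# proto "ftp", host "host", url "/pub/file", and
+		# con_dict {"user": "user", "passwd": "pass", "port": "2121"}.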
+ con_dict = {}
+ proto,parts,url = urlparse.urlparse(uri)[0:3]
+		if not self.default_ports.has_key(proto):
+			raise Exception, "unknown protocol %s for uri %s" % (proto,uri)
+
+		parts = parts.split("@")
+		if len(parts) > 1:
+			#grab the user, and optionally the pass, from the credentials chunk.
+			userpass = parts.pop(0).split(":",1)
+			con_dict["user"] = userpass[0]
+			if len(userpass) > 1:
+				con_dict["passwd"] = userpass[1]
+
+		r=re.compile(":\d+$").search(parts[0])
+		if r:
+			# found a port
+			con_dict["port"] = parts[0][r.start() + 1:]
+			parts[0] = parts[0][0:r.start()]
+			del r
+
+ hash=''
+ k=con_dict.keys()
+ k.sort()
+ for x in k:
+ hash += '%s:%s\n' % (x, str(con_dict[x]))
+ hash += "host:%s" % parts[0]
+ return [proto, parts[0], con_dict, url,hash]
+
+class ftpConnection:
+ """higher level interface over ftplib"""
+ def __init__(self,host,ssl=False,port=21,user=None,passwd=None,passive=True, chunk_size=8192,debug=0):
+ """ssl is currently not supported
+ debug flips on a bit of extra info
+ chunk_size controls the size of chunks transferred per callback- useful for performance tweaking
+
+ note a ftpConnection instance *must not* be called in a threaded/parallel manner-
+ the connection can handle one, and only one request, and is currently not smart
+ enough to lock itself for protection"""
+ self.__chunk = chunk_size
+ self.__pos = 0
+ self.__end = 0
+ self.__endlimit = False
+ if ssl:
+			raise Exception, "ftp over ssl isn't supported atm"
+ self.__con = ftplib.FTP()
+ self.__con.set_debuglevel(debug)
+ self.__con.connect(host,port)
+ if user and passwd:
+ self.__con.login(user,passwd)
+ else:
+ self.__con.login()
+ self.__con.set_pasv(passive)
+
+ def request(self, file, start=0, end=0, callback=None):
+ """make a request of file, with optional start/end
+ callback is used for processing data- if callback, then callback is called, and the return
+ is true/false depending on success
+
+ if no callback, then the requested window is returned, eg string.
+
+		technical note: due to the way the protocol works, the remote host may send more than what
+		was specified via start/end. Despite this, only that window is processed/returned.
+		Just worth noting since it means if you've requested 3 bytes from a file, depending on how
+		quickly the server disconnects, 13k may've been sent by that point (still, only the 3 bytes
+		are actually processed by callback/returned)."""
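+		# hypothetical sketch of the window semantics (host/path made up):
+		#   con = ftpConnection("ftp.example.org")
+		#   data = con.request("pub/snapshot.tar.bz2", start=0, end=1024)
+		# data is the first 1k of the file (or False on failure), even if the
+		# server pushed extra bytes before the abort went through.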
+
+		self.__pos = start
+		self.__end = end
+		self.__callback = callback
+		self.__data = ''
+		self.__aborted = False
+		#reset per request- a prior end-limited request shouldn't leak into this one.
+		self.__endlimit = (end != 0)
+
+		if end:
+			block = end - start
+		else:
+			block = self.__chunk
+		if block > self.__chunk:
+			block = self.__chunk
+
+		try:
+			self.__con.retrbinary("retr %s" % file, self.__transfer_callback, block, start)
+		except ftplib.all_errors:
+			# an aborted transfer (endlimit reached) raises in here too; that's not a failure.
+			if not self.__aborted:
+				self.__data = ''
+				return False
+
+		if callback == None:
+			d = self.__data
+			self.__data = ''
+			return d
+		return True
+
+
+ def __transfer_callback(self, data):
+ """internal callback function used with ftplib. This either appends the returned data,
+ or passes it off to the requested callback function"""
+ if self.__aborted:
+ return
+
+ l=len(data)
+
+		if self.__endlimit and self.__pos + l >= self.__end:
+			#clip to the requested window before handing it off.
+			l = self.__end - self.__pos
+			data = data[:l]
+			self.__aborted = True
+			self.__con.abort()
+
+ self.__pos += l
+ if self.__callback:
+ self.__callback(data)
+ else:
+ self.__data += data
+ return
+
+class httpConnection:
+ """higher level abstraction over httplib allowing window level access to a remote uri"""
+ def __init__(self,host,ssl=False,port=None,user=None,passwd=None):
+ """options for this host connection, sent to the server via the headers
+		note you cannot just specify the port as 443 and assume it'll do ssl-
+		you must flip on ssl to enable decryption/encryption of the protocol
+
+ just like with ftpConnection, instances *must not* be called in a parallel/threaded
+ manner without external locking.
+ This class isn't smart enough to protect itself, although it will be made so at
+ some point."""
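+		# hypothetical sketch (host/path made up): grab a byte window.
+		#   con = httpConnection("www.example.org")
+		#   data, ok = con.request("/snapshot.tar.bz2", start=0, end=8191)
+		# ok is true only when the server honored the Range request (206) and
+		# handed back the full window.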
+
+ self.__headers = {}
+ self.__host = host
+ if user and passwd:
+			self.__headers.update({"Authorization": "Basic %s" %
+				base64.encodestring("%s:%s" % (user,passwd)).replace("\012","")
+				})
+
+ if port == None:
+ if ssl:
+ self.__port = httplib.HTTPS_PORT
+ else:
+ self.__port = httplib.HTTP_PORT
+ else:
+ self.__port = port
+ if ssl:
+ self.__conn = httplib.HTTPSConnection(self.__host, self.__port)
+ else:
+ self.__conn = httplib.HTTPConnection(self.__host, self.__port)
+
+	def request(self,uri,start=None,end=None):
+		"""returns the response data, or chucks an exception."""
+		if end==None and start != None:
+			end = start + 3000
+		size = None
+		if not (start == None or end == None):
+			size = end - start
+			self.__headers["Range"]="bytes=%i-%i" % (start, end)
+		try:
+			self.__conn.request("GET",uri,{},self.__headers)
+		except httplib.HTTPException, e:
+			print "caught exception %s" % str(e)
+			if size != None:
+				del self.__headers["Range"]
+			return None,False
+		if size != None:
+			del self.__headers["Range"]
+ response = self.__conn.getresponse()
+ rc = response.status
+ if rc in (301,302):
+ response.read()
+ for x in str(response.msg).split("\n"):
+				p = x.split(":", 1)
+				# assume the remote server is dumb, and ignore case.
+				if p[0].lower() == "location":
+					raise MovedLocation(rc, p[1].strip())
+
+ # if we hit this point, the server is stupid.
+ raise Exception,"received %i, but no location" % rc
+
+ if rc in (404,403):
+ response.read()
+ raise UnableToAccess(rc,response.reason)
+ elif rc not in (200,206):
+ response.read()
+ raise UnhandledError(rc,response.reason)
+ data=response.read()
+ if size != None:
+ return data,(rc==206 and len(data) -1 == size)
+ return data,(rc==206)
+
+
+class UnhandledError(Exception):
+ """basically an unknown state/condition was encountered, so control is being relinquished"""
+ def __init__(self, status, reason):
+ self.status = status
+ self.reason = str(reason)
+ def __str__(self):
+ return "Unhandled Status code: %i: %s" % (self.status, self.reason)
+
+class UnableToAccess(Exception):
+ """404's and 403's"""
+ def __init__(self,status,reason):
+ self.status = status
+ self.reason = str(reason)
+ def __str__(self):
+ return "%s code: %s" % (self.status,self.reason)
+
+class MovedLocation(Exception):
+ """http 301/302 exception"""
+ def __init__(self,status,new_loc):
+ self.status=status
+ self.location = str(new_loc)
+
+ def __str__(self):
+ if self.status == 301:
+ return "301: Location has moved: %s" % self.location
+ else:
+ return "302: Location has temporarily moved: %s" % self.location
+
diff --git a/portage/transports/fetchcommand.py b/portage/transports/fetchcommand.py
new file mode 100644
index 0000000..5d10eb1
--- /dev/null
+++ b/portage/transports/fetchcommand.py
@@ -0,0 +1,72 @@
+# fetchcommand.py; fetcher class encapsulating make.conf FETCHCOMMAND/RESUMECOMMAND, and the ensuing spawn calls
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#$Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/transports/fetchcommand.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+import urlparse,types
+# 'exec' is a python keyword, so 'from portage.exec import ...' can't parse;
+# assume spawn_bash lives in the spawn module, like selinux_capable does.
+from portage.spawn import spawn_bash
+
+class CustomConnection:
+	"""abstraction representing a portage.config instance's FETCHCOMMAND and RESUMECOMMAND"""
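+	# hypothetical sketch- with something like
+	#   FETCHCOMMAND="/usr/bin/wget -O \${DISTDIR}/\${FILE} \${URI}"
+	# in mysettings, fetch() substitutes ${DISTDIR}, ${URI} and ${FILE} into the
+	# template and spawns it:
+	#   conn = CustomConnection(mysettings)
+	#   ret = conn.fetch("http://example.org/foo.tar.bz2")
+	# ret is spawn_bash's return code- zero (falsy) on success.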
+ def __init__(self,mysettings,selinux_context=None, verbose=True):
+ """selinux_context should always be passed in, actual control of whether or not
+ the context is switched is based upon if it's a selinux capable system
+ verbose controls whether this instance makes noise or not"""
+ self.__fc = mysettings["FETCHCOMMAND"]
+ self.__rc = mysettings["RESUMECOMMAND"]
+ self.__verbose = verbose
+ self.__cfc = {}
+ self.__crc = {}
+ self.__selinux_context = selinux_context
+ self.__distdir=mysettings["DISTDIR"]
+ for k in mysettings.environ().keys():
+ if k.startswith("FETCHCOMMAND_"):
+ self.__cfc[k[13:]] = mysettings[k]
+ elif k.startswith("RESUMECOMMAND_"):
+ self.__crc[k[14:]] = mysettings[k]
+
+
+ def fetch(self, uri, file_name=None, verbose=None):
+ """fetch uri, storing it to file_name"""
+ return self.__execute(uri,file_name,False,verbose)
+
+ def resume(self, uri, file_name=None,verbose=None):
+ """resume uri into file_name"""
+ return self.__execute(uri,file_name,True,verbose)
+
+ def __execute(self, uri, file_name, resume,verbose):
+ """internal function doing the actual work of fetch/resume"""
+ if verbose==None:
+ verbose=self.__verbose
+
+ proto = urlparse.urlparse(uri)[0].upper()
+
+ if not file_name:
+ x = uri.rfind("/")
+ distdir=self.__distdir
+ if x == -1:
+			raise Exception,"Unable to determine file_name from %s" % uri
+ file_name = uri[x+1:]
+ else:
+ x = file_name.rfind("/")
+ if x == -1:
+ distdir=self.__distdir
+ else:
+ distdir=file_name[:x]
+ file_name=file_name[x+1:]
+
+ if resume:
+ f = self.__crc.get(proto, self.__rc)
+ else:
+ f = self.__cfc.get(proto, self.__fc)
+
+
+ f=f.replace("${DISTDIR}", distdir)
+ f=f.replace("${URI}",uri)
+ f=f.replace("${FILE}",file_name)
+ if verbose:
+ fd_pipes={1:1,2:2}
+ else:
+ fd_pipes={}
+ return spawn_bash(f,fd_pipes=fd_pipes,selinux_context=self.__selinux_context)
+
diff --git a/portage/util/IndexableSequence.py b/portage/util/IndexableSequence.py
new file mode 100644
index 0000000..7ec57cc
--- /dev/null
+++ b/portage/util/IndexableSequence.py
@@ -0,0 +1,97 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/util/Attic/IndexableSequence.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+class IndexableSequence(object):
+ def __init__(self, get_keys, get_values, recursive=False, returnEmpty=False,
+ returnIterFunc=None, modifiable=False, delfunc=None, updatefunc=None):
+ self.__get_keys = get_keys
+ self.__get_values = get_values
+ self.__cache = {}
+ self.__cache_complete = False
+ self.__cache_can_be_complete = not recursive and not modifiable
+ self.__return_empty = returnEmpty
+ self.__returnFunc = returnIterFunc
+ self._frozen = not modifiable
+ if not self._frozen:
+ self.__del_func = delfunc
+ self.__update_func = updatefunc
+
+ def __getitem__(self, key):
+ if not (self.__cache_complete or self.__cache.has_key(key)):
+ self.__cache[key] = self.__get_values(key)
+ return self.__cache[key]
+
+ def keys(self):
+ return list(self.iterkeys())
+
+ def __delitem__(self, key):
+ if self._frozen:
+ raise AttributeError
+ if not key in self:
+ raise KeyError(key)
+ return self.__del_func(key)
+
+ def __setitem__(self, key, value):
+ if self._frozen:
+ raise AttributeError
+ if not key in self:
+ raise KeyError(key)
+ return self.__update_func(key, value)
+
+ def __contains__(self, key):
+ try:
+ self[key]
+ return True
+ except KeyError:
+ return False
+
+ def iterkeys(self):
+# print "iterkeys called, cache-complete=",self.__cache_complete
+ if self.__cache_complete:
+ return self.__cache.keys()
+ return self.__gen_keys()
+
+ def __gen_keys(self):
+ for key in self.__get_keys():
+ if not self.__cache.has_key(key):
+ self.__cache[key] = self.__get_values(key)
+# print "adding %s:%s" % (str(key), str(self.__cache[key]))
+ yield key
+ self.__cache_complete = self.__cache_can_be_complete
+ return
+
+ def __iter__(self):
+ if self.__returnFunc:
+ for key, value in self.iteritems():
+ if len(value) == 0:
+ if self.__return_empty:
+ yield key
+ else:
+ for x in value:
+ yield self.__returnFunc(key, x)
+ else:
+ for key, value in self.iteritems():
+ if len(value) == 0:
+ if self.__return_empty:
+ yield key
+ else:
+ for x in value:
+ yield key+'/'+x
+ return
+
+ def items(self):
+ return list(self.iteritems())
+
+ def iteritems(self):
+# print "iteritems called, cache-complete=",self.__cache_complete
+ if self.__cache_complete:
+ return self.__cache.items()
+ return self.__gen_items()
+
+ def __gen_items(self):
+ for key in self.iterkeys():
+ yield key, self[key]
+ return
+
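+# hypothetical sketch of the intended use (the getter callables are made up):
+# wrap key/value functions so a category->packages mapping walks like
+# "cat/pkg" strings-
+#   seq = IndexableSequence(repo.get_categories, repo.get_packages)
+#   for cpv in seq:         # yields e.g. "dev-lang/python"
+#       print cpv
+#   pkgs = seq["dev-lang"]  # values for one key, cached after first access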
diff --git a/portage/util/__init__.py b/portage/util/__init__.py
new file mode 100644
index 0000000..bcffcd1
--- /dev/null
+++ b/portage/util/__init__.py
@@ -0,0 +1 @@
+#import modules, fs, misc
diff --git a/portage/util/currying.py b/portage/util/currying.py
new file mode 100644
index 0000000..0f4d6af
--- /dev/null
+++ b/portage/util/currying.py
@@ -0,0 +1,9 @@
+# Author(s): Lifted from python cookbook, Scott David Daniels, Ben Wolfson, Nick Perkins, Alex Martelli for curry routine.
+# License: GPL2
+
+def curry(*args, **kargs):
+ def callit(*moreargs, **morekargs):
+ kw = kargs.copy()
+ kw.update(morekargs)
+ return args[0](*(args[1:]+moreargs), **kw)
+ return callit
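+
+# e.g. f = curry(cmp, 0) gives a callable where f(x) == cmp(0, x); keywords
+# passed to curry are merged with (and overridden by) call-time keywords.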
diff --git a/portage/util/dicts.py b/portage/util/dicts.py
new file mode 100644
index 0000000..6283193
--- /dev/null
+++ b/portage/util/dicts.py
@@ -0,0 +1,158 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/util/Attic/dicts.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+class IndexableSequence(object):
+ def __init__(self, get_keys, get_values, recursive=False, returnEmpty=False,
+ returnIterFunc=None, modifiable=False, delfunc=None, updatefunc=None):
+ self.__get_keys = get_keys
+ self.__get_values = get_values
+ self.__cache = {}
+ self.__cache_complete = False
+ self.__cache_can_be_complete = not recursive and not modifiable
+ self.__return_empty = returnEmpty
+ self.__returnFunc = returnIterFunc
+ self._frozen = not modifiable
+ if not self._frozen:
+ self.__del_func = delfunc
+ self.__update_func = updatefunc
+
+ def __getitem__(self, key):
+ if not (self.__cache_complete or self.__cache.has_key(key)):
+ self.__cache[key] = self.__get_values(key)
+ return self.__cache[key]
+
+ def keys(self):
+ return list(self.iterkeys())
+
+ def __delitem__(self, key):
+ if self._frozen:
+ raise AttributeError
+ if not key in self:
+ raise KeyError(key)
+ return self.__del_func(key)
+
+ def __setitem__(self, key, value):
+ if self._frozen:
+ raise AttributeError
+ if not key in self:
+ raise KeyError(key)
+ return self.__update_func(key, value)
+
+ def __contains__(self, key):
+ try:
+ self[key]
+ return True
+ except KeyError:
+ return False
+
+ def iterkeys(self):
+# print "iterkeys called, cache-complete=",self.__cache_complete
+ if self.__cache_complete:
+ return self.__cache.keys()
+ return self.__gen_keys()
+
+ def __gen_keys(self):
+ for key in self.__get_keys():
+ if not self.__cache.has_key(key):
+ self.__cache[key] = self.__get_values(key)
+# print "adding %s:%s" % (str(key), str(self.__cache[key]))
+ yield key
+ self.__cache_complete = self.__cache_can_be_complete
+ return
+
+ def __iter__(self):
+ if self.__returnFunc:
+ for key, value in self.iteritems():
+ if len(value) == 0:
+ if self.__return_empty:
+ yield key
+ else:
+ for x in value:
+ yield self.__returnFunc(key, x)
+ else:
+ for key, value in self.iteritems():
+ if len(value) == 0:
+ if self.__return_empty:
+ yield key
+ else:
+ for x in value:
+ yield key+'/'+x
+ return
+
+ def items(self):
+ return list(self.iteritems())
+
+ def iteritems(self):
+# print "iteritems called, cache-complete=",self.__cache_complete
+ if self.__cache_complete:
+ return self.__cache.items()
+ return self.__gen_items()
+
+ def __gen_items(self):
+ for key in self.iterkeys():
+ yield key, self[key]
+ return
+
+
+class LazyValDict(object):
+
+ def __init__(self, get_keys_func, get_val_func):
+ self.__val_func = get_val_func
+ self.__keys_func = get_keys_func
+ self.__vals = {}
+ self.__keys = {}
+
+
+	def __setitem__(self, key, value):
+		raise AttributeError
+
+
+	def __delitem__(self, key):
+		raise AttributeError
+
+
+ def __getitem__(self, key):
+ if self.__keys_func != None:
+ map(self.__keys.setdefault, self.__keys_func())
+ self.__keys_func = None
+ if key in self.__vals:
+ return self.__vals[key]
+ if key in self.__keys:
+ v = self.__vals[key] = self.__val_func(key)
+ del self.__keys[key]
+ return v
+ raise KeyError(key)
+
+
+ def iterkeys(self):
+ if self.__keys_func != None:
+ map(self.__keys.setdefault, self.__keys_func())
+ self.__keys_func = None
+ for k in self.__keys.keys():
+ yield k
+ for k in self.__vals.keys():
+ yield k
+
+ def keys(self):
+ return list(self.iterkeys())
+
+ def __contains__(self, key):
+ if self.__keys_func != None:
+ map(self.__keys.setdefault, self.__keys_func())
+ self.__keys_func = None
+ return key in self.__keys or key in self.__vals
+
+ __iter__ = iterkeys
+ has_key = __contains__
+
+
+ def iteritems(self):
+ for k in self.iterkeys():
+ yield k, self[k]
+
+
+ def items(self):
+ return list(self.iteritems())
+
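+# hypothetical sketch (the callables are made up): keys are listed once, each
+# value is computed on first access and cached thereafter-
+#   d = LazyValDict(cache.iterkeys, cache.get_entry)
+#   d["sys-apps/portage-2.0.51"]   # triggers exactly one get_entry call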
diff --git a/portage/util/fs.py b/portage/util/fs.py
new file mode 100644
index 0000000..e448990
--- /dev/null
+++ b/portage/util/fs.py
@@ -0,0 +1,519 @@
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/util/fs.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+cvs_id_string="$Id: fs.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $"[5:-2]
+
+import os, string, sys
+
+# writemsg isn't importable from the new namespace yet; minimal stand-in that
+# writes to stderr and ignores the old noiselevel argument.
+def writemsg(mystring, noiselevel=0):
+	sys.stderr.write(mystring)
+	sys.stderr.flush()
+
+try:
+ #XXX: This should get renamed to bsd_chflags, I think.
+ import chflags
+ bsd_chflags = chflags
+except SystemExit, e:
+ raise
+except:
+ # XXX: This should get renamed to bsd_chflags, I think.
+ bsd_chflags = None
+
+
+def grabfile(myfilename, compat_level=0):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
+ begins with a #, it is ignored, as are empty lines"""
+
+ try:
+ myfile=open(myfilename,"r")
+ except IOError:
+ return []
+ mylines=myfile.readlines()
+ myfile.close()
+ newlines=[]
+ for x in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline = ' '.join(x.split())
+ if not len(myline):
+ continue
+ if myline[0]=="#":
+ # Check if we have a compat-level string. BC-integration data.
+ # '##COMPAT==>N<==' 'some string attached to it'
+ mylinetest = myline.split("<==", 1)
+ if len(mylinetest) == 2:
+ myline_potential = mylinetest[1]
+ mylinetest = mylinetest[0].split("##COMPAT==>")
+ if len(mylinetest) == 2:
+ if compat_level >= int(mylinetest[1]):
+ # It's a compat line, and the key matches.
+ newlines.append(myline_potential)
+ continue
+ else:
+ continue
+ newlines.append(myline)
+ return newlines
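+
+# e.g. a line '##COMPAT==>2<== FOO="bar"' yields ' FOO="bar"' (the leading
+# space survives) when compat_level >= 2, and is dropped otherwise.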
+
+
+def grab_multiple(basename, locations, handler, all_must_exist=0):
+ mylist = []
+ for x in locations:
+ mylist.append(handler(x+"/"+basename))
+ return mylist
+
+
+def grabdict(myfilename,juststrings=0,empty=0):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary"""
+ newdict={}
+ try:
+ myfile=open(myfilename,"r")
+ except IOError,e:
+ return newdict
+ mylines=myfile.readlines()
+ myfile.close()
+ for x in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ if x[0] == "#":
+ continue
+ myline = x.split()
+ if len(myline)<2 and empty==0:
+ continue
+ if len(myline)<1 and empty==1:
+ continue
+ if juststrings:
+ newdict[myline[0]]=" ".join( myline[1:] )
+ else:
+ newdict[myline[0]]=myline[1:]
+ return newdict
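+
+# e.g. a line "sys-apps/portage flag1 flag2" comes back as
+# {"sys-apps/portage": ["flag1", "flag2"]}, or with juststrings set,
+# {"sys-apps/portage": "flag1 flag2"}.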
+
+
+def grabints(myfilename):
+ newdict={}
+ try:
+ myfile=open(myfilename,"r")
+ except IOError:
+ return newdict
+ mylines=myfile.readlines()
+ myfile.close()
+ for x in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline=x.split()
+ if len(myline)!=2:
+ continue
+ newdict[myline[0]]=int(myline[1])
+ return newdict
+
+
+def writeints(mydict,myfilename):
+ try:
+ myfile=open(myfilename,"w")
+ except IOError:
+ return 0
+ for x in mydict.keys():
+ myfile.write(x+" "+`mydict[x]`+"\n")
+ myfile.close()
+ return 1
+
+
+def writedict(mydict,myfilename,writekey=1):
+ """Writes out a dict to a file; writekey=0 mode doesn't write out
+ the key and assumes all values are strings, not lists."""
+ try:
+ myfile=open(myfilename,"w")
+ except IOError:
+ writemsg("Failed to open file for writedict(): "+str(myfilename)+"\n")
+ return 0
+ if not writekey:
+ for x in mydict.values():
+ myfile.write(x+"\n")
+ else:
+ for x in mydict.keys():
+ myfile.write(x+" ")
+ for y in mydict[x]:
+ myfile.write(y+" ")
+ myfile.write("\n")
+ myfile.close()
+ return 1
+
+
+def getconfig(mycfg,tolerant=0,allow_sourcing=False):
+ import shlex, string
+ mykeys={}
+ try:
+ f=open(mycfg,'r')
+ except IOError:
+ return None
+ try:
+ lex=shlex.shlex(f)
+ lex.wordchars=string.digits+string.letters+"~!@#$%*_\:;?,./-+{}"
+ lex.quotes="\"'"
+ if allow_sourcing:
+ lex.source="source"
+ while 1:
+ key=lex.get_token()
+ if (key==''):
+ #normal end of file
+ break;
+ equ=lex.get_token()
+ if (equ==''):
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n")
+ raise Exception("ParseError: Unexpected EOF: "+str(mycfg)+": on/before line "+str(lex.lineno))
+ else:
+ return mykeys
+ elif (equ!='='):
+ #invalid token
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Invalid token (not \"=\") "+str(equ)+"\n")
+ raise Exception("ParseError: Invalid token (not '='): "+str(mycfg)+": line "+str(lex.lineno))
+ else:
+ return mykeys
+ val=lex.get_token()
+ if (val==''):
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n")
+					raise Exception("ParseError: Unexpected EOF: "+str(mycfg)+": line "+str(lex.lineno))
+ else:
+ return mykeys
+ mykeys[key]=varexpand(val,mykeys)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ raise e.__class__, str(e)+" in "+mycfg
+ return mykeys
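+
+# e.g. a file containing:
+#   A="1"
+#   B="${A}2"
+# comes back as {"A": "1", "B": "12"}- values are run through varexpand
+# against the keys parsed so far.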
+
+
+#cache expansions of constant strings
+cexpand={}
+def varexpand(mystring,mydict={}):
+ """
+ new variable expansion code. Removes quotes, handles \n, etc.
+ This code is used by the configfile code, as well as others (parser)
+ This would be a good bunch of code to port to C.
+ """
+ mystring=" "+mystring
+ if mystring in cexpand:
+ return cexpand[mystring]
+ numvars=0
+ #in single, double quotes
+ insing=0
+ indoub=0
+ pos=1
+ newstring=" "
+ while (pos<len(mystring)):
+ if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
+ if (indoub):
+ newstring=newstring+"'"
+ else:
+ insing=not insing
+ pos=pos+1
+ continue
+ elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
+ if (insing):
+ newstring=newstring+'"'
+ else:
+ indoub=not indoub
+ pos=pos+1
+ continue
+ if (not insing):
+ #expansion time
+ if (mystring[pos]=="\n"):
+ #convert newlines to spaces
+ newstring=newstring+" "
+ pos=pos+1
+ elif (mystring[pos]=="\\"):
+ #backslash expansion time
+ if (pos+1>=len(mystring)):
+ newstring=newstring+mystring[pos]
+ break
+ else:
+ a=mystring[pos+1]
+ pos=pos+2
+ if a=='a':
+ newstring=newstring+chr(007)
+ elif a=='b':
+ newstring=newstring+chr(010)
+ elif a=='e':
+ newstring=newstring+chr(033)
+ elif (a=='f') or (a=='n'):
+ newstring=newstring+chr(012)
+ elif a=='r':
+ newstring=newstring+chr(015)
+ elif a=='t':
+ newstring=newstring+chr(011)
+ elif a=='v':
+ newstring=newstring+chr(013)
+ elif a!='\n':
+ #remove backslash only, as bash does: this takes care of \\ and \' and \" as well
+ newstring=newstring+mystring[pos-1:pos]
+ continue
+ elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
+ pos=pos+1
+ if mystring[pos]=="{":
+ pos=pos+1
+ braced=True
+ else:
+ braced=False
+ myvstart=pos
+ validchars=string.ascii_letters+string.digits+"_"
+ while mystring[pos] in validchars:
+ if (pos+1)>=len(mystring):
+ if braced:
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ break
+ pos=pos+1
+ myvarname=mystring[myvstart:pos]
+ if braced:
+ if mystring[pos]!="}":
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ if len(myvarname)==0:
+ cexpand[mystring]=""
+ return ""
+ numvars=numvars+1
+ if mydict.has_key(myvarname):
+ newstring=newstring+mydict[myvarname]
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+			else:
+				newstring=newstring+mystring[pos]
+				pos=pos+1
+		else:
+			#inside single quotes- pass the char through untouched.
+			newstring=newstring+mystring[pos]
+			pos=pos+1
+ if numvars==0:
+ cexpand[mystring]=newstring[1:]
+ return newstring[1:]
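+
+# e.g. (sketch) varexpand('$A "${B}"', {"A": "foo", "B": "bar"}) returns
+# 'foo bar'- quotes are stripped, and expansion is suppressed inside single
+# quotes only.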
+
+
+def pickle_write(data,filename,debug=0):
+ import cPickle,os
+ try:
+ myf=open(filename,"w")
+ cPickle.dump(data,myf,cPickle.HIGHEST_PROTOCOL)
+ myf.close()
+ writemsg("Wrote pickle: "+str(filename)+"\n",1)
+		# uid/portage_gid presumably live in portage.os_data in the new layout
+		# (lchown is pulled from there elsewhere in this file).
+		from portage.os_data import uid, portage_gid
+		os.chown(filename,uid,portage_gid)
+		os.chmod(filename,0664)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ return 0
+ return 1
+
+
+def pickle_read(filename,default=None,debug=0):
+ import cPickle,os
+ if not os.access(filename, os.R_OK):
+ writemsg("pickle_read(): File not readable. '"+filename+"'\n",1)
+ return default
+ data = None
+ try:
+ myf = open(filename)
+ mypickle = cPickle.Unpickler(myf)
+ mypickle.find_global = None
+ data = mypickle.load()
+ myf.close()
+ del mypickle,myf
+ writemsg("pickle_read(): Loaded pickle. '"+filename+"'\n",1)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! Failed to load pickle: "+str(e)+"\n",1)
+ data = default
+ return data
+
+# throw this out.
+def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
+ """moves a file from src to dest, preserving all permissions and attributes; mtime will
+ be preserved even when moving across filesystems. Returns true on success and false on
+ failure. Move is atomic."""
+ #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
+ from portage.spawn import selinux_capable
+ import stat, shutil, os.path
+ if selinux_capable:
+ import selinux
+ from portage.os_data import lchown
+ try:
+ if not sstat:
+ sstat=os.lstat(src)
+ if bsd_chflags:
+ sflags=bsd_chflags.lgetflags(src)
+ if sflags < 0:
+ # Problem getting flags...
+				print "!!! Couldn't get flags for "+src+"\n"
+ return None
+
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Stating source file failed... movefile()"
+ print "!!!",e
+ return None
+
+ destexists=1
+ try:
+ dstat=os.lstat(dest)
+ except SystemExit, e:
+ raise
+ except:
+ dstat=os.lstat(os.path.dirname(dest))
+ destexists=0
+
+ if bsd_chflags:
+ # Check that we can actually unset schg etc flags...
+ # Clear the flags on source and destination; we'll reinstate them after merging
+ if(destexists):
+ if bsd_chflags.lchflags(dest, 0) < 0:
+ print "!!! Couldn't clear flags on file being merged: \n"
+ # We might have an immutable flag on the parent dir; save and clear.
+ pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
+ bsd_chflags.lchflags(os.path.dirname(dest), 0)
+
+ # Don't bother checking the return value here; if it fails then the next line will catch it.
+ bsd_chflags.lchflags(src, 0)
+
+ if bsd_chflags.lhasproblems(src)>0 or (destexists and bsd_chflags.lhasproblems(dest)>0) or bsd_chflags.lhasproblems(os.path.dirname(dest))>0:
+ # This is bad: we can't merge the file with these flags set.
+ print "!!! Can't merge file "+dest+" because of flags set\n"
+ return None
+
+ if destexists:
+ if stat.S_ISLNK(dstat.st_mode):
+ try:
+ os.unlink(dest)
+ destexists=0
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+ if stat.S_ISLNK(sstat.st_mode):
+ try:
+ target=os.readlink(src)
+ if mysettings and mysettings["D"]:
+ if target.find(mysettings["D"])==0:
+ target=target[len(mysettings["D"]):]
+ if destexists and not stat.S_ISDIR(dstat.st_mode):
+ os.unlink(dest)
+ if selinux_capable:
+ sid = selinux.get_lsid(src)
+ selinux.secure_symlink(target,dest,sid)
+ else:
+ os.symlink(target,dest)
+ lchown(dest,sstat.st_uid, sstat.st_gid)
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
+					writemsg("!!! Couldn't restore flags ("+str(sflags)+") on " + dest+":\n")
+ return None
+ return os.lstat(dest).st_mtime
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! failed to properly create symlink:"
+ print "!!!",dest,"->",target
+ print "!!!",e
+ return None
+
+ renamefailed=1
+ if sstat.st_dev == dstat.st_dev or selinux_capable:
+ try:
+ if selinux_capable:
+ ret=selinux.secure_rename(src,dest)
+ else:
+ ret=os.rename(src,dest)
+ renamefailed=0
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ import errno
+ if e[0]!=errno.EXDEV:
+ # Some random error.
+ print "!!! Failed to move",src,"to",dest
+ print "!!!",e
+ return None
+ # Invalid cross-device-link 'bind' mounted or actually Cross-Device
+ if renamefailed:
+ didcopy=0
+ if stat.S_ISREG(sstat.st_mode):
+ try: # For safety copy then move it over.
+ if selinux_capable:
+ selinux.secure_copy(src,dest+"#new")
+ selinux.secure_rename(dest+"#new",dest)
+ else:
+ shutil.copyfile(src,dest+"#new")
+ os.rename(dest+"#new",dest)
+ didcopy=1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print '!!! copy',src,'->',dest,'failed.'
+ print "!!!",e
+ return None
+ else:
+				#we don't yet handle special, so we need to fall back to /bin/mv.
+				#assumption: spawn_get_output lives alongside selinux_capable in
+				#portage.spawn; /bin/mv stands in for the old MOVE_BINARY constant.
+				from portage.spawn import spawn_get_output
+				MOVE_BINARY="/bin/mv"
+				if selinux_capable:
+					a=spawn_get_output(MOVE_BINARY+" -c -f '%s' '%s'" % (src,dest))
+				else:
+					a=spawn_get_output(MOVE_BINARY+" -f '%s' '%s'" % (src,dest))
+ if a[0]!=0:
+ print "!!! Failed to move special file:"
+ print "!!! '"+src+"' to '"+dest+"'"
+ print "!!!",a
+ return None # failure
+ try:
+		if renamefailed and didcopy:
+			lchown(dest,sstat.st_uid, sstat.st_gid)
+			os.chmod(dest, stat.S_IMODE(sstat.st_mode)) # Sticky is reset on chown
+			os.unlink(src)
+	except SystemExit, e:
+		raise
+ except Exception, e:
+ print "!!! Failed to chown/chmod/unlink in movefile()"
+ print "!!!",dest
+ print "!!!",e
+ return None
+
+ if newmtime:
+ os.utime(dest,(newmtime,newmtime))
+ else:
+ os.utime(dest, (sstat.st_atime, sstat.st_mtime))
+ newmtime=sstat.st_mtime
+
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
+ writemsg("!!! Couldn't restore flags ("+str(sflags)+") on " + dest+":\n")
+ return None
+
+ return newmtime
+
+
+def abssymlink(symlink):
+ """
+ This reads symlinks, resolving the relative symlinks, and returning the absolute.
+ """
+ import os.path
+ mylink=os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir=os.path.dirname(symlink)
+ mylink=mydir+"/"+mylink
+ return os.path.normpath(mylink)
+
+
+def normpath(mypath):
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[:2] == "//":
+ return newpath[1:]
+ return newpath
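+
+# e.g. normpath("//usr//lib/") gives "/usr/lib"- os.path.normpath alone keeps
+# the leading double slash, which this strips.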
diff --git a/portage/util/misc.py b/portage/util/misc.py
new file mode 100644
index 0000000..38df0f9
--- /dev/null
+++ b/portage/util/misc.py
@@ -0,0 +1,99 @@
+# Copyright 1998-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/util/misc.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+cvs_id_string="$Id: misc.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $"[5:-2]
+
+#clean this up.
+import sys,string,shlex,os.path,stat,types
+import shutil
+
+
+def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
+	"""Stacks an array of dict-types into one dict. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->list.
+ Returns a single dict. Higher index in lists is preferenced."""
+ final_dict = None
+ kill_list = {}
+ for mydict in original_dicts:
+ if mydict == None:
+ continue
+ if final_dict == None:
+ final_dict = {}
+ for y in mydict.keys():
+ if not final_dict.has_key(y):
+ final_dict[y] = []
+ if not kill_list.has_key(y):
+ kill_list[y] = []
+
+ mydict[y].reverse()
+ for thing in mydict[y]:
+ if thing and (thing not in kill_list[y]) and ("*" not in kill_list[y]):
+ if (incremental or (y in incrementals)) and thing[0] == '-':
+ if thing[1:] not in kill_list[y]:
+ kill_list[y] += [thing[1:]]
+ else:
+ if thing not in final_dict[y]:
+ final_dict[y].append(thing[:])
+ mydict[y].reverse()
+ if final_dict.has_key(y) and not final_dict[y]:
+ del final_dict[y]
+ return final_dict
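+
+# note the ordering: a "-x" in an earlier dict masks "x" in later dicts, but
+# doesn't retroactively remove an already-accepted "x"- e.g.
+# stack_dictlist([{"USE": ["-x"]}, {"USE": ["x", "y"]}], incremental=1)
+# gives {"USE": ["y"]}.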
+
+
+def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
+	"""Stacks an array of dict-types into one dict. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->string.
+ Returns a single dict."""
+ final_dict = None
+ for mydict in dicts:
+ if mydict == None:
+ if ignore_none:
+ continue
+ else:
+ return None
+ if final_dict == None:
+ final_dict = {}
+ for y in mydict.keys():
+ if mydict[y]:
+ if final_dict.has_key(y) and (incremental or (y in incrementals)):
+ final_dict[y] += " "+mydict[y][:]
+ else:
+ final_dict[y] = mydict[y][:]
+ mydict[y] = string.join(mydict[y].split()) # Remove extra spaces.
+ return final_dict
+
+def stack_lists(lists, incremental=1):
+ """Stacks an array of list-types into one array. Optionally removing
+ distinct values using '-value' notation. Higher index is preferenced."""
+ new_list = []
+ for x in lists:
+ for y in x:
+ if y:
+ if incremental and y[0]=='-':
+ while y[1:] in new_list:
+ del new_list[new_list.index(y[1:])]
+ else:
+ if y not in new_list:
+ new_list.append(y[:])
+ return new_list
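+
+# e.g. stack_lists([["a", "b"], ["-b", "c"]]) gives ["a", "c"]- the later
+# "-b" strips the earlier "b".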
+
+
+def unique_array(array):
+ """Takes an array and makes sure each element is unique."""
+ newarray = []
+ for x in array:
+ if x not in newarray:
+ newarray.append(x)
+ return newarray
+
+
+def flatten(mytokens):
+ """this function now turns a [1,[2,3]] list into
+ a [1,2,3] list and returns it."""
+ newlist=[]
+ for x in mytokens:
+ if type(x)==list:
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
diff --git a/portage/util/modules.py b/portage/util/modules.py
new file mode 100644
index 0000000..b6aad7e
--- /dev/null
+++ b/portage/util/modules.py
@@ -0,0 +1,22 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/util/modules.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+def load_module(name):
+ m = __import__(name)
+ nl = name.split('.')
+ # __import__ returns nl[0]... so.
+ nl.pop(0)
+ while len(nl):
+ m = getattr(m, nl[0])
+ nl.pop(0)
+ return m
+
+def load_attribute(name):
+ i = name.rfind(".")
+ if i == -1:
+ raise ValueError("name isn't an attribute, it's a module... : %s" % name)
+ m = load_module(name[:i])
+ m = getattr(m, name[i+1:])
+ return m
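+
+# e.g. load_module("portage.util.modules") returns this module, while
+# load_attribute("portage.util.modules.load_module") returns the function
+# itself.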
diff --git a/portage/vdb/__init__.py b/portage/vdb/__init__.py
new file mode 100644
index 0000000..efcbd88
--- /dev/null
+++ b/portage/vdb/__init__.py
@@ -0,0 +1 @@
+import repository
diff --git a/portage/vdb/repository.py b/portage/vdb/repository.py
new file mode 100644
index 0000000..5e20702
--- /dev/null
+++ b/portage/vdb/repository.py
@@ -0,0 +1,76 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Header: /local/data/ulm/cvs/history/var/cvsroot/gentoo-src/portage/portage/vdb/repository.py,v 1.1 2005/07/10 09:21:05 ferringb Exp $
+
+# hack, remove when it's fixed
+raise Exception("sorry, this won't work with current portage namespace layout. plsfix, kthnx")
+
+import os,stat
+import prototype, errors
+
+#needed to grab the PN
+import portage_versions
+
+class tree(prototype.tree):
+ def __init__(self, base):
+ super(tree,self).__init__()
+ self.base = base
+ try:
+ st = os.lstat(self.base)
+ if not stat.S_ISDIR(st.st_mode):
+ raise errors.InitializationError("base not a dir: %s" % self.base)
+			# os.R_OK/os.X_OK are access() flags, not st_mode bits- use access().
+			elif not os.access(self.base, os.R_OK|os.X_OK):
+ raise errors.InitializationError("base lacks read/executable: %s" % self.base)
+
+ except OSError:
+ raise errors.InitializationError("lstat failed on base %s" % self.base)
+
+
+ def _get_categories(self, *optionalCategory):
+ # return if optionalCategory is passed... cause it's not yet supported
+ if len(optionalCategory):
+ return {}
+
+ try: return tuple([x for x in os.listdir(self.base) \
+ if stat.S_ISDIR(os.lstat(os.path.join(self.base,x)).st_mode) and x != "All"])
+
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching categories: %s" % str(e))
+
+ def _get_packages(self, category):
+ cpath = os.path.join(self.base,category.lstrip(os.path.sep))
+ l=[]
+ try:
+ for x in os.listdir(cpath):
+ if stat.S_ISDIR(os.stat(os.path.join(cpath,x)).st_mode) and not x.endswith(".lockfile"):
+ l.append(portage_versions.pkgsplit(x)[0])
+ return tuple(l)
+
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching packages for category %s: %s" % \
+ (os.path.join(self.base,category.lstrip(os.path.sep)), str(e)))
+
+
+ def _get_versions(self, catpkg):
+ pkg = catpkg.split("/")[-1]
+ l=[]
+ try:
+ cpath=os.path.join(self.base, os.path.dirname(catpkg.lstrip("/").rstrip("/")))
+ for x in os.listdir(cpath):
+ if x.startswith(pkg) and stat.S_ISDIR(os.stat(os.path.join(cpath,x)).st_mode) and not x.endswith(".lockfile"):
+ ver=portage_versions.pkgsplit(x)
+
+ #pkgsplit returns -r0, when it's not always there
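+					# e.g. a "foo-1.0" dir splits to ("foo","1.0","r0") with no
+					# literal "-r0" suffix, while a "foo-1.0-r0" dir keeps it.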
+ if ver[2] == "r0":
+ if x.endswith(ver[2]):
+ l.append("%s-%s" % (ver[1], ver[2]))
+ else:
+ l.append(ver[1])
+ else:
+ l.append("%s-%s" % (ver[1], ver[2]))
+ return tuple(l)
+ except (OSError, IOError), e:
+			raise KeyError("failed fetching versions for package %s: %s" % \
+ (os.path.join(self.base,catpkg.lstrip(os.path.sep)), str(e)))
+