aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'pym/_emerge')
-rw-r--r--pym/_emerge/AbstractDepPriority.py5
-rw-r--r--pym/_emerge/AbstractEbuildProcess.py58
-rw-r--r--pym/_emerge/AbstractPollTask.py2
-rw-r--r--pym/_emerge/AsynchronousLock.py66
-rw-r--r--pym/_emerge/AsynchronousTask.py14
-rw-r--r--pym/_emerge/Binpkg.py7
-rw-r--r--pym/_emerge/BinpkgExtractorAsync.py15
-rw-r--r--pym/_emerge/BinpkgFetcher.py18
-rw-r--r--pym/_emerge/BinpkgVerifier.py143
-rw-r--r--pym/_emerge/BlockerCache.py10
-rw-r--r--pym/_emerge/BlockerDB.py12
-rw-r--r--pym/_emerge/CompositeTask.py4
-rw-r--r--pym/_emerge/DepPriority.py29
-rw-r--r--pym/_emerge/DepPrioritySatisfiedRange.py24
-rw-r--r--pym/_emerge/DependencyArg.py10
-rw-r--r--pym/_emerge/EbuildBuild.py36
-rw-r--r--pym/_emerge/EbuildBuildDir.py11
-rw-r--r--pym/_emerge/EbuildExecuter.py13
-rw-r--r--pym/_emerge/EbuildFetcher.py68
-rw-r--r--pym/_emerge/EbuildMetadataPhase.py66
-rw-r--r--pym/_emerge/EbuildPhase.py63
-rw-r--r--pym/_emerge/EbuildProcess.py12
-rw-r--r--pym/_emerge/EbuildSpawnProcess.py10
-rw-r--r--pym/_emerge/FakeVartree.py123
-rw-r--r--pym/_emerge/FifoIpcDaemon.py43
-rw-r--r--pym/_emerge/JobStatusDisplay.py44
-rw-r--r--pym/_emerge/MergeListItem.py18
-rw-r--r--pym/_emerge/MetadataRegen.py93
-rw-r--r--pym/_emerge/MiscFunctionsProcess.py7
-rw-r--r--pym/_emerge/Package.py317
-rw-r--r--pym/_emerge/PackageMerge.py7
-rw-r--r--pym/_emerge/PackageUninstall.py6
-rw-r--r--pym/_emerge/PackageVirtualDbapi.py4
-rw-r--r--pym/_emerge/PipeReader.py37
-rw-r--r--pym/_emerge/PollScheduler.py129
-rw-r--r--pym/_emerge/QueueScheduler.py105
-rw-r--r--pym/_emerge/RootConfig.py13
-rw-r--r--pym/_emerge/Scheduler.py240
-rw-r--r--pym/_emerge/SpawnProcess.py269
-rw-r--r--pym/_emerge/SubProcess.py30
-rw-r--r--pym/_emerge/Task.py9
-rw-r--r--pym/_emerge/TaskScheduler.py26
-rw-r--r--pym/_emerge/UnmergeDepPriority.py27
-rw-r--r--pym/_emerge/UseFlagDisplay.py10
-rw-r--r--pym/_emerge/actions.py1704
-rw-r--r--pym/_emerge/chk_updated_cfg_files.py42
-rw-r--r--pym/_emerge/clear_caches.py4
-rw-r--r--pym/_emerge/countdown.py18
-rw-r--r--pym/_emerge/create_depgraph_params.py23
-rw-r--r--pym/_emerge/create_world_atom.py25
-rw-r--r--pym/_emerge/depgraph.py2451
-rw-r--r--pym/_emerge/emergelog.py12
-rw-r--r--pym/_emerge/getloadavg.py5
-rw-r--r--pym/_emerge/help.py10
-rw-r--r--pym/_emerge/is_valid_package_atom.py7
-rw-r--r--pym/_emerge/main.py1297
-rw-r--r--pym/_emerge/post_emerge.py165
-rw-r--r--pym/_emerge/resolver/backtracking.py38
-rw-r--r--pym/_emerge/resolver/circular_dependency.py24
-rw-r--r--pym/_emerge/resolver/output.py537
-rw-r--r--pym/_emerge/resolver/output_helpers.py95
-rw-r--r--pym/_emerge/resolver/package_tracker.py301
-rw-r--r--pym/_emerge/resolver/slot_collision.py230
-rw-r--r--pym/_emerge/search.py4
-rw-r--r--pym/_emerge/stdout_spinner.py13
-rw-r--r--pym/_emerge/unmerge.py5
66 files changed, 5657 insertions, 3606 deletions
diff --git a/pym/_emerge/AbstractDepPriority.py b/pym/_emerge/AbstractDepPriority.py
index 94f26efc5..1fcd04345 100644
--- a/pym/_emerge/AbstractDepPriority.py
+++ b/pym/_emerge/AbstractDepPriority.py
@@ -1,11 +1,12 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import copy
from portage.util.SlotObject import SlotObject
class AbstractDepPriority(SlotObject):
- __slots__ = ("buildtime", "runtime", "runtime_post")
+ __slots__ = ("buildtime", "buildtime_slot_op",
+ "runtime", "runtime_post", "runtime_slot_op")
def __lt__(self, other):
return self.__int__() < other
diff --git a/pym/_emerge/AbstractEbuildProcess.py b/pym/_emerge/AbstractEbuildProcess.py
index c7b8f83ca..31127f474 100644
--- a/pym/_emerge/AbstractEbuildProcess.py
+++ b/pym/_emerge/AbstractEbuildProcess.py
@@ -1,8 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import platform
import stat
+import subprocess
import textwrap
from _emerge.SpawnProcess import SpawnProcess
from _emerge.EbuildBuildDir import EbuildBuildDir
@@ -20,8 +22,10 @@ class AbstractEbuildProcess(SpawnProcess):
__slots__ = ('phase', 'settings',) + \
('_build_dir', '_ipc_daemon', '_exit_command', '_exit_timeout_id')
+
_phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
_phases_interactive_whitelist = ('config',)
+ _phases_without_cgroup = ('preinst', 'postinst', 'prerm', 'postrm', 'config')
# Number of milliseconds to allow natural exit of the ebuild
# process after it has called the exit command via IPC. It
@@ -52,13 +56,48 @@ class AbstractEbuildProcess(SpawnProcess):
if need_builddir and \
not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
msg = _("The ebuild phase '%s' has been aborted "
- "since PORTAGE_BUILDIR does not exist: '%s'") % \
+ "since PORTAGE_BUILDDIR does not exist: '%s'") % \
(self.phase, self.settings['PORTAGE_BUILDDIR'])
self._eerror(textwrap.wrap(msg, 72))
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
+ # Check if the cgroup hierarchy is in place. If it's not, mount it.
+ if (os.geteuid() == 0 and platform.system() == 'Linux'
+ and 'cgroup' in self.settings.features
+ and self.phase not in self._phases_without_cgroup):
+ cgroup_root = '/sys/fs/cgroup'
+ cgroup_portage = os.path.join(cgroup_root, 'portage')
+ cgroup_path = os.path.join(cgroup_portage,
+ '%s:%s' % (self.settings["CATEGORY"],
+ self.settings["PF"]))
+ try:
+ # cgroup tmpfs
+ if not os.path.ismount(cgroup_root):
+ # we expect /sys/fs to be there already
+ if not os.path.isdir(cgroup_root):
+ os.mkdir(cgroup_root, 0o755)
+ subprocess.check_call(['mount', '-t', 'tmpfs',
+ '-o', 'rw,nosuid,nodev,noexec,mode=0755',
+ 'tmpfs', cgroup_root])
+
+ # portage subsystem
+ if not os.path.ismount(cgroup_portage):
+ if not os.path.isdir(cgroup_portage):
+ os.mkdir(cgroup_portage, 0o755)
+ subprocess.check_call(['mount', '-t', 'cgroup',
+ '-o', 'rw,nosuid,nodev,noexec,none,name=portage',
+ 'tmpfs', cgroup_portage])
+
+ # the ebuild cgroup
+ if not os.path.isdir(cgroup_path):
+ os.mkdir(cgroup_path)
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ self.cgroup = cgroup_path
+
if self.background:
# Automatically prevent color codes from showing up in logs,
# since we're not displaying to a terminal anyway.
@@ -67,7 +106,7 @@ class AbstractEbuildProcess(SpawnProcess):
if self._enable_ipc_daemon:
self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
if self.phase not in self._phases_without_builddir:
- if 'PORTAGE_BUILDIR_LOCKED' not in self.settings:
+ if 'PORTAGE_BUILDDIR_LOCKED' not in self.settings:
self._build_dir = EbuildBuildDir(
scheduler=self.scheduler, settings=self.settings)
self._build_dir.lock()
@@ -143,9 +182,14 @@ class AbstractEbuildProcess(SpawnProcess):
self._exit_command.reply_hook = self._exit_command_callback
query_command = QueryCommand(self.settings, self.phase)
commands = {
- 'best_version' : query_command,
- 'exit' : self._exit_command,
- 'has_version' : query_command,
+ 'available_eclasses' : query_command,
+ 'best_version' : query_command,
+ 'eclass_path' : query_command,
+ 'exit' : self._exit_command,
+ 'has_version' : query_command,
+ 'license_path' : query_command,
+ 'master_repositories' : query_command,
+ 'repository_path' : query_command,
}
input_fifo, output_fifo = self._init_ipc_fifos()
self._ipc_daemon = EbuildIpcDaemon(commands=commands,
diff --git a/pym/_emerge/AbstractPollTask.py b/pym/_emerge/AbstractPollTask.py
index 2c8470925..3f6dd6cef 100644
--- a/pym/_emerge/AbstractPollTask.py
+++ b/pym/_emerge/AbstractPollTask.py
@@ -151,4 +151,4 @@ class AbstractPollTask(AsynchronousTask):
while self._registered and not timeout_cb.timed_out:
self.scheduler.iteration()
finally:
- self.scheduler.unregister(timeout_cb.timeout_id)
+ self.scheduler.source_remove(timeout_cb.timeout_id)
diff --git a/pym/_emerge/AsynchronousLock.py b/pym/_emerge/AsynchronousLock.py
index 587aa4650..c0b9b26dc 100644
--- a/pym/_emerge/AsynchronousLock.py
+++ b/pym/_emerge/AsynchronousLock.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import dummy_threading
@@ -49,7 +49,7 @@ class AsynchronousLock(AsynchronousTask):
pass
else:
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
if self._force_process or \
@@ -105,44 +105,27 @@ class _LockThread(AbstractPollTask):
"""
__slots__ = ('path',) + \
- ('_files', '_force_dummy', '_lock_obj',
- '_thread', '_reg_id',)
+ ('_force_dummy', '_lock_obj', '_thread',)
def _start(self):
- pr, pw = os.pipe()
- self._files = {}
- self._files['pipe_read'] = pr
- self._files['pipe_write'] = pw
- for f in self._files.values():
- fcntl.fcntl(f, fcntl.F_SETFL,
- fcntl.fcntl(f, fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(self._files['pipe_read'],
- self.scheduler.IO_IN, self._output_handler)
self._registered = True
threading_mod = threading
if self._force_dummy:
threading_mod = dummy_threading
self._thread = threading_mod.Thread(target=self._run_lock)
+ self._thread.daemon = True
self._thread.start()
def _run_lock(self):
self._lock_obj = lockfile(self.path, wantnewlockfile=True)
- os.write(self._files['pipe_write'], b'\0')
-
- def _output_handler(self, f, event):
- buf = None
- if event & self.scheduler.IO_IN:
- try:
- buf = os.read(self._files['pipe_read'], self._bufsize)
- except OSError as e:
- if e.errno not in (errno.EAGAIN,):
- raise
- if buf:
- self._unregister()
- self.returncode = os.EX_OK
- self.wait()
+ # Thread-safe callback to EventLoop
+ self.scheduler.idle_add(self._run_lock_cb)
- return True
+ def _run_lock_cb(self):
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+ return False
def _cancel(self):
# There's currently no way to force thread termination.
@@ -163,15 +146,6 @@ class _LockThread(AbstractPollTask):
self._thread.join()
self._thread = None
- if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
- self._reg_id = None
-
- if self._files is not None:
- for f in self._files.values():
- os.close(f)
- self._files = None
-
class _LockProcess(AbstractPollTask):
"""
This uses the portage.locks module to acquire a lock asynchronously,
@@ -190,16 +164,28 @@ class _LockProcess(AbstractPollTask):
self._files = {}
self._files['pipe_in'] = in_pr
self._files['pipe_out'] = out_pw
+
fcntl.fcntl(in_pr, fcntl.F_SETFL,
fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(in_pr,
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(in_pr, fcntl.F_SETFD,
+ fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(in_pr,
self.scheduler.IO_IN, self._output_handler)
self._registered = True
self._proc = SpawnProcess(
args=[portage._python_interpreter,
os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
- fd_pipes={0:out_pr, 1:in_pw, 2:sys.stderr.fileno()},
+ fd_pipes={0:out_pr, 1:in_pw, 2:sys.__stderr__.fileno()},
scheduler=self.scheduler)
self._proc.addExitListener(self._proc_exit)
self._proc.start()
@@ -273,7 +259,7 @@ class _LockProcess(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
diff --git a/pym/_emerge/AsynchronousTask.py b/pym/_emerge/AsynchronousTask.py
index 7a193ce7d..da58261db 100644
--- a/pym/_emerge/AsynchronousTask.py
+++ b/pym/_emerge/AsynchronousTask.py
@@ -60,6 +60,20 @@ class AsynchronousTask(SlotObject):
def _wait(self):
return self.returncode
+ def _async_wait(self):
+ """
+ For cases where _start exits synchronously, this method is a
+ convenient way to trigger an asynchronous call to self.wait()
+ (in order to notify exit listeners), avoiding excessive event
+ loop recursion (or stack overflow) that synchronous calling of
+ exit listeners can cause. This method is thread-safe.
+ """
+ self.scheduler.idle_add(self._async_wait_cb)
+
+ def _async_wait_cb(self):
+ self.wait()
+ return False
+
def cancel(self):
"""
Cancel the task, but do not wait for exit status. If asynchronous exit
diff --git a/pym/_emerge/Binpkg.py b/pym/_emerge/Binpkg.py
index ea8a1ad13..a740efdb9 100644
--- a/pym/_emerge/Binpkg.py
+++ b/pym/_emerge/Binpkg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildPhase import EbuildPhase
@@ -298,6 +298,7 @@ class Binpkg(CompositeTask):
extractor = BinpkgExtractorAsync(background=self.background,
env=self.settings.environ(),
+ features=self.settings.features,
image_dir=self._image_dir,
pkg=self.pkg, pkg_path=self._pkg_path,
logfile=self.settings.get("PORTAGE_LOG_FILE"),
@@ -328,11 +329,13 @@ class Binpkg(CompositeTask):
self.wait()
return
+ env = self.settings.environ()
+ env["PYTHONPATH"] = self.settings["PORTAGE_PYTHONPATH"]
chpathtool = SpawnProcess(
args=[portage._python_interpreter,
os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
- background=self.background, env=self.settings.environ(),
+ background=self.background, env=env,
scheduler=self.scheduler,
logfile=self.settings.get('PORTAGE_LOG_FILE'))
self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
diff --git a/pym/_emerge/BinpkgExtractorAsync.py b/pym/_emerge/BinpkgExtractorAsync.py
index f25cbf933..be74c2fb7 100644
--- a/pym/_emerge/BinpkgExtractorAsync.py
+++ b/pym/_emerge/BinpkgExtractorAsync.py
@@ -1,23 +1,31 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SpawnProcess import SpawnProcess
import portage
import signal
+import subprocess
class BinpkgExtractorAsync(SpawnProcess):
- __slots__ = ("image_dir", "pkg", "pkg_path")
+ __slots__ = ("features", "image_dir", "pkg", "pkg_path")
_shell_binary = portage.const.BASH_BINARY
def _start(self):
+ tar_options = ""
+ if "xattr" in self.features:
+ process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = process.communicate()[0]
+ if b"--xattrs" in output:
+ tar_options = "--xattrs"
+
# Add -q to bzip2 opts, in order to avoid "trailing garbage after
# EOF ignored" warning messages due to xpak trailer.
# SIGPIPE handling (128 + SIGPIPE) should be compatible with
# assert_sigpipe_ok() that's used by the ebuild unpack() helper.
self.args = [self._shell_binary, "-c",
- ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp -C %s -f - ; " + \
+ ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp %s -C %s -f - ; " + \
"p=(${PIPESTATUS[@]}) ; " + \
"if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
"echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
@@ -25,6 +33,7 @@ class BinpkgExtractorAsync(SpawnProcess):
"echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
"exit 0 ;") % \
(portage._shell_quote(self.pkg_path),
+ tar_options,
portage._shell_quote(self.image_dir))]
SpawnProcess._start(self)
diff --git a/pym/_emerge/BinpkgFetcher.py b/pym/_emerge/BinpkgFetcher.py
index f415e2ec7..543881ee6 100644
--- a/pym/_emerge/BinpkgFetcher.py
+++ b/pym/_emerge/BinpkgFetcher.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AsynchronousLock import AsynchronousLock
@@ -63,7 +63,7 @@ class BinpkgFetcher(SpawnProcess):
if pretend:
portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
protocol = urllib_parse_urlparse(uri)[0]
@@ -80,6 +80,12 @@ class BinpkgFetcher(SpawnProcess):
"FILE" : os.path.basename(pkg_path)
}
+ for k in ("PORTAGE_SSH_OPTS",):
+ try:
+ fcmd_vars[k] = settings[k]
+ except KeyError:
+ pass
+
fetch_env = dict(settings.items())
fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
for x in portage.util.shlex_split(fcmd)]
@@ -91,9 +97,9 @@ class BinpkgFetcher(SpawnProcess):
# Redirect all output to stdout since some fetchers like
# wget pollute stderr (if portage detects a problem then it
# can send it's own message to stderr).
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stdout.fileno())
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stdout__.fileno())
self.args = fetch_args
self.env = fetch_env
@@ -104,7 +110,7 @@ class BinpkgFetcher(SpawnProcess):
def _pipe(self, fd_pipes):
"""When appropriate, use a pty so that fetcher progress bars,
like wget has, will work properly."""
- if self.background or not sys.stdout.isatty():
+ if self.background or not sys.__stdout__.isatty():
# When the output only goes to a log file,
# there's no point in creating a pty.
return os.pipe()
diff --git a/pym/_emerge/BinpkgVerifier.py b/pym/_emerge/BinpkgVerifier.py
index 0052967f6..2c6979265 100644
--- a/pym/_emerge/BinpkgVerifier.py
+++ b/pym/_emerge/BinpkgVerifier.py
@@ -1,75 +1,120 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from _emerge.AsynchronousTask import AsynchronousTask
-from portage.util import writemsg
+import errno
import io
import sys
+
+from _emerge.CompositeTask import CompositeTask
import portage
from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.output import EOutput
+from portage.util._async.FileDigester import FileDigester
from portage.package.ebuild.fetch import _checksum_failure_temp_file
-class BinpkgVerifier(AsynchronousTask):
- __slots__ = ("logfile", "pkg", "scheduler")
+class BinpkgVerifier(CompositeTask):
+ __slots__ = ("logfile", "pkg", "_digests", "_pkg_path")
def _start(self):
- """
- Note: Unlike a normal AsynchronousTask.start() method,
- this one does all work is synchronously. The returncode
- attribute will be set before it returns.
- """
-
- pkg = self.pkg
- root_config = pkg.root_config
- bintree = root_config.trees["bintree"]
- rval = os.EX_OK
+
+ bintree = self.pkg.root_config.trees["bintree"]
+ digests = bintree._get_digests(self.pkg)
+ if "size" not in digests:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ digests = _filter_unaccelarated_hashes(digests)
+ hash_filter = _hash_filter(
+ bintree.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
+
+ self._digests = digests
+ self._pkg_path = bintree.getname(self.pkg.cpv)
+
+ try:
+ size = os.stat(self._pkg_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ self.scheduler.output(("!!! Fetching Binary failed "
+ "for '%s'\n") % self.pkg.cpv, log_path=self.logfile,
+ background=self.background)
+ self.returncode = 1
+ self._async_wait()
+ return
+ else:
+ if size != digests["size"]:
+ self._digest_exception("size", size, digests["size"])
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ self._start_task(FileDigester(file_path=self._pkg_path,
+ hash_names=(k for k in digests if k != "size"),
+ background=self.background, logfile=self.logfile,
+ scheduler=self.scheduler),
+ self._digester_exit)
+
+ def _digester_exit(self, digester):
+
+ if self._default_exit(digester) != os.EX_OK:
+ self.wait()
+ return
+
+ for hash_name in digester.hash_names:
+ if digester.digests[hash_name] != self._digests[hash_name]:
+ self._digest_exception(hash_name,
+ digester.digests[hash_name], self._digests[hash_name])
+ self.returncode = 1
+ self.wait()
+ return
+
+ if self.pkg.root_config.settings.get("PORTAGE_QUIET") != "1":
+ self._display_success()
+
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _display_success(self):
stdout_orig = sys.stdout
stderr_orig = sys.stderr
global_havecolor = portage.output.havecolor
out = io.StringIO()
- file_exists = True
try:
sys.stdout = out
sys.stderr = out
if portage.output.havecolor:
portage.output.havecolor = not self.background
- try:
- bintree.digestCheck(pkg)
- except portage.exception.FileNotFound:
- writemsg("!!! Fetching Binary failed " + \
- "for '%s'\n" % pkg.cpv, noiselevel=-1)
- rval = 1
- file_exists = False
- except portage.exception.DigestException as e:
- writemsg("\n!!! Digest verification failed:\n",
- noiselevel=-1)
- writemsg("!!! %s\n" % e.value[0],
- noiselevel=-1)
- writemsg("!!! Reason: %s\n" % e.value[1],
- noiselevel=-1)
- writemsg("!!! Got: %s\n" % e.value[2],
- noiselevel=-1)
- writemsg("!!! Expected: %s\n" % e.value[3],
- noiselevel=-1)
- rval = 1
- if rval == os.EX_OK:
- pass
- elif file_exists:
- pkg_path = bintree.getname(pkg.cpv)
- head, tail = os.path.split(pkg_path)
- temp_filename = _checksum_failure_temp_file(head, tail)
- writemsg("File renamed to '%s'\n" % (temp_filename,),
- noiselevel=-1)
+
+ eout = EOutput()
+ eout.ebegin("%s %s ;-)" % (os.path.basename(self._pkg_path),
+ " ".join(sorted(self._digests))))
+ eout.eend(0)
+
finally:
sys.stdout = stdout_orig
sys.stderr = stderr_orig
portage.output.havecolor = global_havecolor
- msg = out.getvalue()
- if msg:
- self.scheduler.output(msg, log_path=self.logfile,
- background=self.background)
+ self.scheduler.output(out.getvalue(), log_path=self.logfile,
+ background=self.background)
- self.returncode = rval
- self.wait()
+ def _digest_exception(self, name, value, expected):
+
+ head, tail = os.path.split(self._pkg_path)
+ temp_filename = _checksum_failure_temp_file(head, tail)
+ self.scheduler.output((
+ "\n!!! Digest verification failed:\n"
+ "!!! %s\n"
+ "!!! Reason: Failed on %s verification\n"
+ "!!! Got: %s\n"
+ "!!! Expected: %s\n"
+ "File renamed to '%s'\n") %
+ (self._pkg_path, name, value, expected, temp_filename),
+ log_path=self.logfile,
+ background=self.background)
diff --git a/pym/_emerge/BlockerCache.py b/pym/_emerge/BlockerCache.py
index fce81f83a..53342d6d6 100644
--- a/pym/_emerge/BlockerCache.py
+++ b/pym/_emerge/BlockerCache.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -62,7 +62,9 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
self._cache_data = mypickle.load()
f.close()
del f
- except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
if isinstance(e, EnvironmentError) and \
getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
pass
@@ -126,9 +128,9 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
self._modified.clear()
def flush(self):
- """If the current user has permission and the internal blocker cache
+ """If the current user has permission and the internal blocker cache has
been updated, save it to disk and mark it unmodified. This is called
- by emerge after it has proccessed blockers for all installed packages.
+ by emerge after it has processed blockers for all installed packages.
Currently, the cache is only written if the user has superuser
privileges (since that's required to obtain a lock), but all users
have read access and benefit from faster blocker lookups (as long as
diff --git a/pym/_emerge/BlockerDB.py b/pym/_emerge/BlockerDB.py
index 459affdb0..8bb8f5fda 100644
--- a/pym/_emerge/BlockerDB.py
+++ b/pym/_emerge/BlockerDB.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -9,6 +9,7 @@ from portage import digraph
from portage._sets.base import InternalPackageSet
from _emerge.BlockerCache import BlockerCache
+from _emerge.Package import Package
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
if sys.hexversion >= 0x3000000:
@@ -38,7 +39,7 @@ class BlockerDB(object):
"""
blocker_cache = BlockerCache(None,
self._vartree.dbapi)
- dep_keys = ["RDEPEND", "PDEPEND"]
+ dep_keys = Package._runtime_keys
settings = self._vartree.settings
stale_cache = set(blocker_cache)
fake_vartree = self._fake_vartree
@@ -50,7 +51,7 @@ class BlockerDB(object):
stale_cache.discard(inst_pkg.cpv)
cached_blockers = blocker_cache.get(inst_pkg.cpv)
if cached_blockers is not None and \
- cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+ cached_blockers.counter != inst_pkg.counter:
cached_blockers = None
if cached_blockers is not None:
blocker_atoms = cached_blockers.atoms
@@ -71,9 +72,8 @@ class BlockerDB(object):
blocker_atoms = [atom for atom in atoms \
if atom.startswith("!")]
blocker_atoms.sort()
- counter = long(inst_pkg.metadata["COUNTER"])
blocker_cache[inst_pkg.cpv] = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(inst_pkg.counter, blocker_atoms)
for cpv in stale_cache:
del blocker_cache[cpv]
blocker_cache.flush()
@@ -92,7 +92,7 @@ class BlockerDB(object):
blocking_pkgs.update(blocker_parents.parent_nodes(atom))
# Check for blockers in the other direction.
- depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
+ depstr = " ".join(new_pkg._metadata[k] for k in dep_keys)
success, atoms = portage.dep_check(depstr,
vardb, settings, myuse=new_pkg.use.enabled,
trees=dep_check_trees, myroot=new_pkg.root)
diff --git a/pym/_emerge/CompositeTask.py b/pym/_emerge/CompositeTask.py
index 3e434780b..40cf8596b 100644
--- a/pym/_emerge/CompositeTask.py
+++ b/pym/_emerge/CompositeTask.py
@@ -142,6 +142,10 @@ class CompositeTask(AsynchronousTask):
a task.
"""
+ try:
+ task.scheduler = self.scheduler
+ except AttributeError:
+ pass
task.addExitListener(exit_handler)
self._current_task = task
task.start()
diff --git a/pym/_emerge/DepPriority.py b/pym/_emerge/DepPriority.py
index 3c2256a8e..34fdb481c 100644
--- a/pym/_emerge/DepPriority.py
+++ b/pym/_emerge/DepPriority.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractDepPriority import AbstractDepPriority
@@ -16,31 +16,38 @@ class DepPriority(AbstractDepPriority):
Attributes Hardness
- buildtime 0
- runtime -1
- runtime_post -2
- optional -3
- (none of the above) -4
+ buildtime_slot_op 0
+ buildtime -1
+ runtime -2
+ runtime_post -3
+ optional -4
+ (none of the above) -5
"""
if self.optional:
- return -3
- if self.buildtime:
+ return -4
+ if self.buildtime_slot_op:
return 0
- if self.runtime:
+ if self.buildtime:
return -1
- if self.runtime_post:
+ if self.runtime:
return -2
- return -4
+ if self.runtime_post:
+ return -3
+ return -5
def __str__(self):
if self.ignored:
return "ignored"
if self.optional:
return "optional"
+ if self.buildtime_slot_op:
+ return "buildtime_slot_op"
if self.buildtime:
return "buildtime"
+ if self.runtime_slot_op:
+ return "runtime_slot_op"
if self.runtime:
return "runtime"
if self.runtime_post:
diff --git a/pym/_emerge/DepPrioritySatisfiedRange.py b/pym/_emerge/DepPrioritySatisfiedRange.py
index edb29df96..391f5409b 100644
--- a/pym/_emerge/DepPrioritySatisfiedRange.py
+++ b/pym/_emerge/DepPrioritySatisfiedRange.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.DepPriority import DepPriority
@@ -7,17 +7,18 @@ class DepPrioritySatisfiedRange(object):
DepPriority Index Category
not satisfied and buildtime HARD
- not satisfied and runtime 6 MEDIUM
- not satisfied and runtime_post 5 MEDIUM_SOFT
+ not satisfied and runtime 7 MEDIUM
+ not satisfied and runtime_post 6 MEDIUM_SOFT
+ satisfied and buildtime_slot_op 5 SOFT
satisfied and buildtime 4 SOFT
satisfied and runtime 3 SOFT
satisfied and runtime_post 2 SOFT
optional 1 SOFT
(none of the above) 0 NONE
"""
- MEDIUM = 6
- MEDIUM_SOFT = 5
- SOFT = 4
+ MEDIUM = 7
+ MEDIUM_SOFT = 6
+ SOFT = 5
NONE = 0
@classmethod
@@ -50,6 +51,16 @@ class DepPrioritySatisfiedRange(object):
def _ignore_satisfied_buildtime(cls, priority):
if priority.__class__ is not DepPriority:
return False
+ if priority.optional:
+ return True
+ if priority.buildtime_slot_op:
+ return False
+ return bool(priority.satisfied)
+
+ @classmethod
+ def _ignore_satisfied_buildtime_slot_op(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
return bool(priority.optional or \
priority.satisfied)
@@ -80,6 +91,7 @@ DepPrioritySatisfiedRange.ignore_priority = (
DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
DepPrioritySatisfiedRange._ignore_satisfied_runtime,
DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime_slot_op,
DepPrioritySatisfiedRange._ignore_runtime_post,
DepPrioritySatisfiedRange._ignore_runtime
)
diff --git a/pym/_emerge/DependencyArg.py b/pym/_emerge/DependencyArg.py
index 80134c804..29a0072c4 100644
--- a/pym/_emerge/DependencyArg.py
+++ b/pym/_emerge/DependencyArg.py
@@ -1,9 +1,11 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
-from portage import _encodings, _unicode_encode, _unicode_decode
+from portage import _encodings, _unicode_encode
class DependencyArg(object):
@@ -31,10 +33,10 @@ class DependencyArg(object):
return hash((self.arg, self.root_config.root))
def __str__(self):
- # Force unicode format string for python-2.x safety,
+ # Use unicode_literals format string for python-2.x safety,
# ensuring that self.arg.__unicode__() is used
# when necessary.
- return _unicode_decode("%s") % (self.arg,)
+ return "%s" % (self.arg,)
if sys.hexversion < 0x3000000:
diff --git a/pym/_emerge/EbuildBuild.py b/pym/_emerge/EbuildBuild.py
index 784a3e298..e13b1cf39 100644
--- a/pym/_emerge/EbuildBuild.py
+++ b/pym/_emerge/EbuildBuild.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildExecuter import EbuildExecuter
@@ -10,11 +10,14 @@ from _emerge.EbuildMerge import EbuildMerge
from _emerge.EbuildFetchonly import EbuildFetchonly
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.TaskSequence import TaskSequence
+
from portage.util import writemsg
import portage
from portage import os
from portage.output import colorize
from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import _check_temp_dir
from portage.package.ebuild._spawn_nofetch import spawn_nofetch
@@ -35,7 +38,7 @@ class EbuildBuild(CompositeTask):
if rval != os.EX_OK:
self.returncode = rval
self._current_task = None
- self.wait()
+ self._async_wait()
return
root_config = pkg.root_config
@@ -60,7 +63,7 @@ class EbuildBuild(CompositeTask):
if not self._check_manifest():
self.returncode = 1
self._current_task = None
- self.wait()
+ self._async_wait()
return
prefetcher = self.prefetcher
@@ -91,7 +94,8 @@ class EbuildBuild(CompositeTask):
success = True
settings = self.settings
- if 'strict' in settings.features:
+ if 'strict' in settings.features and \
+ 'digest' not in settings.features:
settings['O'] = os.path.dirname(self._ebuild_path)
quiet_setting = settings.get('PORTAGE_QUIET')
settings['PORTAGE_QUIET'] = '1'
@@ -160,6 +164,10 @@ class EbuildBuild(CompositeTask):
if self.returncode != os.EX_OK:
portdb = self.pkg.root_config.trees[self._tree].dbapi
spawn_nofetch(portdb, self._ebuild_path, settings=self.settings)
+ elif 'digest' in self.settings.features:
+ if not digestgen(mysettings=self.settings,
+ myportdb=self.pkg.root_config.trees[self._tree].dbapi):
+ self.returncode = 1
self.wait()
def _pre_clean_exit(self, pre_clean_phase):
@@ -260,8 +268,8 @@ class EbuildBuild(CompositeTask):
# to be displayed for problematic packages even though they do
# not set RESTRICT=fetch (bug #336499).
- if 'fetch' not in self.pkg.metadata.restrict and \
- 'nofetch' not in self.pkg.metadata.defined_phases:
+ if 'fetch' not in self.pkg.restrict and \
+ 'nofetch' not in self.pkg.defined_phases:
self._unlock_builddir()
self.wait()
return
@@ -300,10 +308,20 @@ class EbuildBuild(CompositeTask):
self.scheduler.output(msg,
log_path=self.settings.get("PORTAGE_LOG_FILE"))
- packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
- scheduler=self.scheduler, settings=self.settings)
+ binpkg_tasks = TaskSequence()
+ requested_binpkg_formats = self.settings.get("PORTAGE_BINPKG_FORMAT", "tar").split()
+ for pkg_fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if pkg_fmt in requested_binpkg_formats:
+ if pkg_fmt == "rpm":
+ binpkg_tasks.add(EbuildPhase(background=self.background,
+ phase="rpm", scheduler=self.scheduler,
+ settings=self.settings))
+ else:
+ binpkg_tasks.add(EbuildBinpkg(background=self.background,
+ pkg=self.pkg, scheduler=self.scheduler,
+ settings=self.settings))
- self._start_task(packager, self._buildpkg_exit)
+ self._start_task(binpkg_tasks, self._buildpkg_exit)
def _buildpkg_exit(self, packager):
"""
diff --git a/pym/_emerge/EbuildBuildDir.py b/pym/_emerge/EbuildBuildDir.py
index 9773bd790..58905c2f6 100644
--- a/pym/_emerge/EbuildBuildDir.py
+++ b/pym/_emerge/EbuildBuildDir.py
@@ -7,7 +7,6 @@ import portage
from portage import os
from portage.exception import PortageException
from portage.util.SlotObject import SlotObject
-import errno
class EbuildBuildDir(SlotObject):
@@ -60,7 +59,7 @@ class EbuildBuildDir(SlotObject):
builddir_lock.wait()
self._assert_lock(builddir_lock)
self._lock_obj = builddir_lock
- self.settings['PORTAGE_BUILDIR_LOCKED'] = '1'
+ self.settings['PORTAGE_BUILDDIR_LOCKED'] = '1'
finally:
self.locked = self._lock_obj is not None
catdir_lock.unlock()
@@ -92,16 +91,14 @@ class EbuildBuildDir(SlotObject):
self._lock_obj.unlock()
self._lock_obj = None
self.locked = False
- self.settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+ self.settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
catdir_lock = AsynchronousLock(path=self._catdir, scheduler=self.scheduler)
catdir_lock.start()
if catdir_lock.wait() == os.EX_OK:
try:
os.rmdir(self._catdir)
- except OSError as e:
- if e.errno not in (errno.ENOENT,
- errno.ENOTEMPTY, errno.EEXIST, errno.EPERM):
- raise
+ except OSError:
+ pass
finally:
catdir_lock.unlock()
diff --git a/pym/_emerge/EbuildExecuter.py b/pym/_emerge/EbuildExecuter.py
index fd663a41d..5587d4eb0 100644
--- a/pym/_emerge/EbuildExecuter.py
+++ b/pym/_emerge/EbuildExecuter.py
@@ -16,16 +16,7 @@ class EbuildExecuter(CompositeTask):
_phases = ("prepare", "configure", "compile", "test", "install")
- _live_eclasses = frozenset([
- "bzr",
- "cvs",
- "darcs",
- "git",
- "git-2",
- "mercurial",
- "subversion",
- "tla",
- ])
+ _live_eclasses = portage.const.LIVE_ECLASSES
def _start(self):
pkg = self.pkg
@@ -83,7 +74,7 @@ class EbuildExecuter(CompositeTask):
pkg = self.pkg
phases = self._phases
- eapi = pkg.metadata["EAPI"]
+ eapi = pkg.eapi
if not eapi_has_src_prepare_and_src_configure(eapi):
# skip src_prepare and src_configure
phases = phases[2:]
diff --git a/pym/_emerge/EbuildFetcher.py b/pym/_emerge/EbuildFetcher.py
index c0a7fddaa..d98d00736 100644
--- a/pym/_emerge/EbuildFetcher.py
+++ b/pym/_emerge/EbuildFetcher.py
@@ -1,23 +1,22 @@
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import traceback
-
-from _emerge.SpawnProcess import SpawnProcess
import copy
import io
-import signal
import sys
+
import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage import _unicode_decode
+from portage.checksum import _hash_filter
from portage.elog.messages import eerror
from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.util._async.ForkProcess import ForkProcess
from portage.util._pty import _create_pty_or_pipe
-class EbuildFetcher(SpawnProcess):
+class EbuildFetcher(ForkProcess):
__slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
"pkg", "prefetch") + \
@@ -57,6 +56,9 @@ class EbuildFetcher(SpawnProcess):
if st.st_size != expected_size:
return False
+ hash_filter = _hash_filter(settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
stdout_orig = sys.stdout
stderr_orig = sys.stderr
global_havecolor = portage.output.havecolor
@@ -78,7 +80,7 @@ class EbuildFetcher(SpawnProcess):
break
continue
ok, st = _check_distfile(os.path.join(distdir, filename),
- mydigests, eout, show_errors=False)
+ mydigests, eout, show_errors=False, hash_filter=hash_filter)
if not ok:
success = False
break
@@ -115,13 +117,13 @@ class EbuildFetcher(SpawnProcess):
msg_lines.append(msg)
self._eerror(msg_lines)
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
if not uri_map:
# Nothing to fetch.
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
settings = self.config_pool.allocate()
@@ -133,7 +135,7 @@ class EbuildFetcher(SpawnProcess):
self._prefetch_size_ok(uri_map, settings, ebuild_path):
self.config_pool.deallocate(settings)
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
nocolor = settings.get("NOCOLOR")
@@ -148,7 +150,7 @@ class EbuildFetcher(SpawnProcess):
settings["NOCOLOR"] = nocolor
self._settings = settings
- SpawnProcess._start(self)
+ ForkProcess._start(self)
# Free settings now since it's no longer needed in
# this process (the subprocess has a private copy).
@@ -156,48 +158,20 @@ class EbuildFetcher(SpawnProcess):
settings = None
self._settings = None
- def _spawn(self, args, fd_pipes=None, **kwargs):
- """
- Fork a subprocess, apply local settings, and call fetch().
- """
-
- pid = os.fork()
- if pid != 0:
- if not isinstance(pid, int):
- raise AssertionError(
- "fork returned non-integer: %s" % (repr(pid),))
- portage.process.spawned_pids.append(pid)
- return [pid]
-
- portage.locks._close_fds()
- # Disable close_fds since we don't exec (see _setup_pipes docstring).
- portage.process._setup_pipes(fd_pipes, close_fds=False)
-
- # Use default signal handlers in order to avoid problems
- # killing subprocesses as reported in bug #353239.
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
+ def _run(self):
# Force consistent color output, in case we are capturing fetch
# output through a normal pipe due to unavailability of ptys.
portage.output.havecolor = self._settings.get('NOCOLOR') \
not in ('yes', 'true')
rval = 1
- allow_missing = self._get_manifest().allow_missing
- try:
- if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
- digests=copy.deepcopy(self._get_digests()),
- allow_missing_digests=allow_missing):
- rval = os.EX_OK
- except SystemExit:
- raise
- except:
- traceback.print_exc()
- finally:
- # Call os._exit() from finally block, in order to suppress any
- # finally blocks from earlier in the call stack. See bug #345289.
- os._exit(rval)
+ allow_missing = self._get_manifest().allow_missing or \
+ 'digest' in self._settings.features
+ if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
+ digests=copy.deepcopy(self._get_digests()),
+ allow_missing_digests=allow_missing):
+ rval = os.EX_OK
+ return rval
def _get_ebuild_path(self):
if self.ebuild_path is not None:
@@ -297,7 +271,7 @@ class EbuildFetcher(SpawnProcess):
self.scheduler.output(msg, log_path=self.logfile)
def _set_returncode(self, wait_retval):
- SpawnProcess._set_returncode(self, wait_retval)
+ ForkProcess._set_returncode(self, wait_retval)
# Collect elog messages that might have been
# created by the pkg_nofetch phase.
# Skip elog messages for prefetch, in order to avoid duplicates.
diff --git a/pym/_emerge/EbuildMetadataPhase.py b/pym/_emerge/EbuildMetadataPhase.py
index c2d3747f7..bbb1ca9dc 100644
--- a/pym/_emerge/EbuildMetadataPhase.py
+++ b/pym/_emerge/EbuildMetadataPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SubProcess import SubProcess
@@ -6,12 +6,14 @@ import sys
from portage.cache.mappings import slot_dict_class
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.package.ebuild._eapi_invalid:eapi_invalid',
+ 'portage.package.ebuild._metadata_invalid:eapi_invalid',
)
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
+from portage.dep import extract_unpack_dependencies
+from portage.eapi import eapi_has_automatic_unpack_dependencies
import errno
import fcntl
@@ -25,12 +27,11 @@ class EbuildMetadataPhase(SubProcess):
"""
__slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
- "metadata", "portdb", "repo_path", "settings") + \
+ "metadata", "portdb", "repo_path", "settings", "write_auxdb") + \
("_eapi", "_eapi_lineno", "_raw_metadata",)
_file_names = ("ebuild",)
_files_dict = slot_dict_class(_file_names, prefix="")
- _metadata_fd = 9
def _start(self):
ebuild_path = self.ebuild_hash.location
@@ -49,14 +50,14 @@ class EbuildMetadataPhase(SubProcess):
# An empty EAPI setting is invalid.
self._eapi_invalid(None)
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
if not self.eapi_supported:
self.metadata = {"EAPI": parsed_eapi}
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
settings = self.settings
@@ -74,28 +75,41 @@ class EbuildMetadataPhase(SubProcess):
null_input = open('/dev/null', 'rb')
fd_pipes.setdefault(0, null_input.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
# flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
for fd in fd_pipes.values():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
self._files = self._files_dict()
files = self._files
master_fd, slave_fd = os.pipe()
+
fcntl.fcntl(master_fd, fcntl.F_SETFL,
fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
- fd_pipes[self._metadata_fd] = slave_fd
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(master_fd, fcntl.F_SETFD,
+ fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ fd_pipes[slave_fd] = slave_fd
+ settings["PORTAGE_PIPE_FD"] = str(slave_fd)
self._raw_metadata = []
files.ebuild = master_fd
- self._reg_id = self.scheduler.register(files.ebuild,
+ self._reg_id = self.scheduler.io_add_watch(files.ebuild,
self._registered_events, self._output_handler)
self._registered = True
@@ -103,6 +117,7 @@ class EbuildMetadataPhase(SubProcess):
settings=settings, debug=debug,
mydbapi=self.portdb, tree="porttree",
fd_pipes=fd_pipes, returnpid=True)
+ settings.pop("PORTAGE_PIPE_FD", None)
os.close(slave_fd)
null_input.close()
@@ -111,11 +126,10 @@ class EbuildMetadataPhase(SubProcess):
# doebuild failed before spawning
self._unregister()
self._set_returncode((self.pid, retval << 8))
- self.wait()
+ self._async_wait()
return
self.pid = retval[0]
- portage.process.spawned_pids.remove(self.pid)
def _output_handler(self, fd, event):
@@ -141,8 +155,7 @@ class EbuildMetadataPhase(SubProcess):
def _set_returncode(self, wait_retval):
SubProcess._set_returncode(self, wait_retval)
# self._raw_metadata is None when _start returns
- # early due to an unsupported EAPI detected with
- # FEATURES=parse-eapi-ebuild-head
+ # early due to an unsupported EAPI
if self.returncode == os.EX_OK and \
self._raw_metadata is not None:
metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
@@ -163,8 +176,7 @@ class EbuildMetadataPhase(SubProcess):
if (not metadata["EAPI"] or self.eapi_supported) and \
metadata["EAPI"] != parsed_eapi:
self._eapi_invalid(metadata)
- if 'parse-eapi-ebuild-head' in self.settings.features:
- metadata_valid = False
+ metadata_valid = False
if metadata_valid:
# Since we're supposed to be able to efficiently obtain the
@@ -181,8 +193,18 @@ class EbuildMetadataPhase(SubProcess):
metadata["_eclasses_"] = {}
metadata.pop("INHERITED", None)
- self.portdb._write_cache(self.cpv,
- self.repo_path, metadata, self.ebuild_hash)
+ if eapi_has_automatic_unpack_dependencies(metadata["EAPI"]):
+ repo = self.portdb.repositories.get_name_for_location(self.repo_path)
+ unpackers = self.settings.unpack_dependencies.get(repo, {}).get(metadata["EAPI"], {})
+ unpack_dependencies = extract_unpack_dependencies(metadata["SRC_URI"], unpackers)
+ if unpack_dependencies:
+ metadata["DEPEND"] += (" " if metadata["DEPEND"] else "") + unpack_dependencies
+
+ # If called by egencache, this cache write is
+ # undesirable when metadata-transfer is disabled.
+ if self.write_auxdb is not False:
+ self.portdb._write_cache(self.cpv,
+ self.repo_path, metadata, self.ebuild_hash)
else:
metadata = {"EAPI": metadata["EAPI"]}
self.metadata = metadata
diff --git a/pym/_emerge/EbuildPhase.py b/pym/_emerge/EbuildPhase.py
index fe44abcbd..b1f7c21df 100644
--- a/pym/_emerge/EbuildPhase.py
+++ b/pym/_emerge/EbuildPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import gzip
@@ -11,6 +11,7 @@ from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.EbuildProcess import EbuildProcess
from _emerge.CompositeTask import CompositeTask
+from portage.package.ebuild.prepare_build_dirs import _prepare_workdir
from portage.util import writemsg
try:
@@ -38,7 +39,7 @@ from portage import _unicode_encode
class EbuildPhase(CompositeTask):
- __slots__ = ("actionmap", "phase", "settings") + \
+ __slots__ = ("actionmap", "fd_pipes", "phase", "settings") + \
("_ebuild_lock",)
# FEATURES displayed prior to setup phase
@@ -156,8 +157,7 @@ class EbuildPhase(CompositeTask):
return
self._start_ebuild()
- def _start_ebuild(self):
-
+ def _get_log_path(self):
# Don't open the log file during the clean phase since the
# open file can result in an nfs lock on $T/build.log which
# prevents the clean phase from removing $T.
@@ -165,17 +165,21 @@ class EbuildPhase(CompositeTask):
if self.phase not in ("clean", "cleanrm") and \
self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
logfile = self.settings.get("PORTAGE_LOG_FILE")
+ return logfile
+
+ def _start_ebuild(self):
- fd_pipes = None
- if not self.background and self.phase == 'nofetch':
- # All the pkg_nofetch output goes to stderr since
- # it's considered to be an error message.
- fd_pipes = {1 : sys.stderr.fileno()}
+ fd_pipes = self.fd_pipes
+ if fd_pipes is None:
+ if not self.background and self.phase == 'nofetch':
+ # All the pkg_nofetch output goes to stderr since
+ # it's considered to be an error message.
+ fd_pipes = {1 : sys.__stderr__.fileno()}
ebuild_process = EbuildProcess(actionmap=self.actionmap,
- background=self.background, fd_pipes=fd_pipes, logfile=logfile,
- phase=self.phase, scheduler=self.scheduler,
- settings=self.settings)
+ background=self.background, fd_pipes=fd_pipes,
+ logfile=self._get_log_path(), phase=self.phase,
+ scheduler=self.scheduler, settings=self.settings)
self._start_task(ebuild_process, self._ebuild_exit)
@@ -189,16 +193,21 @@ class EbuildPhase(CompositeTask):
if self._default_exit(ebuild_process) != os.EX_OK:
if self.phase == "test" and \
"test-fail-continue" in self.settings.features:
- pass
+ # mark test phase as complete (bug #452030)
+ try:
+ open(_unicode_encode(os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], ".tested"),
+ encoding=_encodings['fs'], errors='strict'),
+ 'wb').close()
+ except OSError:
+ pass
else:
fail = True
if not fail:
self.returncode = None
- logfile = None
- if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
- logfile = self.settings.get("PORTAGE_LOG_FILE")
+ logfile = self._get_log_path()
if self.phase == "install":
out = io.StringIO()
@@ -213,7 +222,14 @@ class EbuildPhase(CompositeTask):
settings = self.settings
_post_phase_userpriv_perms(settings)
- if self.phase == "install":
+ if self.phase == "unpack":
+ # Bump WORKDIR timestamp, in case tar gave it a timestamp
+ # that will interfere with distfiles / WORKDIR timestamp
+ # comparisons as reported in bug #332217. Also, fix
+ # ownership since tar can change that too.
+ os.utime(settings["WORKDIR"], None)
+ _prepare_workdir(settings)
+ elif self.phase == "install":
out = io.StringIO()
_post_src_install_write_metadata(settings)
_post_src_install_uid_fix(settings, out)
@@ -235,8 +251,9 @@ class EbuildPhase(CompositeTask):
fd, logfile = tempfile.mkstemp()
os.close(fd)
post_phase = MiscFunctionsProcess(background=self.background,
- commands=post_phase_cmds, logfile=logfile, phase=self.phase,
- scheduler=self.scheduler, settings=settings)
+ commands=post_phase_cmds, fd_pipes=self.fd_pipes,
+ logfile=logfile, phase=self.phase, scheduler=self.scheduler,
+ settings=settings)
self._start_task(post_phase, self._post_phase_exit)
return
@@ -311,8 +328,9 @@ class EbuildPhase(CompositeTask):
self.returncode = None
phase = 'die_hooks'
die_hooks = MiscFunctionsProcess(background=self.background,
- commands=[phase], phase=phase,
- scheduler=self.scheduler, settings=self.settings)
+ commands=[phase], phase=phase, logfile=self._get_log_path(),
+ fd_pipes=self.fd_pipes, scheduler=self.scheduler,
+ settings=self.settings)
self._start_task(die_hooks, self._die_hooks_exit)
def _die_hooks_exit(self, die_hooks):
@@ -331,7 +349,8 @@ class EbuildPhase(CompositeTask):
portage.elog.elog_process(self.settings.mycpv, self.settings)
phase = "clean"
clean_phase = EbuildPhase(background=self.background,
- phase=phase, scheduler=self.scheduler, settings=self.settings)
+ fd_pipes=self.fd_pipes, phase=phase, scheduler=self.scheduler,
+ settings=self.settings)
self._start_task(clean_phase, self._fail_clean_exit)
return
diff --git a/pym/_emerge/EbuildProcess.py b/pym/_emerge/EbuildProcess.py
index ce97aff0f..333ad7bd0 100644
--- a/pym/_emerge/EbuildProcess.py
+++ b/pym/_emerge/EbuildProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -17,5 +17,11 @@ class EbuildProcess(AbstractEbuildProcess):
if actionmap is None:
actionmap = _spawn_actionmap(self.settings)
- return _doebuild_spawn(self.phase, self.settings,
- actionmap=actionmap, **kwargs)
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ try:
+ return _doebuild_spawn(self.phase, self.settings,
+ actionmap=actionmap, **kwargs)
+ finally:
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/pym/_emerge/EbuildSpawnProcess.py b/pym/_emerge/EbuildSpawnProcess.py
index e1f682a66..26d26fc77 100644
--- a/pym/_emerge/EbuildSpawnProcess.py
+++ b/pym/_emerge/EbuildSpawnProcess.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -13,4 +13,10 @@ class EbuildSpawnProcess(AbstractEbuildProcess):
__slots__ = ('fakeroot_state', 'spawn_func')
def _spawn(self, args, **kwargs):
- return self.spawn_func(args, env=self.settings.environ(), **kwargs)
+
+ env = self.settings.environ()
+
+ if self._dummy_pipe_fd is not None:
+ env["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ return self.spawn_func(args, env=env, **kwargs)
diff --git a/pym/_emerge/FakeVartree.py b/pym/_emerge/FakeVartree.py
index ce15f5a36..14be50c7f 100644
--- a/pym/_emerge/FakeVartree.py
+++ b/pym/_emerge/FakeVartree.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
import warnings
@@ -10,11 +12,11 @@ from _emerge.Package import Package
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from portage.const import VDB_PATH
from portage.dbapi.vartree import vartree
-from portage.dep._slot_abi import find_built_slot_abi_atoms
+from portage.dep._slot_operator import find_built_slot_operator_atoms
from portage.eapi import _get_eapi_attrs
-from portage.exception import InvalidDependString
-from portage.repository.config import _gen_valid_repo
+from portage.exception import InvalidData, InvalidDependString
from portage.update import grab_updates, parse_updates, update_dbentries
+from portage.versions import _pkg_str
if sys.hexversion >= 0x3000000:
long = int
@@ -33,6 +35,9 @@ class FakeVardbapi(PackageVirtualDbapi):
path =os.path.join(path, filename)
return path
+class _DynamicDepsNotApplicable(Exception):
+ pass
+
class FakeVartree(vartree):
"""This is implements an in-memory copy of a vartree instance that provides
all the interfaces required for use by the depgraph. The vardb is locked
@@ -45,10 +50,10 @@ class FakeVartree(vartree):
is not a matching ebuild in the tree). Instances of this class are not
populated until the sync() method is called."""
def __init__(self, root_config, pkg_cache=None, pkg_root_config=None,
- dynamic_deps=True, ignore_built_slot_abi_deps=False):
+ dynamic_deps=True, ignore_built_slot_operator_deps=False):
self._root_config = root_config
self._dynamic_deps = dynamic_deps
- self._ignore_built_slot_abi_deps = ignore_built_slot_abi_deps
+ self._ignore_built_slot_operator_deps = ignore_built_slot_operator_deps
if pkg_root_config is None:
pkg_root_config = self._root_config
self._pkg_root_config = pkg_root_config
@@ -75,7 +80,7 @@ class FakeVartree(vartree):
self.dbapi.aux_get = self._aux_get_wrapper
self.dbapi.match = self._match_wrapper
self._aux_get_history = set()
- self._portdb_keys = ["EAPI", "KEYWORDS", "DEPEND", "RDEPEND", "PDEPEND"]
+ self._portdb_keys = Package._dep_keys + ("EAPI", "KEYWORDS")
self._portdb = portdb
self._global_updates = None
@@ -102,29 +107,30 @@ class FakeVartree(vartree):
self._aux_get_wrapper(cpv, [])
return matches
- def _aux_get_wrapper(self, pkg, wants, myrepo=None):
- if pkg in self._aux_get_history:
- return self._aux_get(pkg, wants)
- self._aux_get_history.add(pkg)
- # We need to check the EAPI, and this also raises
- # a KeyError to the caller if appropriate.
- pkg_obj = self.dbapi._cpv_map[pkg]
- installed_eapi = pkg_obj.metadata['EAPI']
- repo = pkg_obj.metadata['repository']
- eapi_attrs = _get_eapi_attrs(installed_eapi)
- built_slot_abi_atoms = None
-
- if eapi_attrs.slot_abi and not self._ignore_built_slot_abi_deps:
- try:
- built_slot_abi_atoms = find_built_slot_abi_atoms(pkg_obj)
- except InvalidDependString:
- pass
+ def _aux_get_wrapper(self, cpv, wants, myrepo=None):
+ if cpv in self._aux_get_history:
+ return self._aux_get(cpv, wants)
+ self._aux_get_history.add(cpv)
+
+ # This raises a KeyError to the caller if appropriate.
+ pkg = self.dbapi._cpv_map[cpv]
try:
- # Use the live ebuild metadata if possible.
- repo = _gen_valid_repo(repo)
live_metadata = dict(zip(self._portdb_keys,
- self._portdb.aux_get(pkg, self._portdb_keys, myrepo=repo)))
+ self._portdb.aux_get(cpv, self._portdb_keys,
+ myrepo=pkg.repo)))
+ except (KeyError, portage.exception.PortageException):
+ live_metadata = None
+
+ self._apply_dynamic_deps(pkg, live_metadata)
+
+ return self._aux_get(cpv, wants)
+
+ def _apply_dynamic_deps(self, pkg, live_metadata):
+
+ try:
+ if live_metadata is None:
+ raise _DynamicDepsNotApplicable()
# Use the metadata from the installed instance if the EAPI
# of either instance is unsupported, since if the installed
# instance has an unsupported or corrupt EAPI then we don't
@@ -134,26 +140,46 @@ class FakeVartree(vartree):
# order to respect dep updates without revision bump or EAPI
# bump, as in bug #368725.
if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
- portage.eapi_is_supported(installed_eapi)):
- raise KeyError(pkg)
+ portage.eapi_is_supported(pkg.eapi)):
+ raise _DynamicDepsNotApplicable()
- # preserve built SLOT/ABI := operator deps
- if built_slot_abi_atoms:
+ # preserve built slot/sub-slot := operator deps
+ built_slot_operator_atoms = None
+ if not self._ignore_built_slot_operator_deps and \
+ _get_eapi_attrs(pkg.eapi).slot_operator:
+ try:
+ built_slot_operator_atoms = \
+ find_built_slot_operator_atoms(pkg)
+ except InvalidDependString:
+ pass
+
+ if built_slot_operator_atoms:
live_eapi_attrs = _get_eapi_attrs(live_metadata["EAPI"])
- if not live_eapi_attrs.slot_abi:
- raise KeyError(pkg)
- for k, v in built_slot_abi_atoms.items():
+ if not live_eapi_attrs.slot_operator:
+ raise _DynamicDepsNotApplicable()
+ for k, v in built_slot_operator_atoms.items():
live_metadata[k] += (" " +
" ".join(_unicode(atom) for atom in v))
- self.dbapi.aux_update(pkg, live_metadata)
- except (KeyError, portage.exception.PortageException):
+ self.dbapi.aux_update(pkg.cpv, live_metadata)
+ except _DynamicDepsNotApplicable:
if self._global_updates is None:
self._global_updates = \
grab_global_updates(self._portdb)
+
+ # Bypass _aux_get_wrapper, since calling that
+ # here would trigger infinite recursion.
+ aux_keys = Package._dep_keys + self.dbapi._pkg_str_aux_keys
+ aux_dict = dict(zip(aux_keys, self._aux_get(pkg.cpv, aux_keys)))
perform_global_updates(
- pkg, self.dbapi, self._global_updates)
- return self._aux_get(pkg, wants)
+ pkg.cpv, aux_dict, self.dbapi, self._global_updates)
+
+ def dynamic_deps_preload(self, pkg, metadata):
+ if metadata is not None:
+ metadata = dict((k, metadata.get(k, ''))
+ for k in self._portdb_keys)
+ self._apply_dynamic_deps(pkg, metadata)
+ self._aux_get_history.add(pkg.cpv)
def cpv_discard(self, pkg):
"""
@@ -251,12 +277,6 @@ class FakeVartree(vartree):
root_config=self._pkg_root_config,
type_name="installed")
- try:
- mycounter = long(pkg.metadata["COUNTER"])
- except ValueError:
- mycounter = 0
- pkg.metadata["COUNTER"] = str(mycounter)
-
self._pkg_cache[pkg] = pkg
return pkg
@@ -285,13 +305,14 @@ def grab_global_updates(portdb):
return retupdates
-def perform_global_updates(mycpv, mydb, myupdates):
- aux_keys = ["DEPEND", "EAPI", "RDEPEND", "PDEPEND", 'repository']
- aux_dict = dict(zip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
- eapi = aux_dict.pop('EAPI')
- repository = aux_dict.pop('repository')
+def perform_global_updates(mycpv, aux_dict, mydb, myupdates):
+ try:
+ pkg = _pkg_str(mycpv, metadata=aux_dict, settings=mydb.settings)
+ except InvalidData:
+ return
+ aux_dict = dict((k, aux_dict[k]) for k in Package._dep_keys)
try:
- mycommands = myupdates[repository]
+ mycommands = myupdates[pkg.repo]
except KeyError:
try:
mycommands = myupdates['DEFAULT']
@@ -301,6 +322,6 @@ def perform_global_updates(mycpv, mydb, myupdates):
if not mycommands:
return
- updates = update_dbentries(mycommands, aux_dict, eapi=eapi)
+ updates = update_dbentries(mycommands, aux_dict, parent=pkg)
if updates:
mydb.aux_update(mycpv, updates)
diff --git a/pym/_emerge/FifoIpcDaemon.py b/pym/_emerge/FifoIpcDaemon.py
index fcc4ab4b9..7468de5e2 100644
--- a/pym/_emerge/FifoIpcDaemon.py
+++ b/pym/_emerge/FifoIpcDaemon.py
@@ -1,6 +1,14 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import sys
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
from portage import os
from _emerge.AbstractPollTask import AbstractPollTask
from portage.cache.mappings import slot_dict_class
@@ -21,7 +29,18 @@ class FifoIpcDaemon(AbstractPollTask):
self._files.pipe_in = \
os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(
self._files.pipe_in,
self._registered_events, self._input_handler)
@@ -32,11 +51,23 @@ class FifoIpcDaemon(AbstractPollTask):
Re-open the input stream, in order to suppress
POLLHUP events (bug #339976).
"""
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
os.close(self._files.pipe_in)
self._files.pipe_in = \
os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(
self._files.pipe_in,
self._registered_events, self._input_handler)
@@ -47,6 +78,8 @@ class FifoIpcDaemon(AbstractPollTask):
if self.returncode is None:
self.returncode = 1
self._unregister()
+ # notify exit listeners
+ self.wait()
def _wait(self):
if self.returncode is not None:
@@ -67,7 +100,7 @@ class FifoIpcDaemon(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
diff --git a/pym/_emerge/JobStatusDisplay.py b/pym/_emerge/JobStatusDisplay.py
index 5b9b2216f..9f6f09be0 100644
--- a/pym/_emerge/JobStatusDisplay.py
+++ b/pym/_emerge/JobStatusDisplay.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import formatter
import io
import sys
@@ -9,7 +11,6 @@ import time
import portage
from portage import os
from portage import _encodings
-from portage import _unicode_decode
from portage import _unicode_encode
from portage.output import xtermTitle
@@ -121,7 +122,8 @@ class JobStatusDisplay(object):
term_codes = {}
for k, capname in self._termcap_name_map.items():
- code = tigetstr(capname)
+ # Use _native_string for PyPy compat (bug #470258).
+ code = tigetstr(portage._native_string(capname))
if code is None:
code = self._default_term_codes[capname]
term_codes[k] = code
@@ -233,10 +235,10 @@ class JobStatusDisplay(object):
def _display_status(self):
# Don't use len(self._completed_tasks) here since that also
# can include uninstall tasks.
- curval_str = str(self.curval)
- maxval_str = str(self.maxval)
- running_str = str(self.running)
- failed_str = str(self.failed)
+ curval_str = "%s" % (self.curval,)
+ maxval_str = "%s" % (self.maxval,)
+ running_str = "%s" % (self.running,)
+ failed_str = "%s" % (self.failed,)
load_avg_str = self._load_avg_str()
color_output = io.StringIO()
@@ -248,36 +250,36 @@ class JobStatusDisplay(object):
f = formatter.AbstractFormatter(style_writer)
number_style = "INFORM"
- f.add_literal_data(_unicode_decode("Jobs: "))
+ f.add_literal_data("Jobs: ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(curval_str))
+ f.add_literal_data(curval_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" of "))
+ f.add_literal_data(" of ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(maxval_str))
+ f.add_literal_data(maxval_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" complete"))
+ f.add_literal_data(" complete")
if self.running:
- f.add_literal_data(_unicode_decode(", "))
+ f.add_literal_data(", ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(running_str))
+ f.add_literal_data(running_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" running"))
+ f.add_literal_data(" running")
if self.failed:
- f.add_literal_data(_unicode_decode(", "))
+ f.add_literal_data(", ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(failed_str))
+ f.add_literal_data(failed_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" failed"))
+ f.add_literal_data(" failed")
padding = self._jobs_column_width - len(plain_output.getvalue())
if padding > 0:
- f.add_literal_data(padding * _unicode_decode(" "))
+ f.add_literal_data(padding * " ")
- f.add_literal_data(_unicode_decode("Load avg: "))
- f.add_literal_data(_unicode_decode(load_avg_str))
+ f.add_literal_data("Load avg: ")
+ f.add_literal_data(load_avg_str)
# Truncate to fit width, to avoid making the terminal scroll if the
# line overflows (happens when the load average is large).
diff --git a/pym/_emerge/MergeListItem.py b/pym/_emerge/MergeListItem.py
index 8086c689a..938f8014a 100644
--- a/pym/_emerge/MergeListItem.py
+++ b/pym/_emerge/MergeListItem.py
@@ -1,7 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
+from portage.dep import _repo_separator
from portage.output import colorize
from _emerge.AsynchronousTask import AsynchronousTask
@@ -32,7 +33,7 @@ class MergeListItem(CompositeTask):
if pkg.installed:
# uninstall, executed by self.merge()
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
args_set = self.args_set
@@ -47,7 +48,9 @@ class MergeListItem(CompositeTask):
action_desc = "Emerging"
preposition = "for"
+ pkg_color = "PKG_MERGE"
if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
action_desc += " binary"
if build_opts.fetchonly:
@@ -57,16 +60,7 @@ class MergeListItem(CompositeTask):
(action_desc,
colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
- colorize("GOOD", pkg.cpv))
-
- portdb = pkg.root_config.trees["porttree"].dbapi
- portdir_repo_name = portdb.getRepositoryName(portdb.porttree_root)
- if portdir_repo_name:
- pkg_repo_name = pkg.repo
- if pkg_repo_name != portdir_repo_name:
- if pkg_repo_name == pkg.UNKNOWN_REPO:
- pkg_repo_name = "unknown repo"
- msg += " from %s" % pkg_repo_name
+ colorize(pkg_color, pkg.cpv + _repo_separator + pkg.repo))
if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
diff --git a/pym/_emerge/MetadataRegen.py b/pym/_emerge/MetadataRegen.py
index e82015fd1..d92b6a06e 100644
--- a/pym/_emerge/MetadataRegen.py
+++ b/pym/_emerge/MetadataRegen.py
@@ -1,18 +1,20 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
from portage.dep import _repo_separator
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
-from _emerge.PollScheduler import PollScheduler
+from portage.cache.cache_errors import CacheError
+from portage.util._async.AsyncScheduler import AsyncScheduler
-class MetadataRegen(PollScheduler):
+class MetadataRegen(AsyncScheduler):
def __init__(self, portdb, cp_iter=None, consumer=None,
- max_jobs=None, max_load=None):
- PollScheduler.__init__(self, main=True)
+ write_auxdb=True, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
self._portdb = portdb
+ self._write_auxdb = write_auxdb
self._global_cleanse = False
if cp_iter is None:
cp_iter = self._iter_every_cp()
@@ -22,34 +24,21 @@ class MetadataRegen(PollScheduler):
self._cp_iter = cp_iter
self._consumer = consumer
- if max_jobs is None:
- max_jobs = 1
-
- self._max_jobs = max_jobs
- self._max_load = max_load
-
self._valid_pkgs = set()
self._cp_set = set()
self._process_iter = self._iter_metadata_processes()
- self.returncode = os.EX_OK
- self._error_count = 0
self._running_tasks = set()
- self._remaining_tasks = True
- def _terminate_tasks(self):
- for task in list(self._running_tasks):
- task.cancel()
+ def _next_task(self):
+ return next(self._process_iter)
def _iter_every_cp(self):
- portage.writemsg_stdout("Listing available packages...\n")
- every_cp = self._portdb.cp_all()
- portage.writemsg_stdout("Regenerating cache entries...\n")
- every_cp.sort(reverse=True)
- try:
- while not self._terminated_tasks:
- yield every_cp.pop()
- except IndexError:
- pass
+ # List categories individually, in order to start yielding quicker,
+ # and in order to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
def _iter_metadata_processes(self):
portdb = self._portdb
@@ -57,8 +46,9 @@ class MetadataRegen(PollScheduler):
cp_set = self._cp_set
consumer = self._consumer
+ portage.writemsg_stdout("Regenerating cache entries...\n")
for cp in self._cp_iter:
- if self._terminated_tasks:
+ if self._terminated.is_set():
break
cp_set.add(cp)
portage.writemsg_stdout("Processing %s\n" % cp)
@@ -68,7 +58,7 @@ class MetadataRegen(PollScheduler):
repo = portdb.repositories.get_repo_for_location(mytree)
cpv_list = portdb.cp_list(cp, mytree=[repo.location])
for cpv in cpv_list:
- if self._terminated_tasks:
+ if self._terminated.is_set():
break
valid_pkgs.add(cpv)
ebuild_path, repo_path = portdb.findname2(cpv, myrepo=repo.name)
@@ -84,22 +74,21 @@ class MetadataRegen(PollScheduler):
yield EbuildMetadataPhase(cpv=cpv,
ebuild_hash=ebuild_hash,
portdb=portdb, repo_path=repo_path,
- settings=portdb.doebuild_settings)
+ settings=portdb.doebuild_settings,
+ write_auxdb=self._write_auxdb)
- def _keep_scheduling(self):
- return self._remaining_tasks and not self._terminated_tasks
+ def _wait(self):
- def run(self):
+ AsyncScheduler._wait(self)
portdb = self._portdb
- from portage.cache.cache_errors import CacheError
dead_nodes = {}
- self._main_loop()
-
+ self._termination_check()
if self._terminated_tasks:
- self.returncode = 1
- return
+ portdb.flush_cache()
+ self.returncode = self._cancelled_returncode
+ return self.returncode
if self._global_cleanse:
for mytree in portdb.porttrees:
@@ -142,29 +131,12 @@ class MetadataRegen(PollScheduler):
except (KeyError, CacheError):
pass
- def _schedule_tasks(self):
- if self._terminated_tasks:
- return
-
- while self._can_add_job():
- try:
- metadata_process = next(self._process_iter)
- except StopIteration:
- self._remaining_tasks = False
- return
-
- self._jobs += 1
- self._running_tasks.add(metadata_process)
- metadata_process.scheduler = self.sched_iface
- metadata_process.addExitListener(self._metadata_exit)
- metadata_process.start()
-
- def _metadata_exit(self, metadata_process):
- self._jobs -= 1
- self._running_tasks.discard(metadata_process)
+ portdb.flush_cache()
+ return self.returncode
+
+ def _task_exit(self, metadata_process):
+
if metadata_process.returncode != os.EX_OK:
- self.returncode = 1
- self._error_count += 1
self._valid_pkgs.discard(metadata_process.cpv)
if not self._terminated_tasks:
portage.writemsg("Error processing %s, continuing...\n" % \
@@ -179,5 +151,4 @@ class MetadataRegen(PollScheduler):
metadata_process.ebuild_hash,
metadata_process.eapi_supported)
- self._schedule()
-
+ AsyncScheduler._task_exit(self, metadata_process)
diff --git a/pym/_emerge/MiscFunctionsProcess.py b/pym/_emerge/MiscFunctionsProcess.py
index afa44fb2a..bada79d86 100644
--- a/pym/_emerge/MiscFunctionsProcess.py
+++ b/pym/_emerge/MiscFunctionsProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -29,6 +29,10 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
AbstractEbuildProcess._start(self)
def _spawn(self, args, **kwargs):
+
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
# Temporarily unset EBUILD_PHASE so that bashrc code doesn't
# think this is a real phase.
phase_backup = self.settings.pop("EBUILD_PHASE", None)
@@ -37,3 +41,4 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
finally:
if phase_backup is not None:
self.settings["EBUILD_PHASE"] = phase_backup
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/pym/_emerge/Package.py b/pym/_emerge/Package.py
index 14d069449..a09f73c59 100644
--- a/pym/_emerge/Package.py
+++ b/pym/_emerge/Package.py
@@ -1,8 +1,12 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
from itertools import chain
+import warnings
+
import portage
from portage import _encodings, _unicode_decode, _unicode_encode
from portage.cache.mappings import slot_dict_class
@@ -10,67 +14,82 @@ from portage.const import EBUILD_PHASES
from portage.dep import Atom, check_required_use, use_reduce, \
paren_enclose, _slot_separator, _repo_separator
from portage.versions import _pkg_str, _unknown_repo
-from portage.eapi import _get_eapi_attrs
+from portage.eapi import _get_eapi_attrs, eapi_has_use_aliases
from portage.exception import InvalidDependString
+from portage.localization import _
from _emerge.Task import Task
if sys.hexversion >= 0x3000000:
basestring = str
long = int
+ _unicode = str
+else:
+ _unicode = unicode
class Package(Task):
__hash__ = Task.__hash__
__slots__ = ("built", "cpv", "depth",
- "installed", "metadata", "onlydeps", "operation",
+ "installed", "onlydeps", "operation",
"root_config", "type_name",
"category", "counter", "cp", "cpv_split",
"inherited", "iuse", "mtime",
- "pf", "root", "slot", "slot_abi", "slot_atom", "version") + \
- ("_invalid", "_raw_metadata", "_masks", "_use",
+ "pf", "root", "slot", "sub_slot", "slot_atom", "version") + \
+ ("_invalid", "_masks", "_metadata", "_raw_metadata", "_use",
"_validated_atoms", "_visible")
metadata_keys = [
"BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "EAPI",
- "INHERITED", "IUSE", "KEYWORDS",
+ "HDEPEND", "INHERITED", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE",
"_mtime_", "DEFINED_PHASES", "REQUIRED_USE"]
- _dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND',)
+ _dep_keys = ('DEPEND', 'HDEPEND', 'PDEPEND', 'RDEPEND')
+ _buildtime_keys = ('DEPEND', 'HDEPEND')
+ _runtime_keys = ('PDEPEND', 'RDEPEND')
_use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
UNKNOWN_REPO = _unknown_repo
def __init__(self, **kwargs):
+ metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
Task.__init__(self, **kwargs)
# the SlotObject constructor assigns self.root_config from keyword args
# and is an instance of a '_emerge.RootConfig.RootConfig class
self.root = self.root_config.root
- self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
- self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
+ self._raw_metadata = metadata
+ self._metadata = _PackageMetadataWrapper(self, metadata)
if not self.built:
- self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
- eapi_attrs = _get_eapi_attrs(self.metadata["EAPI"])
- self.cpv = _pkg_str(self.cpv, slot=self.metadata["SLOT"],
- repo=self.metadata.get('repository', ''),
- eapi=self.metadata["EAPI"])
+ self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
+ eapi_attrs = _get_eapi_attrs(self.eapi)
+ self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
+ settings=self.root_config.settings)
if hasattr(self.cpv, 'slot_invalid'):
self._invalid_metadata('SLOT.invalid',
- "SLOT: invalid value: '%s'" % self.metadata["SLOT"])
+ "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
+ self.cpv_split = self.cpv.cpv_split
+ self.category, self.pf = portage.catsplit(self.cpv)
self.cp = self.cpv.cp
+ self.version = self.cpv.version
self.slot = self.cpv.slot
- self.slot_abi = self.cpv.slot_abi
+ self.sub_slot = self.cpv.sub_slot
+ self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
# sync metadata with validated repo (may be UNKNOWN_REPO)
- self.metadata['repository'] = self.cpv.repo
+ self._metadata['repository'] = self.cpv.repo
+
+ if eapi_attrs.iuse_effective:
+ implicit_match = self.root_config.settings._iuse_effective_match
+ else:
+ implicit_match = self.root_config.settings._iuse_implicit_match
+ usealiases = self.root_config.settings._use_manager.getUseAliases(self)
+ self.iuse = self._iuse(self, self._metadata["IUSE"].split(), implicit_match,
+ usealiases, self.eapi)
+
if (self.iuse.enabled or self.iuse.disabled) and \
not eapi_attrs.iuse_defaults:
if not self.installed:
self._invalid_metadata('EAPI.incompatible',
"IUSE contains defaults, but EAPI doesn't allow them")
- self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
- self.category, self.pf = portage.catsplit(self.cpv)
- self.cpv_split = self.cpv.cpv_split
- self.version = self.cpv.version
if self.inherited is None:
self.inherited = frozenset()
@@ -87,6 +106,37 @@ class Package(Task):
type_name=self.type_name)
self._hash_value = hash(self._hash_key)
+ @property
+ def eapi(self):
+ return self._metadata["EAPI"]
+
+ @property
+ def build_time(self):
+ if not self.built:
+ raise AttributeError('build_time')
+ try:
+ return long(self._metadata['BUILD_TIME'])
+ except (KeyError, ValueError):
+ return 0
+
+ @property
+ def defined_phases(self):
+ return self._metadata.defined_phases
+
+ @property
+ def properties(self):
+ return self._metadata.properties
+
+ @property
+ def restrict(self):
+ return self._metadata.restrict
+
+ @property
+ def metadata(self):
+ warnings.warn("_emerge.Package.Package.metadata is deprecated",
+ DeprecationWarning, stacklevel=3)
+ return self._metadata
+
# These are calculated on-demand, so that they are calculated
# after FakeVartree applies its metadata tweaks.
@property
@@ -120,6 +170,10 @@ class Package(Task):
self._validate_deps()
return self._validated_atoms
+ @property
+ def stable(self):
+ return self.cpv.stable
+
@classmethod
def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
operation=None, repo_name=None, root_config=None,
@@ -154,15 +208,15 @@ class Package(Task):
# So overwrite the repo_key with type_name.
repo_key = type_name
- return (type_name, root, cpv, operation, repo_key)
+ return (type_name, root, _unicode(cpv), operation, repo_key)
def _validate_deps(self):
"""
Validate deps. This does not trigger USE calculation since that
is expensive for ebuilds and therefore we want to avoid doing
- in unnecessarily (like for masked packages).
+ it unnecessarily (like for masked packages).
"""
- eapi = self.metadata['EAPI']
+ eapi = self.eapi
dep_eapi = eapi
dep_valid_flag = self.iuse.is_valid_flag
if self.installed:
@@ -175,31 +229,42 @@ class Package(Task):
validated_atoms = []
for k in self._dep_keys:
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if not v:
continue
try:
- validated_atoms.extend(use_reduce(v, eapi=dep_eapi,
+ atoms = use_reduce(v, eapi=dep_eapi,
matchall=True, is_valid_flag=dep_valid_flag,
- token_class=Atom, flat=True))
+ token_class=Atom, flat=True)
except InvalidDependString as e:
self._metadata_exception(k, e)
+ else:
+ validated_atoms.extend(atoms)
+ if not self.built:
+ for atom in atoms:
+ if not isinstance(atom, Atom):
+ continue
+ if atom.slot_operator_built:
+ e = InvalidDependString(
+ _("Improper context for slot-operator "
+ "\"built\" atom syntax: %s") %
+ (atom.unevaluated_atom,))
+ self._metadata_exception(k, e)
self._validated_atoms = tuple(set(atom for atom in
validated_atoms if isinstance(atom, Atom)))
k = 'PROVIDE'
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if v:
try:
use_reduce(v, eapi=dep_eapi, matchall=True,
is_valid_flag=dep_valid_flag, token_class=Atom)
except InvalidDependString as e:
- self._invalid_metadata("PROVIDE.syntax",
- _unicode_decode("%s: %s") % (k, e))
+ self._invalid_metadata("PROVIDE.syntax", "%s: %s" % (k, e))
for k in self._use_conditional_misc_keys:
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if not v:
continue
try:
@@ -209,24 +274,20 @@ class Package(Task):
self._metadata_exception(k, e)
k = 'REQUIRED_USE'
- v = self.metadata.get(k)
- if v:
+ v = self._metadata.get(k)
+ if v and not self.built:
if not _get_eapi_attrs(eapi).required_use:
self._invalid_metadata('EAPI.incompatible',
"REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
else:
try:
check_required_use(v, (),
- self.iuse.is_valid_flag)
+ self.iuse.is_valid_flag, eapi=eapi)
except InvalidDependString as e:
- # Force unicode format string for python-2.x safety,
- # ensuring that PortageException.__unicode__() is used
- # when necessary.
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s") % (k, e))
+ self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
k = 'SRC_URI'
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if v:
try:
use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
@@ -248,36 +309,45 @@ class Package(Task):
if self.invalid is not False:
masks['invalid'] = self.invalid
- if not settings._accept_chost(self.cpv, self.metadata):
- masks['CHOST'] = self.metadata['CHOST']
+ if not settings._accept_chost(self.cpv, self._metadata):
+ masks['CHOST'] = self._metadata['CHOST']
- eapi = self.metadata["EAPI"]
+ eapi = self.eapi
if not portage.eapi_is_supported(eapi):
masks['EAPI.unsupported'] = eapi
if portage._eapi_is_deprecated(eapi):
masks['EAPI.deprecated'] = eapi
missing_keywords = settings._getMissingKeywords(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_keywords:
masks['KEYWORDS'] = missing_keywords
try:
missing_properties = settings._getMissingProperties(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_properties:
masks['PROPERTIES'] = missing_properties
except InvalidDependString:
# already recorded as 'invalid'
pass
- mask_atom = settings._getMaskAtom(self.cpv, self.metadata)
+ try:
+ missing_restricts = settings._getMissingRestrict(
+ self.cpv, self._metadata)
+ if missing_restricts:
+ masks['RESTRICT'] = missing_restricts
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
if mask_atom is not None:
masks['package.mask'] = mask_atom
try:
missing_licenses = settings._getMissingLicenses(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_licenses:
masks['LICENSE'] = missing_licenses
except InvalidDependString:
@@ -303,7 +373,8 @@ class Package(Task):
'CHOST' in masks or \
'EAPI.deprecated' in masks or \
'KEYWORDS' in masks or \
- 'PROPERTIES' in masks):
+ 'PROPERTIES' in masks or \
+ 'RESTRICT' in masks):
return False
if 'package.mask' in masks or \
@@ -316,7 +387,7 @@ class Package(Task):
"""returns None, 'missing', or 'unstable'."""
missing = self.root_config.settings._getRawMissingKeywords(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if not missing:
return None
@@ -337,17 +408,22 @@ class Package(Task):
"""returns a bool if the cpv is in the list of
expanded pmaskdict[cp] available ebuilds"""
pmask = self.root_config.settings._getRawMaskAtom(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
return pmask is not None
def _metadata_exception(self, k, e):
+ if k.endswith('DEPEND'):
+ qacat = 'dependency.syntax'
+ else:
+ qacat = k + ".syntax"
+
# For unicode safety with python-2.x we need to avoid
# using the string format operator with a non-unicode
# format string, since that will result in the
# PortageException.__str__() method being invoked,
# followed by unsafe decoding that may result in a
- # UnicodeDecodeError. Therefore, use _unicode_decode()
+ # UnicodeDecodeError. Therefore, use unicode_literals
# to ensure that format strings are unicode, so that
# PortageException.__unicode__() is used when necessary
# in python-2.x.
@@ -359,19 +435,17 @@ class Package(Task):
continue
categorized_error = True
self._invalid_metadata(error.category,
- _unicode_decode("%s: %s") % (k, error))
+ "%s: %s" % (k, error))
if not categorized_error:
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s") % (k, e))
+ self._invalid_metadata(qacat,"%s: %s" % (k, e))
else:
# For installed packages, show the path of the file
# containing the invalid metadata, since the user may
# want to fix the deps by hand.
vardb = self.root_config.trees['vartree'].dbapi
path = vardb.getpath(self.cpv, filename=k)
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s in '%s'") % (k, e, path))
+ self._invalid_metadata(qacat, "%s: %s in '%s'" % (k, e, path))
def _invalid_metadata(self, msg_type, msg):
if self._invalid is None:
@@ -394,7 +468,8 @@ class Package(Task):
cpv_color = "PKG_NOMERGE"
s = "(%s, %s" \
- % (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo) , self.type_name)
+ % (portage.output.colorize(cpv_color, self.cpv + _slot_separator + \
+ self.slot + "/" + self.sub_slot + _repo_separator + self.repo) , self.type_name)
if self.type_name == "installed":
if self.root_config.settings['ROOT'] != "/":
@@ -425,13 +500,16 @@ class Package(Task):
# Share identical frozenset instances when available.
_frozensets = {}
- def __init__(self, pkg, use_str):
+ def __init__(self, pkg, enabled_flags):
self._pkg = pkg
self._expand = None
self._expand_hidden = None
self._force = None
self._mask = None
- self.enabled = frozenset(use_str.split())
+ if eapi_has_use_aliases(pkg.eapi):
+ for enabled_flag in enabled_flags:
+ enabled_flags.extend(pkg.iuse.alias_mapping.get(enabled_flag, []))
+ self.enabled = frozenset(enabled_flags)
if pkg.built:
# Use IUSE to validate USE settings for built packages,
# in case the package manager that built this package
@@ -481,7 +559,7 @@ class Package(Task):
@property
def repo(self):
- return self.metadata['repository']
+ return self._metadata['repository']
@property
def repo_priority(self):
@@ -493,7 +571,7 @@ class Package(Task):
@property
def use(self):
if self._use is None:
- self.metadata._init_use()
+ self._init_use()
return self._use
def _get_pkgsettings(self):
@@ -502,28 +580,81 @@ class Package(Task):
pkgsettings.setcpv(self)
return pkgsettings
+ def _init_use(self):
+ if self.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use_str = self._metadata['USE']
+ is_valid_flag = self.iuse.is_valid_flag
+ enabled_flags = [x for x in use_str.split() if is_valid_flag(x)]
+ use_str = " ".join(enabled_flags)
+ self._use = self._use_class(
+ self, enabled_flags)
+ else:
+ try:
+ use_str = _PackageMetadataWrapperBase.__getitem__(
+ self._metadata, 'USE')
+ except KeyError:
+ use_str = None
+ calculated_use = False
+ if not use_str:
+ use_str = self._get_pkgsettings()["PORTAGE_USE"]
+ calculated_use = True
+ self._use = self._use_class(
+ self, use_str.split())
+ # Initialize these now, since USE access has just triggered
+ # setcpv, and we want to cache the result of the force/mask
+ # calculations that were done.
+ if calculated_use:
+ self._use._init_force_mask()
+
+ _PackageMetadataWrapperBase.__setitem__(
+ self._metadata, 'USE', use_str)
+
+ return use_str
+
class _iuse(object):
- __slots__ = ("__weakref__", "all", "enabled", "disabled",
- "tokens") + ("_iuse_implicit_match",)
+ __slots__ = ("__weakref__", "_iuse_implicit_match", "_pkg", "alias_mapping",
+ "all", "all_aliases", "enabled", "disabled", "tokens")
- def __init__(self, tokens, iuse_implicit_match):
+ def __init__(self, pkg, tokens, iuse_implicit_match, aliases, eapi):
+ self._pkg = pkg
self.tokens = tuple(tokens)
self._iuse_implicit_match = iuse_implicit_match
enabled = []
disabled = []
other = []
+ enabled_aliases = []
+ disabled_aliases = []
+ other_aliases = []
+ aliases_supported = eapi_has_use_aliases(eapi)
+ self.alias_mapping = {}
for x in tokens:
prefix = x[:1]
if prefix == "+":
enabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ enabled_aliases.extend(self.alias_mapping[x[1:]])
elif prefix == "-":
disabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ disabled_aliases.extend(self.alias_mapping[x[1:]])
else:
other.append(x)
- self.enabled = frozenset(enabled)
- self.disabled = frozenset(disabled)
+ if aliases_supported:
+ self.alias_mapping[x] = aliases.get(x, [])
+ other_aliases.extend(self.alias_mapping[x])
+ self.enabled = frozenset(chain(enabled, enabled_aliases))
+ self.disabled = frozenset(chain(disabled, disabled_aliases))
self.all = frozenset(chain(enabled, disabled, other))
+ self.all_aliases = frozenset(chain(enabled_aliases, disabled_aliases, other_aliases))
def is_valid_flag(self, flags):
"""
@@ -534,7 +665,7 @@ class Package(Task):
flags = [flags]
for flag in flags:
- if not flag in self.all and \
+ if not flag in self.all and not flag in self.all_aliases and \
not self._iuse_implicit_match(flag):
return False
return True
@@ -547,11 +678,28 @@ class Package(Task):
flags = [flags]
missing_iuse = []
for flag in flags:
- if not flag in self.all and \
+ if not flag in self.all and not flag in self.all_aliases and \
not self._iuse_implicit_match(flag):
missing_iuse.append(flag)
return missing_iuse
+ def get_real_flag(self, flag):
+ """
+ Returns the flag's name within the scope of this package
+ (accounting for aliases), or None if the flag is unknown.
+ """
+ if flag in self.all:
+ return flag
+ elif flag in self.all_aliases:
+ for k, v in self.alias_mapping.items():
+ if flag in v:
+ return k
+
+ if self._iuse_implicit_match(flag):
+ return flag
+
+ return None
+
def __len__(self):
return 4
@@ -604,7 +752,7 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
__slots__ = ("_pkg",)
_wrapped_keys = frozenset(
- ["COUNTER", "INHERITED", "IUSE", "USE", "_mtime_"])
+ ["COUNTER", "INHERITED", "USE", "_mtime_"])
_use_conditional_keys = frozenset(
['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',])
@@ -617,31 +765,6 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
self.update(metadata)
- def _init_use(self):
- if self._pkg.built:
- use_str = self['USE']
- self._pkg._use = self._pkg._use_class(
- self._pkg, use_str)
- else:
- try:
- use_str = _PackageMetadataWrapperBase.__getitem__(self, 'USE')
- except KeyError:
- use_str = None
- calculated_use = False
- if not use_str:
- use_str = self._pkg._get_pkgsettings()["PORTAGE_USE"]
- calculated_use = True
- _PackageMetadataWrapperBase.__setitem__(self, 'USE', use_str)
- self._pkg._use = self._pkg._use_class(
- self._pkg, use_str)
- # Initialize these now, since USE access has just triggered
- # setcpv, and we want to cache the result of the force/mask
- # calculations that were done.
- if calculated_use:
- self._pkg._use._init_force_mask()
-
- return use_str
-
def __getitem__(self, k):
v = _PackageMetadataWrapperBase.__getitem__(self, k)
if k in self._use_conditional_keys:
@@ -659,7 +782,7 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
elif k == 'USE' and not self._pkg.built:
if not v:
# This is lazy because it's expensive.
- v = self._init_use()
+ v = self._pkg._init_use()
return v
@@ -673,10 +796,6 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
v = frozenset(v.split())
self._pkg.inherited = v
- def _set_iuse(self, k, v):
- self._pkg.iuse = self._pkg._iuse(
- v.split(), self._pkg.root_config.settings._iuse_implicit_match)
-
def _set_counter(self, k, v):
if isinstance(v, basestring):
try:
diff --git a/pym/_emerge/PackageMerge.py b/pym/_emerge/PackageMerge.py
index eed34e99b..ef298ca48 100644
--- a/pym/_emerge/PackageMerge.py
+++ b/pym/_emerge/PackageMerge.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.CompositeTask import CompositeTask
@@ -11,6 +11,9 @@ class PackageMerge(CompositeTask):
self.scheduler = self.merge.scheduler
pkg = self.merge.pkg
pkg_count = self.merge.pkg_count
+ pkg_color = "PKG_MERGE"
+ if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
if pkg.installed:
action_desc = "Uninstalling"
@@ -26,7 +29,7 @@ class PackageMerge(CompositeTask):
msg = "%s %s%s" % \
(action_desc,
counter_str,
- colorize("GOOD", pkg.cpv))
+ colorize(pkg_color, pkg.cpv))
if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
diff --git a/pym/_emerge/PackageUninstall.py b/pym/_emerge/PackageUninstall.py
index eb6a947a5..16c2f749b 100644
--- a/pym/_emerge/PackageUninstall.py
+++ b/pym/_emerge/PackageUninstall.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -33,7 +33,7 @@ class PackageUninstall(CompositeTask):
# Apparently the package got uninstalled
# already, so we can safely return early.
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
self.settings.setcpv(self.pkg)
@@ -67,7 +67,7 @@ class PackageUninstall(CompositeTask):
if retval != os.EX_OK:
self._builddir_lock.unlock()
self.returncode = retval
- self.wait()
+ self._async_wait()
return
self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
diff --git a/pym/_emerge/PackageVirtualDbapi.py b/pym/_emerge/PackageVirtualDbapi.py
index 0f7be44b1..56a5576e3 100644
--- a/pym/_emerge/PackageVirtualDbapi.py
+++ b/pym/_emerge/PackageVirtualDbapi.py
@@ -140,10 +140,10 @@ class PackageVirtualDbapi(dbapi):
self._clear_cache()
def aux_get(self, cpv, wants, myrepo=None):
- metadata = self._cpv_map[cpv].metadata
+ metadata = self._cpv_map[cpv]._metadata
return [metadata.get(x, "") for x in wants]
def aux_update(self, cpv, values):
- self._cpv_map[cpv].metadata.update(values)
+ self._cpv_map[cpv]._metadata.update(values)
self._clear_cache()
diff --git a/pym/_emerge/PipeReader.py b/pym/_emerge/PipeReader.py
index 90febdf44..a8392c329 100644
--- a/pym/_emerge/PipeReader.py
+++ b/pym/_emerge/PipeReader.py
@@ -1,9 +1,11 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import fcntl
+import sys
+
from portage import os
from _emerge.AbstractPollTask import AbstractPollTask
-import fcntl
class PipeReader(AbstractPollTask):
@@ -27,18 +29,28 @@ class PipeReader(AbstractPollTask):
output_handler = self._output_handler
for f in self.input_files.values():
- fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
- fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_ids.add(self.scheduler.register(f.fileno(),
+ fd = isinstance(f, int) and f or f.fileno()
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_ids.add(self.scheduler.io_add_watch(fd,
self._registered_events, output_handler))
self._registered = True
- def isAlive(self):
- return self._registered
-
def _cancel(self):
+ self._unregister()
if self.returncode is None:
- self.returncode = 1
+ self.returncode = self._cancelled_returncode
def _wait(self):
if self.returncode is not None:
@@ -102,11 +114,14 @@ class PipeReader(AbstractPollTask):
if self._reg_ids is not None:
for reg_id in self._reg_ids:
- self.scheduler.unregister(reg_id)
+ self.scheduler.source_remove(reg_id)
self._reg_ids = None
if self.input_files is not None:
for f in self.input_files.values():
- f.close()
+ if isinstance(f, int):
+ os.close(f)
+ else:
+ f.close()
self.input_files = None
diff --git a/pym/_emerge/PollScheduler.py b/pym/_emerge/PollScheduler.py
index 5103e31d6..b118ac157 100644
--- a/pym/_emerge/PollScheduler.py
+++ b/pym/_emerge/PollScheduler.py
@@ -1,18 +1,13 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import gzip
-import errno
-
try:
import threading
except ImportError:
import dummy_threading as threading
-from portage import _encodings
-from portage import _unicode_encode
-from portage.util import writemsg_level
-from portage.util.SlotObject import SlotObject
+import portage
+from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.EventLoop import EventLoop
from portage.util._eventloop.global_event_loop import global_event_loop
@@ -20,14 +15,10 @@ from _emerge.getloadavg import getloadavg
class PollScheduler(object):
- class _sched_iface_class(SlotObject):
- __slots__ = ("IO_ERR", "IO_HUP", "IO_IN", "IO_NVAL", "IO_OUT",
- "IO_PRI", "child_watch_add",
- "idle_add", "io_add_watch", "iteration",
- "output", "register", "run",
- "source_remove", "timeout_add", "unregister")
+ # max time between loadavg checks (milliseconds)
+ _loadavg_latency = None
- def __init__(self, main=False):
+ def __init__(self, main=False, event_loop=None):
"""
@param main: If True then use global_event_loop(), otherwise use
a local EventLoop instance (default is False, for safe use in
@@ -38,29 +29,20 @@ class PollScheduler(object):
self._terminated_tasks = False
self._max_jobs = 1
self._max_load = None
- self._jobs = 0
self._scheduling = False
self._background = False
- if main:
+ if event_loop is not None:
+ self._event_loop = event_loop
+ elif main:
self._event_loop = global_event_loop()
else:
- self._event_loop = EventLoop(main=False)
- self.sched_iface = self._sched_iface_class(
- IO_ERR=self._event_loop.IO_ERR,
- IO_HUP=self._event_loop.IO_HUP,
- IO_IN=self._event_loop.IO_IN,
- IO_NVAL=self._event_loop.IO_NVAL,
- IO_OUT=self._event_loop.IO_OUT,
- IO_PRI=self._event_loop.IO_PRI,
- child_watch_add=self._event_loop.child_watch_add,
- idle_add=self._event_loop.idle_add,
- io_add_watch=self._event_loop.io_add_watch,
- iteration=self._event_loop.iteration,
- output=self._task_output,
- register=self._event_loop.io_add_watch,
- source_remove=self._event_loop.source_remove,
- timeout_add=self._event_loop.timeout_add,
- unregister=self._event_loop.source_remove)
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ self._sched_iface = SchedulerInterface(self._event_loop,
+ is_background=self._is_background)
+
+ def _is_background(self):
+ return self._background
def terminate(self):
"""
@@ -135,48 +117,23 @@ class PollScheduler(object):
Calls _schedule_tasks() and automatically returns early from
any recursive calls to this method that the _schedule_tasks()
call might trigger. This makes _schedule() safe to call from
- inside exit listeners.
+ inside exit listeners. This method always returns True, so that
+ it may be scheduled continuously via EventLoop.timeout_add().
"""
if self._scheduling:
- return False
+ return True
self._scheduling = True
try:
self._schedule_tasks()
finally:
self._scheduling = False
-
- def _main_loop(self):
- term_check_id = self.sched_iface.idle_add(self._termination_check)
- try:
- # Populate initial event sources. Unless we're scheduling
- # based on load average, we only need to do this once
- # here, since it can be called during the loop from within
- # event handlers.
- self._schedule()
- max_load = self._max_load
-
- # Loop while there are jobs to be scheduled.
- while self._keep_scheduling():
- self.sched_iface.iteration()
-
- if max_load is not None:
- # We have to schedule periodically, in case the load
- # average has changed since the last call.
- self._schedule()
-
- # Clean shutdown of previously scheduled jobs. In the
- # case of termination, this allows for basic cleanup
- # such as flushing of buffered output to logs.
- while self._is_work_scheduled():
- self.sched_iface.iteration()
- finally:
- self.sched_iface.source_remove(term_check_id)
+ return True
def _is_work_scheduled(self):
return bool(self._running_job_count())
def _running_job_count(self):
- return self._jobs
+ raise NotImplementedError(self)
def _can_add_job(self):
if self._terminated_tasks:
@@ -201,47 +158,3 @@ class PollScheduler(object):
return False
return True
-
- def _task_output(self, msg, log_path=None, background=None,
- level=0, noiselevel=-1):
- """
- Output msg to stdout if not self._background. If log_path
- is not None then append msg to the log (appends with
- compression if the filename extension of log_path
- corresponds to a supported compression type).
- """
-
- if background is None:
- # If the task does not have a local background value
- # (like for parallel-fetch), then use the global value.
- background = self._background
-
- msg_shown = False
- if not background:
- writemsg_level(msg, level=level, noiselevel=noiselevel)
- msg_shown = True
-
- if log_path is not None:
- try:
- f = open(_unicode_encode(log_path,
- encoding=_encodings['fs'], errors='strict'),
- mode='ab')
- f_real = f
- except IOError as e:
- if e.errno not in (errno.ENOENT, errno.ESTALE):
- raise
- if not msg_shown:
- writemsg_level(msg, level=level, noiselevel=noiselevel)
- else:
-
- if log_path.endswith('.gz'):
- # NOTE: The empty filename argument prevents us from
- # triggering a bug in python3 which causes GzipFile
- # to raise AttributeError if fileobj.name is bytes
- # instead of unicode.
- f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
-
- f.write(_unicode_encode(msg))
- f.close()
- if f_real is not f:
- f_real.close()
diff --git a/pym/_emerge/QueueScheduler.py b/pym/_emerge/QueueScheduler.py
deleted file mode 100644
index 206087c7a..000000000
--- a/pym/_emerge/QueueScheduler.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from _emerge.PollScheduler import PollScheduler
-
-class QueueScheduler(PollScheduler):
-
- """
- Add instances of SequentialTaskQueue and then call run(). The
- run() method returns when no tasks remain.
- """
-
- def __init__(self, main=True, max_jobs=None, max_load=None):
- PollScheduler.__init__(self, main=main)
-
- if max_jobs is None:
- max_jobs = 1
-
- self._max_jobs = max_jobs
- self._max_load = max_load
-
- self._queues = []
- self._schedule_listeners = []
-
- def add(self, q):
- self._queues.append(q)
-
- def remove(self, q):
- self._queues.remove(q)
-
- def clear(self):
- for q in self._queues:
- q.clear()
-
- def run(self, timeout=None):
-
- timeout_callback = None
- if timeout is not None:
- def timeout_callback():
- timeout_callback.timed_out = True
- return False
- timeout_callback.timed_out = False
- timeout_callback.timeout_id = self.sched_iface.timeout_add(
- timeout, timeout_callback)
-
- term_check_id = self.sched_iface.idle_add(self._termination_check)
- try:
- while not (timeout_callback is not None and
- timeout_callback.timed_out):
- # We don't have any callbacks to trigger _schedule(),
- # so we have to call it explicitly here.
- self._schedule()
- if self._keep_scheduling():
- self.sched_iface.iteration()
- else:
- break
-
- while self._is_work_scheduled() and \
- not (timeout_callback is not None and
- timeout_callback.timed_out):
- self.sched_iface.iteration()
- finally:
- self.sched_iface.source_remove(term_check_id)
- if timeout_callback is not None:
- self.sched_iface.unregister(timeout_callback.timeout_id)
-
- def _schedule_tasks(self):
- """
- @rtype: bool
- @return: True if there may be remaining tasks to schedule,
- False otherwise.
- """
- if self._terminated_tasks:
- return
-
- while self._can_add_job():
- n = self._max_jobs - self._running_job_count()
- if n < 1:
- break
-
- if not self._start_next_job(n):
- return
-
- def _keep_scheduling(self):
- return not self._terminated_tasks and any(self._queues)
-
- def _running_job_count(self):
- job_count = 0
- for q in self._queues:
- job_count += len(q.running_tasks)
- self._jobs = job_count
- return job_count
-
- def _start_next_job(self, n=1):
- started_count = 0
- for q in self._queues:
- initial_job_count = len(q.running_tasks)
- q.schedule()
- final_job_count = len(q.running_tasks)
- if final_job_count > initial_job_count:
- started_count += (final_job_count - initial_job_count)
- if started_count >= n:
- break
- return started_count
-
diff --git a/pym/_emerge/RootConfig.py b/pym/_emerge/RootConfig.py
index bb0d7682a..3648d01d7 100644
--- a/pym/_emerge/RootConfig.py
+++ b/pym/_emerge/RootConfig.py
@@ -1,10 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
class RootConfig(object):
"""This is used internally by depgraph to track information about a
particular $ROOT."""
- __slots__ = ("root", "setconfig", "sets", "settings", "trees")
+ __slots__ = ("mtimedb", "root", "setconfig", "sets", "settings", "trees")
pkg_tree_map = {
"ebuild" : "porttree",
@@ -31,4 +31,11 @@ class RootConfig(object):
Shallow copy all attributes from another instance.
"""
for k in self.__slots__:
- setattr(self, k, getattr(other, k))
+ try:
+ setattr(self, k, getattr(other, k))
+ except AttributeError:
+ # mtimedb is currently not a required attribute
+ try:
+ delattr(self, k)
+ except AttributeError:
+ pass
diff --git a/pym/_emerge/Scheduler.py b/pym/_emerge/Scheduler.py
index 0b72a4cfc..dd268f708 100644
--- a/pym/_emerge/Scheduler.py
+++ b/pym/_emerge/Scheduler.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
from collections import deque
import gc
@@ -18,7 +18,7 @@ import zlib
import portage
from portage import os
from portage import _encodings
-from portage import _unicode_decode, _unicode_encode
+from portage import _unicode_encode
from portage.cache.mappings import slot_dict_class
from portage.elog.messages import eerror
from portage.localization import _
@@ -28,6 +28,8 @@ from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ensure_dirs, writemsg, writemsg_level
from portage.util.SlotObject import SlotObject
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import (_check_temp_dir,
@@ -50,6 +52,7 @@ from _emerge.EbuildFetcher import EbuildFetcher
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
from _emerge.FakeVartree import FakeVartree
+from _emerge.getloadavg import getloadavg
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
from _emerge.JobStatusDisplay import JobStatusDisplay
@@ -64,6 +67,9 @@ if sys.hexversion >= 0x3000000:
class Scheduler(PollScheduler):
+ # max time between loadavg checks (milliseconds)
+ _loadavg_latency = 30000
+
# max time between display status updates (milliseconds)
_max_display_latency = 3000
@@ -79,7 +85,7 @@ class Scheduler(PollScheduler):
_opts_no_self_update = frozenset(["--buildpkgonly",
"--fetchonly", "--fetch-all-uri", "--pretend"])
- class _iface_class(PollScheduler._sched_iface_class):
+ class _iface_class(SchedulerInterface):
__slots__ = ("fetch",
"scheduleSetup", "scheduleUnpack")
@@ -135,8 +141,7 @@ class Scheduler(PollScheduler):
portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
- spinner, mergelist=None, favorites=None, graph_config=None,
- uninstall_only=False):
+ spinner, mergelist=None, favorites=None, graph_config=None):
PollScheduler.__init__(self, main=True)
if mergelist is not None:
@@ -152,7 +157,6 @@ class Scheduler(PollScheduler):
self._spinner = spinner
self._mtimedb = mtimedb
self._favorites = favorites
- self._uninstall_only = uninstall_only
self._args_set = InternalPackageSet(favorites, allow_repo=True)
self._build_opts = self._build_opts_class()
@@ -161,6 +165,8 @@ class Scheduler(PollScheduler):
self._build_opts.buildpkg_exclude = InternalPackageSet( \
initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
allow_wildcard=True, allow_repo=True)
+ if "mirror" in self.settings.features:
+ self._build_opts.fetch_all_uri = True
self._binpkg_opts = self._binpkg_opts_class()
for k in self._binpkg_opts.__slots__:
@@ -217,14 +223,15 @@ class Scheduler(PollScheduler):
fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
schedule=self._schedule_fetch)
self._sched_iface = self._iface_class(
+ self._event_loop,
+ is_background=self._is_background,
fetch=fetch_iface,
scheduleSetup=self._schedule_setup,
- scheduleUnpack=self._schedule_unpack,
- **dict((k, getattr(self.sched_iface, k))
- for k in self.sched_iface.__slots__))
+ scheduleUnpack=self._schedule_unpack)
self._prefetchers = weakref.WeakValueDictionary()
self._pkg_queue = []
+ self._jobs = 0
self._running_tasks = {}
self._completed_tasks = set()
@@ -243,10 +250,15 @@ class Scheduler(PollScheduler):
# The load average takes some time to respond when new
# jobs are added, so we need to limit the rate of adding
# new jobs.
- self._job_delay_max = 10
- self._job_delay_factor = 1.0
- self._job_delay_exp = 1.5
+ self._job_delay_max = 5
self._previous_job_start_time = None
+ self._job_delay_timeout_id = None
+
+ # The load average takes some time to respond when after
+ # a SIGSTOP/SIGCONT cycle, so delay scheduling for some
+ # time after SIGCONT is received.
+ self._sigcont_delay = 5
+ self._sigcont_time = None
# This is used to memoize the _choose_pkg() result when
# no packages can be chosen until one of the existing
@@ -300,15 +312,10 @@ class Scheduler(PollScheduler):
if not portage.dep.match_from_list(
portage.const.PORTAGE_PACKAGE_ATOM, [x]):
continue
- if self._running_portage is None or \
- self._running_portage.cpv != x.cpv or \
- '9999' in x.cpv or \
- 'git' in x.inherited or \
- 'git-2' in x.inherited:
- rval = _check_temp_dir(self.settings)
- if rval != os.EX_OK:
- return rval
- _prepare_self_update(self.settings)
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+ _prepare_self_update(self.settings)
break
return os.EX_OK
@@ -328,15 +335,13 @@ class Scheduler(PollScheduler):
self._set_graph_config(graph_config)
self._blocker_db = {}
dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
- ignore_built_slot_abi_deps = self.myopts.get(
- "--ignore-built-slot-abi-deps", "n") == "y"
+ ignore_built_slot_operator_deps = self.myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
for root in self.trees:
- if self._uninstall_only:
- continue
if graph_config is None:
fake_vartree = FakeVartree(self.trees[root]["root_config"],
pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
- ignore_built_slot_abi_deps=ignore_built_slot_abi_deps)
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
fake_vartree.sync()
else:
fake_vartree = graph_config.trees[root]['vartree']
@@ -413,7 +418,7 @@ class Scheduler(PollScheduler):
if not (isinstance(task, Package) and \
task.operation == "merge"):
continue
- if 'interactive' in task.metadata.properties:
+ if 'interactive' in task.properties:
interactive_tasks.append(task)
return interactive_tasks
@@ -658,10 +663,11 @@ class Scheduler(PollScheduler):
if value and value.strip():
continue
msg = _("%(var)s is not set... "
- "Are you missing the '%(configroot)setc/make.profile' symlink? "
+ "Are you missing the '%(configroot)s%(profile_path)s' symlink? "
"Is the symlink correct? "
"Is your portage tree complete?") % \
- {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
+ {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"],
+ "profile_path": portage.const.PROFILE_PATH}
out = portage.output.EOutput()
for line in textwrap.wrap(msg, 70):
@@ -721,7 +727,6 @@ class Scheduler(PollScheduler):
return
if self._parallel_fetch:
- self._status_msg("Starting parallel fetch")
prefetchers = self._prefetchers
@@ -753,7 +758,8 @@ class Scheduler(PollScheduler):
prefetcher = EbuildFetcher(background=True,
config_pool=self._ConfigPool(pkg.root,
self._allocate_config, self._deallocate_config),
- fetchonly=1, logfile=self._fetch_log,
+ fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
+ logfile=self._fetch_log,
pkg=pkg, prefetch=True, scheduler=self._sched_iface)
elif pkg.type_name == "binary" and \
@@ -774,10 +780,10 @@ class Scheduler(PollScheduler):
failures = 0
- # Use a local PollScheduler instance here, since we don't
+ # Use a local EventLoop instance here, since we don't
# want tasks here to trigger the usual Scheduler callbacks
# that handle job scheduling and status display.
- sched_iface = PollScheduler().sched_iface
+ sched_iface = SchedulerInterface(EventLoop(main=False))
for x in self._mergelist:
if not isinstance(x, Package):
@@ -786,10 +792,10 @@ class Scheduler(PollScheduler):
if x.operation == "uninstall":
continue
- if x.metadata["EAPI"] in ("0", "1", "2", "3"):
+ if x.eapi in ("0", "1", "2", "3"):
continue
- if "pretend" not in x.metadata.defined_phases:
+ if "pretend" not in x.defined_phases:
continue
out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
@@ -808,7 +814,7 @@ class Scheduler(PollScheduler):
build_dir_path = os.path.join(
os.path.realpath(settings["PORTAGE_TMPDIR"]),
"portage", x.category, x.pf)
- existing_buildir = os.path.isdir(build_dir_path)
+ existing_builddir = os.path.isdir(build_dir_path)
settings["PORTAGE_BUILDDIR"] = build_dir_path
build_dir = EbuildBuildDir(scheduler=sched_iface,
settings=settings)
@@ -819,7 +825,7 @@ class Scheduler(PollScheduler):
# Clean up the existing build dir, in case pkg_pretend
# checks for available space (bug #390711).
- if existing_buildir:
+ if existing_builddir:
if x.built:
tree = "bintree"
infloc = os.path.join(build_dir_path, "build-info")
@@ -908,13 +914,18 @@ class Scheduler(PollScheduler):
failures += 1
portage.elog.elog_process(x.cpv, settings)
finally:
- if current_task is not None and current_task.isAlive():
- current_task.cancel()
- current_task.wait()
- clean_phase = EbuildPhase(background=False,
- phase='clean', scheduler=sched_iface, settings=settings)
- clean_phase.start()
- clean_phase.wait()
+
+ if current_task is not None:
+ if current_task.isAlive():
+ current_task.cancel()
+ current_task.wait()
+ if current_task.returncode == os.EX_OK:
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface,
+ settings=settings)
+ clean_phase.start()
+ clean_phase.wait()
+
build_dir.unlock()
if failures:
@@ -1004,6 +1015,8 @@ class Scheduler(PollScheduler):
earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+ earlier_sigcont_handler = \
+ signal.signal(signal.SIGCONT, self._sigcont_handler)
try:
rval = self._merge()
@@ -1017,6 +1030,10 @@ class Scheduler(PollScheduler):
signal.signal(signal.SIGTERM, earlier_sigterm_handler)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ if earlier_sigcont_handler is not None:
+ signal.signal(signal.SIGCONT, earlier_sigcont_handler)
+ else:
+ signal.signal(signal.SIGCONT, signal.SIG_DFL)
if received_signal:
sys.exit(received_signal[0])
@@ -1063,7 +1080,8 @@ class Scheduler(PollScheduler):
printer = portage.output.EOutput()
background = self._background
failure_log_shown = False
- if background and len(self._failed_pkgs_all) == 1:
+ if background and len(self._failed_pkgs_all) == 1 and \
+ self.myopts.get('--quiet-fail', 'n') != 'y':
# If only one package failed then just show it's
# whole log for easy viewing.
failed_pkg = self._failed_pkgs_all[-1]
@@ -1142,9 +1160,9 @@ class Scheduler(PollScheduler):
printer.eerror(line)
printer.eerror("")
for failed_pkg in self._failed_pkgs_all:
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that Package.__unicode__() is called in python2.
- msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
+ msg = " %s" % (failed_pkg.pkg,)
log_path = self._locate_failure_log(failed_pkg)
if log_path is not None:
msg += ", Log file:"
@@ -1341,6 +1359,38 @@ class Scheduler(PollScheduler):
blocker_db = self._blocker_db[pkg.root]
blocker_db.discardBlocker(pkg)
+ def _main_loop(self):
+ term_check_id = self._event_loop.idle_add(self._termination_check)
+ loadavg_check_id = None
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ loadavg_check_id = self._event_loop.timeout_add(
+ self._loadavg_latency, self._schedule)
+
+ try:
+ # Populate initial event sources. Unless we're scheduling
+ # based on load average, we only need to do this once
+ # here, since it can be called during the loop from within
+ # event handlers.
+ self._schedule()
+
+ # Loop while there are jobs to be scheduled.
+ while self._keep_scheduling():
+ self._event_loop.iteration()
+
+ # Clean shutdown of previously scheduled jobs. In the
+ # case of termination, this allows for basic cleanup
+ # such as flushing of buffered output to logs.
+ while self._is_work_scheduled():
+ self._event_loop.iteration()
+ finally:
+ self._event_loop.source_remove(term_check_id)
+ if loadavg_check_id is not None:
+ self._event_loop.source_remove(loadavg_check_id)
+
def _merge(self):
if self._opts_no_background.intersection(self.myopts):
@@ -1351,8 +1401,10 @@ class Scheduler(PollScheduler):
failed_pkgs = self._failed_pkgs
portage.locks._quiet = self._background
portage.elog.add_listener(self._elog_listener)
- display_timeout_id = self.sched_iface.timeout_add(
- self._max_display_latency, self._status_display.display)
+ display_timeout_id = None
+ if self._status_display._isatty and not self._status_display.quiet:
+ display_timeout_id = self._event_loop.timeout_add(
+ self._max_display_latency, self._status_display.display)
rval = os.EX_OK
try:
@@ -1361,7 +1413,8 @@ class Scheduler(PollScheduler):
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog.remove_listener(self._elog_listener)
- self.sched_iface.source_remove(display_timeout_id)
+ if display_timeout_id is not None:
+ self._event_loop.source_remove(display_timeout_id)
if failed_pkgs:
rval = failed_pkgs[-1].returncode
@@ -1493,12 +1546,15 @@ class Scheduler(PollScheduler):
self._config_pool[settings['EROOT']].append(settings)
def _keep_scheduling(self):
- return bool(not self._terminated_tasks and self._pkg_queue and \
+ return bool(not self._terminated.is_set() and self._pkg_queue and \
not (self._failed_pkgs and not self._build_opts.fetchonly))
def _is_work_scheduled(self):
return bool(self._running_tasks)
+ def _running_job_count(self):
+ return self._jobs
+
def _schedule_tasks(self):
while True:
@@ -1539,6 +1595,9 @@ class Scheduler(PollScheduler):
not self._task_queues.merge)):
break
+ def _sigcont_handler(self, signum, frame):
+ self._sigcont_time = time.time()
+
def _job_delay(self):
"""
@rtype: bool
@@ -1549,14 +1608,53 @@ class Scheduler(PollScheduler):
current_time = time.time()
- delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
+ if self._sigcont_time is not None:
+
+ elapsed_seconds = current_time - self._sigcont_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and \
+ elapsed_seconds < self._sigcont_delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._event_loop.source_remove(
+ self._job_delay_timeout_id)
+
+ self._job_delay_timeout_id = self._event_loop.timeout_add(
+ 1000 * (self._sigcont_delay - elapsed_seconds),
+ self._schedule_once)
+ return True
+
+ # Only set this to None after the delay has expired,
+ # since this method may be called again before the
+ # delay has expired.
+ self._sigcont_time = None
+
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
+ delay = self._job_delay_max * avg1 / self._max_load
if delay > self._job_delay_max:
delay = self._job_delay_max
- if (current_time - self._previous_job_start_time) < delay:
+ elapsed_seconds = current_time - self._previous_job_start_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and elapsed_seconds < delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._event_loop.source_remove(
+ self._job_delay_timeout_id)
+
+ self._job_delay_timeout_id = self._event_loop.timeout_add(
+ 1000 * (delay - elapsed_seconds), self._schedule_once)
return True
return False
+ def _schedule_once(self):
+ self._schedule()
+ return False
+
def _schedule_tasks_imp(self):
"""
@rtype: bool
@@ -1738,7 +1836,7 @@ class Scheduler(PollScheduler):
# scope
e = exc
mydepgraph = e.depgraph
- dropped_tasks = set()
+ dropped_tasks = {}
if e is not None:
def unsatisfied_resume_dep_msg():
@@ -1775,11 +1873,7 @@ class Scheduler(PollScheduler):
return False
if success and self._show_list():
- mylist = mydepgraph.altlist()
- if mylist:
- if "--tree" in self.myopts:
- mylist.reverse()
- mydepgraph.display(mylist, favorites=self._favorites)
+ mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
if not success:
self._post_mod_echo_msgs.append(mydepgraph.display_problems)
@@ -1788,7 +1882,7 @@ class Scheduler(PollScheduler):
self._init_graph(mydepgraph.schedulerGraph())
msg_width = 75
- for task in dropped_tasks:
+ for task, atoms in dropped_tasks.items():
if not (isinstance(task, Package) and task.operation == "merge"):
continue
pkg = task
@@ -1796,7 +1890,10 @@ class Scheduler(PollScheduler):
" %s" % (pkg.cpv,)
if pkg.root_config.settings["ROOT"] != "/":
msg += " for %s" % (pkg.root,)
- msg += " dropped due to unsatisfied dependency."
+ if not atoms:
+ msg += " dropped because it is masked or unavailable"
+ else:
+ msg += " dropped because it requires %s" % ", ".join(atoms)
for line in textwrap.wrap(msg, msg_width):
eerror(line, phase="other", key=pkg.cpv)
settings = self.pkgsettings[pkg.root]
@@ -1841,11 +1938,21 @@ class Scheduler(PollScheduler):
root_config = pkg.root_config
world_set = root_config.sets["selected"]
world_locked = False
- if hasattr(world_set, "lock"):
- world_set.lock()
- world_locked = True
+ atom = None
+
+ if pkg.operation != "uninstall":
+ # Do this before acquiring the lock, since it queries the
+ # portdbapi which can call the global event loop, triggering
+ # a concurrent call to this method or something else that
+ # needs an exclusive (non-reentrant) lock on the world file.
+ atom = create_world_atom(pkg, args_set, root_config)
try:
+
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
if hasattr(world_set, "load"):
world_set.load() # maybe it's changed on disk
@@ -1857,8 +1964,7 @@ class Scheduler(PollScheduler):
for s in pkg.root_config.setconfig.active:
world_set.remove(SETPREFIX+s)
else:
- atom = create_world_atom(pkg, args_set, root_config)
- if atom:
+ if atom is not None:
if hasattr(world_set, "add"):
self._status_msg(('Recording %s in "world" ' + \
'favorites file...') % atom)
diff --git a/pym/_emerge/SpawnProcess.py b/pym/_emerge/SpawnProcess.py
index 9fbc96472..15d3dc5cf 100644
--- a/pym/_emerge/SpawnProcess.py
+++ b/pym/_emerge/SpawnProcess.py
@@ -1,17 +1,23 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 2008-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from _emerge.SubProcess import SubProcess
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+import errno
+import logging
+import signal
import sys
-from portage.cache.mappings import slot_dict_class
+
+from _emerge.SubProcess import SubProcess
import portage
-from portage import _encodings
-from portage import _unicode_encode
from portage import os
from portage.const import BASH_BINARY
-import fcntl
-import errno
-import gzip
+from portage.util import writemsg_level
+from portage.util._async.PipeLogger import PipeLogger
class SpawnProcess(SubProcess):
@@ -23,31 +29,27 @@ class SpawnProcess(SubProcess):
_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
"uid", "gid", "groups", "umask", "logfile",
- "path_lookup", "pre_exec")
+ "path_lookup", "pre_exec", "close_fds", "cgroup",
+ "unshare_ipc", "unshare_net")
__slots__ = ("args",) + \
- _spawn_kwarg_names + ("_log_file_real", "_selinux_type",)
-
- _file_names = ("log", "process", "stdout")
- _files_dict = slot_dict_class(_file_names, prefix="")
+ _spawn_kwarg_names + ("_pipe_logger", "_selinux_type",)
def _start(self):
if self.fd_pipes is None:
self.fd_pipes = {}
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
fd_pipes = self.fd_pipes
- self._files = self._files_dict()
- files = self._files
-
master_fd, slave_fd = self._pipe(fd_pipes)
- fcntl.fcntl(master_fd, fcntl.F_SETFL,
- fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
- files.process = master_fd
- logfile = None
- if self._can_log(slave_fd):
- logfile = self.logfile
+ can_log = self._can_log(slave_fd)
+ if can_log:
+ log_file_path = self.logfile
+ else:
+ log_file_path = None
null_input = None
if not self.background or 0 in fd_pipes:
@@ -62,48 +64,34 @@ class SpawnProcess(SubProcess):
null_input = os.open('/dev/null', os.O_RDWR)
fd_pipes[0] = null_input
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
# flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
for fd in fd_pipes.values():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
- if logfile is not None:
+ fd_pipes_orig = fd_pipes.copy()
- fd_pipes_orig = fd_pipes.copy()
+ if log_file_path is not None or self.background:
fd_pipes[1] = slave_fd
fd_pipes[2] = slave_fd
- files.log = open(_unicode_encode(logfile,
- encoding=_encodings['fs'], errors='strict'), mode='ab')
- if logfile.endswith('.gz'):
- self._log_file_real = files.log
- files.log = gzip.GzipFile(filename='', mode='ab',
- fileobj=files.log)
-
- portage.util.apply_secpass_permissions(logfile,
- uid=portage.portage_uid, gid=portage.portage_gid,
- mode=0o660)
-
- if not self.background:
- files.stdout = os.dup(fd_pipes_orig[1])
-
- output_handler = self._output_handler
-
else:
-
- # Create a dummy pipe so the scheduler can monitor
- # the process from inside a poll() loop.
- fd_pipes[self._dummy_pipe_fd] = slave_fd
- if self.background:
- fd_pipes[1] = slave_fd
- fd_pipes[2] = slave_fd
- output_handler = self._dummy_handler
+ # Create a dummy pipe that PipeLogger uses to efficiently
+ # monitor for process exit by listening for the EOF event.
+ # Re-use of the allocated fd number for the key in fd_pipes
+ # guarantees that the keys will not collide for similarly
+ # allocated pipes which are used by callers such as
+ # FileDigester and MergeProcess. See the _setup_pipes
+ # docstring for more benefits of this allocation approach.
+ self._dummy_pipe_fd = slave_fd
+ fd_pipes[slave_fd] = slave_fd
kwargs = {}
for k in self._spawn_kwarg_names:
@@ -115,10 +103,6 @@ class SpawnProcess(SubProcess):
kwargs["returnpid"] = True
kwargs.pop("logfile", None)
- self._reg_id = self.scheduler.register(files.process,
- self._registered_events, output_handler)
- self._registered = True
-
retval = self._spawn(self.args, **kwargs)
os.close(slave_fd)
@@ -129,11 +113,32 @@ class SpawnProcess(SubProcess):
# spawn failed
self._unregister()
self._set_returncode((self.pid, retval))
- self.wait()
+ self._async_wait()
return
self.pid = retval[0]
- portage.process.spawned_pids.remove(self.pid)
+
+ stdout_fd = None
+ if can_log and not self.background:
+ stdout_fd = os.dup(fd_pipes_orig[1])
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFD,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._pipe_logger = PipeLogger(background=self.background,
+ scheduler=self.scheduler, input_fd=master_fd,
+ log_file_path=log_file_path,
+ stdout_fd=stdout_fd)
+ self._pipe_logger.addExitListener(self._pipe_logger_exit)
+ self._pipe_logger.start()
+ self._registered = True
def _can_log(self, slave_fd):
return True
@@ -157,92 +162,56 @@ class SpawnProcess(SubProcess):
return spawn_func(args, **kwargs)
- def _output_handler(self, fd, event):
-
- files = self._files
- while True:
- buf = self._read_buf(fd, event)
-
- if buf is None:
- # not a POLLIN event, EAGAIN, etc...
- break
-
- if not buf:
- # EOF
- self._unregister()
- self.wait()
- break
-
- else:
- if not self.background:
- write_successful = False
- failures = 0
- while True:
- try:
- if not write_successful:
- os.write(files.stdout, buf)
- write_successful = True
- break
- except OSError as e:
- if e.errno != errno.EAGAIN:
- raise
- del e
- failures += 1
- if failures > 50:
- # Avoid a potentially infinite loop. In
- # most cases, the failure count is zero
- # and it's unlikely to exceed 1.
- raise
-
- # This means that a subprocess has put an inherited
- # stdio file descriptor (typically stdin) into
- # O_NONBLOCK mode. This is not acceptable (see bug
- # #264435), so revert it. We need to use a loop
- # here since there's a race condition due to
- # parallel processes being able to change the
- # flags on the inherited file descriptor.
- # TODO: When possible, avoid having child processes
- # inherit stdio file descriptors from portage
- # (maybe it can't be avoided with
- # PROPERTIES=interactive).
- fcntl.fcntl(files.stdout, fcntl.F_SETFL,
- fcntl.fcntl(files.stdout,
- fcntl.F_GETFL) ^ os.O_NONBLOCK)
-
- files.log.write(buf)
- files.log.flush()
-
- self._unregister_if_appropriate(event)
-
- return True
-
- def _dummy_handler(self, fd, event):
- """
- This method is mainly interested in detecting EOF, since
- the only purpose of the pipe is to allow the scheduler to
- monitor the process from inside a poll() loop.
- """
-
- while True:
- buf = self._read_buf(fd, event)
-
- if buf is None:
- # not a POLLIN event, EAGAIN, etc...
- break
-
- if not buf:
- # EOF
- self._unregister()
- self.wait()
- break
-
- self._unregister_if_appropriate(event)
-
- return True
-
- def _unregister(self):
- super(SpawnProcess, self)._unregister()
- if self._log_file_real is not None:
- # Avoid "ResourceWarning: unclosed file" since python 3.2.
- self._log_file_real.close()
- self._log_file_real = None
+ def _pipe_logger_exit(self, pipe_logger):
+ self._pipe_logger = None
+ self._unregister()
+ self.wait()
+
+ def _waitpid_loop(self):
+ SubProcess._waitpid_loop(self)
+
+ pipe_logger = self._pipe_logger
+ if pipe_logger is not None:
+ self._pipe_logger = None
+ pipe_logger.removeExitListener(self._pipe_logger_exit)
+ pipe_logger.cancel()
+ pipe_logger.wait()
+
+ def _set_returncode(self, wait_retval):
+ SubProcess._set_returncode(self, wait_retval)
+
+ if self.cgroup:
+ def get_pids(cgroup):
+ try:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'r') as f:
+ return [int(p) for p in f.read().split()]
+ except OSError:
+ # cgroup removed already?
+ return []
+
+ def kill_all(pids, sig):
+ for p in pids:
+ try:
+ os.kill(p, sig)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (p,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
+ raise
+
+ # step 1: kill all orphans
+ pids = get_pids(self.cgroup)
+ if pids:
+ kill_all(pids, signal.SIGKILL)
+
+ # step 2: remove the cgroup
+ try:
+ os.rmdir(self.cgroup)
+ except OSError:
+ # it may be removed already, or busy
+ # we can't do anything good about it
+ pass
diff --git a/pym/_emerge/SubProcess.py b/pym/_emerge/SubProcess.py
index 76b313fc2..13d938297 100644
--- a/pym/_emerge/SubProcess.py
+++ b/pym/_emerge/SubProcess.py
@@ -1,7 +1,10 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import logging
+
from portage import os
+from portage.util import writemsg_level
from _emerge.AbstractPollTask import AbstractPollTask
import signal
import errno
@@ -9,12 +12,7 @@ import errno
class SubProcess(AbstractPollTask):
__slots__ = ("pid",) + \
- ("_files", "_reg_id")
-
- # A file descriptor is required for the scheduler to monitor changes from
- # inside a poll() loop. When logging is not enabled, create a pipe just to
- # serve this purpose alone.
- _dummy_pipe_fd = 9
+ ("_dummy_pipe_fd", "_files", "_reg_id")
# This is how much time we allow for waitpid to succeed after
# we've sent a kill signal to our subprocess.
@@ -50,7 +48,13 @@ class SubProcess(AbstractPollTask):
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as e:
- if e.errno != errno.ESRCH:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
raise
def isAlive(self):
@@ -69,7 +73,13 @@ class SubProcess(AbstractPollTask):
try:
os.kill(self.pid, signal.SIGKILL)
except OSError as e:
- if e.errno != errno.ESRCH:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
raise
del e
self._wait_loop(timeout=self._cancel_timeout)
@@ -116,7 +126,7 @@ class SubProcess(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
diff --git a/pym/_emerge/Task.py b/pym/_emerge/Task.py
index 40f5066c0..250d45802 100644
--- a/pym/_emerge/Task.py
+++ b/pym/_emerge/Task.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.util.SlotObject import SlotObject
@@ -41,3 +41,10 @@ class Task(SlotObject):
strings.
"""
return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))
+
+ def __repr__(self):
+ if self._hash_key is None:
+ # triggered by python-trace
+ return SlotObject.__repr__(self)
+ return "<%s (%s)>" % (self.__class__.__name__,
+ ", ".join(("'%s'" % x for x in self._hash_key)))
diff --git a/pym/_emerge/TaskScheduler.py b/pym/_emerge/TaskScheduler.py
deleted file mode 100644
index 583bfe323..000000000
--- a/pym/_emerge/TaskScheduler.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from _emerge.QueueScheduler import QueueScheduler
-from _emerge.SequentialTaskQueue import SequentialTaskQueue
-
-class TaskScheduler(object):
-
- """
- A simple way to handle scheduling of AsynchrousTask instances. Simply
- add tasks and call run(). The run() method returns when no tasks remain.
- """
-
- def __init__(self, main=True, max_jobs=None, max_load=None):
- self._queue = SequentialTaskQueue(max_jobs=max_jobs)
- self._scheduler = QueueScheduler(main=main,
- max_jobs=max_jobs, max_load=max_load)
- self.sched_iface = self._scheduler.sched_iface
- self.run = self._scheduler.run
- self.clear = self._scheduler.clear
- self.wait = self._queue.wait
- self._scheduler.add(self._queue)
-
- def add(self, task):
- self._queue.add(task)
-
diff --git a/pym/_emerge/UnmergeDepPriority.py b/pym/_emerge/UnmergeDepPriority.py
index 43166006f..ec44a67a1 100644
--- a/pym/_emerge/UnmergeDepPriority.py
+++ b/pym/_emerge/UnmergeDepPriority.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractDepPriority import AbstractDepPriority
@@ -7,15 +7,16 @@ class UnmergeDepPriority(AbstractDepPriority):
"""
Combination of properties Priority Category
- runtime 0 HARD
- runtime_post -1 HARD
- buildtime -2 SOFT
- (none of the above) -2 SOFT
+ runtime_slot_op 0 HARD
+ runtime -1 HARD
+ runtime_post -2 HARD
+ buildtime -3 SOFT
+ (none of the above) -3 SOFT
"""
MAX = 0
- SOFT = -2
- MIN = -2
+ SOFT = -3
+ MIN = -3
def __init__(self, **kwargs):
AbstractDepPriority.__init__(self, **kwargs)
@@ -23,17 +24,21 @@ class UnmergeDepPriority(AbstractDepPriority):
self.optional = True
def __int__(self):
- if self.runtime:
+ if self.runtime_slot_op:
return 0
- if self.runtime_post:
+ if self.runtime:
return -1
- if self.buildtime:
+ if self.runtime_post:
return -2
- return -2
+ if self.buildtime:
+ return -3
+ return -3
def __str__(self):
if self.ignored:
return "ignored"
+ if self.runtime_slot_op:
+ return "hard slot op"
myvalue = self.__int__()
if myvalue > self.SOFT:
return "hard"
diff --git a/pym/_emerge/UseFlagDisplay.py b/pym/_emerge/UseFlagDisplay.py
index 3daca19e1..f46047454 100644
--- a/pym/_emerge/UseFlagDisplay.py
+++ b/pym/_emerge/UseFlagDisplay.py
@@ -1,10 +1,12 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
from itertools import chain
import sys
-from portage import _encodings, _unicode_decode, _unicode_encode
+from portage import _encodings, _unicode_encode
from portage.output import red
from portage.util import cmp_sort_key
from portage.output import blue
@@ -114,9 +116,9 @@ def pkg_use_display(pkg, opts, modified_use=None):
flags.sort(key=UseFlagDisplay.sort_combined)
else:
flags.sort(key=UseFlagDisplay.sort_separated)
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that UseFlagDisplay.__unicode__() is called in python2.
flag_displays.append('%s="%s"' % (varname,
- ' '.join(_unicode_decode("%s") % (f,) for f in flags)))
+ ' '.join("%s" % (f,) for f in flags)))
return ' '.join(flag_displays)
diff --git a/pym/_emerge/actions.py b/pym/_emerge/actions.py
index 9a023a84a..2a1354b6b 100644
--- a/pym/_emerge/actions.py
+++ b/pym/_emerge/actions.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import errno
import logging
@@ -18,27 +18,35 @@ import sys
import tempfile
import textwrap
import time
+import warnings
from itertools import chain
import portage
portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi._similar_name_search:similar_name_search',
+ 'portage.debug',
'portage.news:count_unread_news,display_news_notifications',
+ 'portage.util._get_vm_info:get_vm_info',
+ '_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
+ '_emerge.help:help@emerge_help',
+ '_emerge.post_emerge:display_news_notification,post_emerge',
+ '_emerge.stdout_spinner:stdout_spinner',
)
from portage.localization import _
from portage import os
from portage import shutil
-from portage import eapi_is_supported, _unicode_decode
+from portage import eapi_is_supported, _encodings, _unicode_decode
from portage.cache.cache_errors import CacheError
-from portage.const import GLOBAL_CONFIG_PATH
-from portage.const import _ENABLE_DYN_LINK_MAP
+from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
+from portage.const import SUPPORTED_BINPKG_FORMATS, TIMESTAMP_FORMAT
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi._expand_new_virt import expand_new_virt
from portage.dep import Atom
from portage.eclass_cache import hashed_path
-from portage.exception import InvalidAtom, InvalidData
+from portage.exception import InvalidAtom, InvalidData, ParseError
from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
- red, yellow
+ red, xtermTitle, xtermTitleReset, yellow
good = create_color_func("GOOD")
bad = create_color_func("BAD")
warn = create_color_func("WARN")
@@ -46,9 +54,13 @@ from portage.package.ebuild._ipc.QueryCommand import QueryCommand
from portage.package.ebuild.doebuild import _check_temp_dir
from portage._sets import load_default_config, SETPREFIX
from portage._sets.base import InternalPackageSet
-from portage.util import cmp_sort_key, writemsg, \
+from portage.util import cmp_sort_key, writemsg, varexpand, \
writemsg_level, writemsg_stdout
from portage.util.digraph import digraph
+from portage.util.SlotObject import SlotObject
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
from portage._global_updates import _global_updates
from _emerge.clear_caches import clear_caches
@@ -277,8 +289,14 @@ def action_build(settings, trees, mtimedb,
"dropped due to\n" + \
"!!! masking or unsatisfied dependencies:\n\n",
noiselevel=-1)
- for task in dropped_tasks:
- portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
+ for task, atoms in dropped_tasks.items():
+ if not atoms:
+ writemsg(" %s is masked or unavailable\n" %
+ (task,), noiselevel=-1)
+ else:
+ writemsg(" %s requires %s\n" %
+ (task, ", ".join(atoms)), noiselevel=-1)
+
portage.writemsg("\n", noiselevel=-1)
del dropped_tasks
else:
@@ -309,6 +327,7 @@ def action_build(settings, trees, mtimedb,
mydepgraph.display_problems()
return 1
+ mergecount = None
if "--pretend" not in myopts and \
("--ask" in myopts or "--tree" in myopts or \
"--verbose" in myopts) and \
@@ -320,7 +339,7 @@ def action_build(settings, trees, mtimedb,
return os.EX_OK
favorites = mtimedb["resume"]["favorites"]
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=tree),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -329,7 +348,7 @@ def action_build(settings, trees, mtimedb,
prompt="Would you like to resume merging these packages?"
else:
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=("--tree" in myopts)),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -340,6 +359,7 @@ def action_build(settings, trees, mtimedb,
if isinstance(x, Package) and x.operation == "merge":
mergecount += 1
+ prompt = None
if mergecount==0:
sets = trees[settings['EROOT']]['root_config'].sets
world_candidates = None
@@ -352,14 +372,11 @@ def action_build(settings, trees, mtimedb,
world_candidates = [x for x in favorites \
if not (x.startswith(SETPREFIX) and \
not sets[x[1:]].world_candidate)]
+
if "selective" in myparams and \
not oneshot and world_candidates:
- print()
- for x in world_candidates:
- print(" %s %s" % (good("*"), x))
- prompt="Would you like to add these packages to your world favorites?"
- elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
- prompt="Nothing to merge; would you like to auto-clean packages?"
+ # Prompt later, inside saveNomergeFavorites.
+ prompt = None
else:
print()
print("Nothing to merge; quitting.")
@@ -370,13 +387,15 @@ def action_build(settings, trees, mtimedb,
else:
prompt="Would you like to merge these packages?"
print()
- if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
+ if prompt is not None and "--ask" in myopts and \
+ userquery(prompt, enter_invalid) == "No":
print()
print("Quitting.")
print()
return 128 + signal.SIGINT
# Don't ask again (e.g. when auto-cleaning packages after merge)
- myopts.pop("--ask", None)
+ if mergecount != 0:
+ myopts.pop("--ask", None)
if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
if ("--resume" in myopts):
@@ -386,7 +405,7 @@ def action_build(settings, trees, mtimedb,
return os.EX_OK
favorites = mtimedb["resume"]["favorites"]
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=tree),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -394,39 +413,14 @@ def action_build(settings, trees, mtimedb,
return retval
else:
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=("--tree" in myopts)),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
if retval != os.EX_OK:
return retval
- if "--buildpkgonly" in myopts:
- graph_copy = mydepgraph._dynamic_config.digraph.copy()
- removed_nodes = set()
- for node in graph_copy:
- if not isinstance(node, Package) or \
- node.operation == "nomerge":
- removed_nodes.add(node)
- graph_copy.difference_update(removed_nodes)
- if not graph_copy.hasallzeros(ignore_priority = \
- DepPrioritySatisfiedRange.ignore_medium):
- print("\n!!! --buildpkgonly requires all dependencies to be merged.")
- print("!!! You have to merge the dependencies before you can build this package.\n")
- return 1
+
else:
- if "--buildpkgonly" in myopts:
- graph_copy = mydepgraph._dynamic_config.digraph.copy()
- removed_nodes = set()
- for node in graph_copy:
- if not isinstance(node, Package) or \
- node.operation == "nomerge":
- removed_nodes.add(node)
- graph_copy.difference_update(removed_nodes)
- if not graph_copy.hasallzeros(ignore_priority = \
- DepPrioritySatisfiedRange.ignore_medium):
- print("\n!!! --buildpkgonly requires all dependencies to be merged.")
- print("!!! Cannot merge requested packages. Merge deps and try again.\n")
- return 1
if not mergelist_shown:
# If we haven't already shown the merge list above, at
@@ -446,25 +440,29 @@ def action_build(settings, trees, mtimedb,
mydepgraph.saveNomergeFavorites()
- mergetask = Scheduler(settings, trees, mtimedb, myopts,
- spinner, favorites=favorites,
- graph_config=mydepgraph.schedulerGraph())
-
- del mydepgraph
- clear_caches(trees)
-
- retval = mergetask.merge()
-
- if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
- if "yes" == settings.get("AUTOCLEAN"):
- portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
- unmerge(trees[settings['EROOT']]['root_config'],
- myopts, "clean", [],
- ldpath_mtimes, autoclean=1)
- else:
- portage.writemsg_stdout(colorize("WARN", "WARNING:")
- + " AUTOCLEAN is disabled. This can cause serious"
- + " problems due to overlapping packages.\n")
+ if mergecount == 0:
+ retval = os.EX_OK
+ else:
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+
+ if retval == os.EX_OK and \
+ not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings['EROOT']]['root_config'],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
+ else:
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
return retval
@@ -544,7 +542,8 @@ def action_depclean(settings, trees, ldpath_mtimes,
# specific packages.
msg = []
- if not _ENABLE_DYN_LINK_MAP:
+ if "preserve-libs" not in settings.features and \
+ not myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n":
msg.append("Depclean may break link level dependencies. Thus, it is\n")
msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
@@ -610,11 +609,17 @@ def action_depclean(settings, trees, ldpath_mtimes,
if not cleanlist and "--quiet" in myopts:
return rval
+ set_atoms = {}
+ for k in ("system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
print("Packages installed: " + str(len(vardb.cpv_all())))
- print("Packages in world: " + \
- str(len(root_config.sets["selected"].getAtoms())))
- print("Packages in system: " + \
- str(len(root_config.sets["system"].getAtoms())))
+ print("Packages in world: %d" % len(set_atoms["selected"]))
+ print("Packages in system: %d" % len(set_atoms["system"]))
print("Required packages: "+str(req_pkg_count))
if "--pretend" in myopts:
print("Number to remove: "+str(len(cleanlist)))
@@ -647,13 +652,21 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_sets[protected_set_name] = protected_set
system_set = psets["system"]
- if not system_set or not selected_set:
+ set_atoms = {}
+ for k in ("system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
+ if not set_atoms["system"] or not set_atoms["selected"]:
- if not system_set:
+ if not set_atoms["system"]:
writemsg_level("!!! You have no system list.\n",
level=logging.ERROR, noiselevel=-1)
- if not selected_set:
+ if not set_atoms["selected"]:
writemsg_level("!!! You have no world file.\n",
level=logging.WARNING, noiselevel=-1)
@@ -697,7 +710,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
protected_set.add("=" + pkg.cpv)
continue
@@ -751,7 +764,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
protected_set.add("=" + pkg.cpv)
continue
@@ -769,7 +782,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_sets['__excluded__'].add("=" + pkg.cpv)
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
required_sets['__excluded__'].add("=" + pkg.cpv)
@@ -805,7 +818,12 @@ def calc_depclean(settings, trees, ldpath_mtimes,
msg.append("the following required packages not being installed:")
msg.append("")
for atom, parent in unresolvable:
- msg.append(" %s pulled in by:" % (atom,))
+ if atom != atom.unevaluated_atom and \
+ vardb.match(_unicode(atom)):
+ msg.append(" %s (%s) pulled in by:" %
+ (atom.unevaluated_atom, atom))
+ else:
+ msg.append(" %s pulled in by:" % (atom,))
msg.append(" %s" % (parent,))
msg.append("")
msg.extend(textwrap.wrap(
@@ -848,15 +866,27 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_pkgs_total += 1
def show_parents(child_node):
- parent_nodes = graph.parent_nodes(child_node)
- if not parent_nodes:
+ parent_atoms = \
+ resolver._dynamic_config._parent_atoms.get(child_node, [])
+
+ # Never display the special internal protected_set.
+ parent_atoms = [parent_atom for parent_atom in parent_atoms
+ if not (isinstance(parent_atom[0], SetArg) and
+ parent_atom[0].name == protected_set_name)]
+
+ if not parent_atoms:
# With --prune, the highest version can be pulled in without any
# real parent since all installed packages are pulled in. In that
# case there's nothing to show here.
return
+ parent_atom_dict = {}
+ for parent, atom in parent_atoms:
+ parent_atom_dict.setdefault(parent, []).append(atom)
+
parent_strs = []
- for node in parent_nodes:
- parent_strs.append(str(getattr(node, "cpv", node)))
+ for parent, atoms in parent_atom_dict.items():
+ parent_strs.append("%s requires %s" %
+ (getattr(parent, "cpv", parent), ", ".join(atoms)))
parent_strs.sort()
msg = []
msg.append(" %s pulled in by:\n" % (child_node.cpv,))
@@ -881,12 +911,6 @@ def calc_depclean(settings, trees, ldpath_mtimes,
graph.debug_print()
writemsg("\n", noiselevel=-1)
- # Never display the special internal protected_set.
- for node in graph:
- if isinstance(node, SetArg) and node.name == protected_set_name:
- graph.remove(node)
- break
-
pkgs_to_remove = []
if action == "depclean":
@@ -939,10 +963,19 @@ def calc_depclean(settings, trees, ldpath_mtimes,
cleanlist = create_cleanlist()
clean_set = set(cleanlist)
- if cleanlist and \
- real_vardb._linkmap is not None and \
- myopts.get("--depclean-lib-check") != "n" and \
- "preserve-libs" not in settings.features:
+ depclean_lib_check = cleanlist and real_vardb._linkmap is not None and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n"
+ preserve_libs = "preserve-libs" in settings.features
+ preserve_libs_restrict = False
+
+ if depclean_lib_check and preserve_libs:
+ for pkg in cleanlist:
+ if "preserve-libs" in pkg.restrict:
+ preserve_libs_restrict = True
+ break
+
+ if depclean_lib_check and \
+ (preserve_libs_restrict or not preserve_libs):
# Check if any of these packages are the sole providers of libraries
# with consumers that have not been selected for removal. If so, these
@@ -955,6 +988,13 @@ def calc_depclean(settings, trees, ldpath_mtimes,
writemsg_level(">>> Checking for lib consumers...\n")
for pkg in cleanlist:
+
+ if preserve_libs and "preserve-libs" not in pkg.restrict:
+ # Any needed libraries will be preserved
+ # when this package is unmerged, so there's
+ # no need to account for it here.
+ continue
+
pkg_dblink = real_vardb._dblink(pkg.cpv)
consumers = {}
@@ -1109,7 +1149,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
"installed", root_config, installed=True)
if not resolver._add_pkg(pkg,
Dependency(parent=consumer_pkg,
- priority=UnmergeDepPriority(runtime=True),
+ priority=UnmergeDepPriority(runtime=True,
+ runtime_slot_op=True),
root=pkg.root)):
resolver.display_problems()
return 1, [], False, 0
@@ -1146,30 +1187,30 @@ def calc_depclean(settings, trees, ldpath_mtimes,
graph = digraph()
del cleanlist[:]
- dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
runtime = UnmergeDepPriority(runtime=True)
runtime_post = UnmergeDepPriority(runtime_post=True)
buildtime = UnmergeDepPriority(buildtime=True)
priority_map = {
"RDEPEND": runtime,
"PDEPEND": runtime_post,
+ "HDEPEND": buildtime,
"DEPEND": buildtime,
}
for node in clean_set:
graph.add(node, None)
- for dep_type in dep_keys:
- depstr = node.metadata[dep_type]
+ for dep_type in Package._dep_keys:
+ depstr = node._metadata[dep_type]
if not depstr:
continue
priority = priority_map[dep_type]
if debug:
- writemsg_level(_unicode_decode("\nParent: %s\n") \
+ writemsg_level("\nParent: %s\n"
% (node,), noiselevel=-1, level=logging.DEBUG)
- writemsg_level(_unicode_decode( "Depstring: %s\n") \
+ writemsg_level( "Depstring: %s\n"
% (depstr,), noiselevel=-1, level=logging.DEBUG)
- writemsg_level(_unicode_decode( "Priority: %s\n") \
+ writemsg_level( "Priority: %s\n"
% (priority,), noiselevel=-1, level=logging.DEBUG)
try:
@@ -1183,7 +1224,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
if debug:
writemsg_level("Candidates: [%s]\n" % \
- ', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
+ ', '.join("'%s'" % (x,) for x in atoms),
noiselevel=-1, level=logging.DEBUG)
for atom in atoms:
@@ -1197,7 +1238,15 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
for child_node in matches:
if child_node in clean_set:
- graph.add(child_node, node, priority=priority)
+
+ mypriority = priority.copy()
+ if atom.slot_operator_built:
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
+ graph.add(child_node, node, priority=mypriority)
if debug:
writemsg_level("\nunmerge digraph:\n\n",
@@ -1277,11 +1326,8 @@ def action_deselect(settings, trees, opts, atoms):
allow_repo=True, allow_wildcard=True))
for cpv in vardb.match(atom):
- slot, = vardb.aux_get(cpv, ["SLOT"])
- if not slot:
- slot = "0"
- expanded_atoms.add(Atom("%s:%s" % \
- (portage.cpv_getkey(cpv), slot)))
+ pkg = vardb._pkg_str(cpv, None)
+ expanded_atoms.add(Atom("%s:%s" % (pkg.cp, pkg.slot)))
discard_atoms = set()
for atom in world_set:
@@ -1352,10 +1398,90 @@ class _info_pkgs_ver(object):
def action_info(settings, trees, myopts, myfiles):
+ # See if we can find any packages installed matching the strings
+ # passed on the command line
+ mypkgs = []
+ eroot = settings['EROOT']
+ vardb = trees[eroot]["vartree"].dbapi
+ portdb = trees[eroot]['porttree'].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ for x in myfiles:
+ any_match = False
+ cp_exists = bool(vardb.match(x.cp))
+ installed_match = vardb.match(x)
+ for installed in installed_match:
+ mypkgs.append((installed, "installed"))
+ any_match = True
+
+ if any_match:
+ continue
+
+ for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+ if pkg_type == "binary" and "--usepkg" not in myopts:
+ continue
+
+ # Use match instead of cp_list, to account for old-style virtuals.
+ if not cp_exists and db.match(x.cp):
+ cp_exists = True
+ # Search for masked packages too.
+ if not cp_exists and hasattr(db, "xmatch") and \
+ db.xmatch("match-all", x.cp):
+ cp_exists = True
+
+ matches = db.match(x)
+ matches.reverse()
+ for match in matches:
+ if pkg_type == "binary":
+ if db.bintree.isremote(match):
+ continue
+ auxkeys = ["EAPI", "DEFINED_PHASES"]
+ metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+ if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+ "info" in metadata["DEFINED_PHASES"].split():
+ mypkgs.append((match, pkg_type))
+ break
+
+ if not cp_exists:
+ xinfo = '"%s"' % x.unevaluated_atom
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if settings["ROOT"] != "/":
+ xinfo = "%s for %s" % (xinfo, eroot)
+ writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
+ colorize("INFORM", xinfo), noiselevel=-1)
+
+ if myopts.get("--misspell-suggestions", "y") != "n":
+
+ writemsg("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ dbs = [vardb]
+ #if "--usepkgonly" not in myopts:
+ dbs.append(portdb)
+ if "--usepkg" in myopts:
+ dbs.append(bindb)
+
+ matches = similar_name_search(dbs, x)
+
+ if len(matches) == 1:
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg(" nothing similar found.\n"
+ , noiselevel=-1)
+
+ return 1
+
output_buffer = []
append = output_buffer.append
root_config = trees[settings['EROOT']]['root_config']
- running_eroot = trees._running_eroot
+ chost = settings.get("CHOST")
append(getportageversion(settings["PORTDIR"], None,
settings.profile_path, settings["CHOST"],
@@ -1369,6 +1495,18 @@ def action_info(settings, trees, myopts, myfiles):
append(header_width * "=")
append("System uname: %s" % (platform.platform(aliased=1),))
+ vm_info = get_vm_info()
+ if "ram.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] / 1024)
+ if "ram.free" in vm_info:
+ line += ",%10d free" % (vm_info["ram.free"] / 1024,)
+ append(line)
+ if "swap.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] / 1024)
+ if "swap.free" in vm_info:
+ line += ",%10d free" % (vm_info["swap.free"] / 1024,)
+ append(line)
+
lastSync = portage.grabfile(os.path.join(
settings["PORTDIR"], "metadata", "timestamp.chk"))
if lastSync:
@@ -1377,6 +1515,23 @@ def action_info(settings, trees, myopts, myfiles):
lastSync = "Unknown"
append("Timestamp of tree: %s" % (lastSync,))
+ ld_names = []
+ if chost:
+ ld_names.append(chost + "-ld")
+ ld_names.append("ld")
+ for name in ld_names:
+ try:
+ proc = subprocess.Popen([name, "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0]).splitlines()
+ proc.wait()
+ if proc.wait() == os.EX_OK and output:
+ append("ld %s" % (output[0]))
+ break
+
try:
proc = subprocess.Popen(["distcc", "--version"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@@ -1413,7 +1568,6 @@ def action_info(settings, trees, myopts, myfiles):
"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
atoms = []
- vardb = trees[running_eroot]['vartree'].dbapi
for x in myvars:
try:
x = Atom(x)
@@ -1426,7 +1580,6 @@ def action_info(settings, trees, myopts, myfiles):
myvars = sorted(set(atoms))
- portdb = trees[running_eroot]['porttree'].dbapi
main_repo = portdb.getRepositoryName(portdb.porttree_root)
cp_map = {}
cp_max_len = 0
@@ -1493,7 +1646,7 @@ def action_info(settings, trees, myopts, myfiles):
'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
'PORTAGE_BZIP2_COMMAND',
'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
- 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
+ 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'FEATURES',
'EMERGE_DEFAULT_OPTS']
myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
@@ -1539,40 +1692,7 @@ def action_info(settings, trees, myopts, myfiles):
append("")
writemsg_stdout("\n".join(output_buffer),
noiselevel=-1)
-
- # See if we can find any packages installed matching the strings
- # passed on the command line
- mypkgs = []
- eroot = settings['EROOT']
- vardb = trees[eroot]["vartree"].dbapi
- portdb = trees[eroot]['porttree'].dbapi
- bindb = trees[eroot]["bintree"].dbapi
- for x in myfiles:
- match_found = False
- installed_match = vardb.match(x)
- for installed in installed_match:
- mypkgs.append((installed, "installed"))
- match_found = True
-
- if match_found:
- continue
-
- for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
- if pkg_type == "binary" and "--usepkg" not in myopts:
- continue
-
- matches = db.match(x)
- matches.reverse()
- for match in matches:
- if pkg_type == "binary":
- if db.bintree.isremote(match):
- continue
- auxkeys = ["EAPI", "DEFINED_PHASES"]
- metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
- if metadata["EAPI"] not in ("0", "1", "2", "3") and \
- "info" in metadata["DEFINED_PHASES"].split():
- mypkgs.append((match, pkg_type))
- break
+ del output_buffer[:]
# If some packages were found...
if mypkgs:
@@ -1586,11 +1706,15 @@ def action_info(settings, trees, myopts, myfiles):
# Loop through each package
# Only print settings if they differ from global settings
header_title = "Package Settings"
- print(header_width * "=")
- print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
- print(header_width * "=")
- from portage.output import EOutput
- out = EOutput()
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ out = portage.output.EOutput()
for mypkg in mypkgs:
cpv = mypkg[0]
pkg_type = mypkg[1]
@@ -1608,28 +1732,32 @@ def action_info(settings, trees, myopts, myfiles):
root_config=root_config, type_name=pkg_type)
if pkg_type == "installed":
- print("\n%s was built with the following:" % \
+ append("\n%s was built with the following:" % \
colorize("INFORM", str(pkg.cpv)))
elif pkg_type == "ebuild":
- print("\n%s would be build with the following:" % \
+ append("\n%s would be built with the following:" % \
colorize("INFORM", str(pkg.cpv)))
elif pkg_type == "binary":
- print("\n%s (non-installed binary) was built with the following:" % \
+ append("\n%s (non-installed binary) was built with the following:" % \
colorize("INFORM", str(pkg.cpv)))
- writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
- noiselevel=-1)
+ append('%s' % pkg_use_display(pkg, myopts))
if pkg_type == "installed":
for myvar in mydesiredvars:
if metadata[myvar].split() != settings.get(myvar, '').split():
- print("%s=\"%s\"" % (myvar, metadata[myvar]))
- print()
+ append("%s=\"%s\"" % (myvar, metadata[myvar]))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
if metadata['DEFINED_PHASES']:
if 'info' not in metadata['DEFINED_PHASES'].split():
continue
- print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
+ writemsg_stdout(">>> Attempting to run pkg_info() for '%s'\n"
+ % pkg.cpv, noiselevel=-1)
if pkg_type == "installed":
ebuildpath = vardb.findname(pkg.cpv)
@@ -1856,6 +1984,7 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
print()
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+ portdb.flush_cache()
sys.stdout.flush()
os.umask(old_umask)
@@ -1865,35 +1994,12 @@ def action_regen(settings, portdb, max_jobs, max_load):
#regenerate cache entries
sys.stdout.flush()
- regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
- received_signal = []
-
- def emergeexitsig(signum, frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
- {"signal":signum})
- regen.terminate()
- received_signal.append(128 + signum)
-
- earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
- earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
+ regen = MetadataRegen(portdb, max_jobs=max_jobs,
+ max_load=max_load, main=True)
- try:
- regen.run()
- finally:
- # Restore previous handlers
- if earlier_sigint_handler is not None:
- signal.signal(signal.SIGINT, earlier_sigint_handler)
- else:
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- if earlier_sigterm_handler is not None:
- signal.signal(signal.SIGTERM, earlier_sigterm_handler)
- else:
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- if received_signal:
- sys.exit(received_signal[0])
+ signum = run_main_scheduler(regen)
+ if signum is not None:
+ sys.exit(128 + signum)
portage.writemsg_stdout("done!\n")
return regen.returncode
@@ -1914,37 +2020,110 @@ def action_search(root_config, myopts, myfiles, spinner):
sys.exit(1)
searchinstance.output()
-def action_sync(settings, trees, mtimedb, myopts, myaction):
+def action_sync(emerge_config, trees=DeprecationWarning,
+ mtimedb=DeprecationWarning, opts=DeprecationWarning,
+ action=DeprecationWarning):
+
+ if not isinstance(emerge_config, _emerge_config):
+ warnings.warn("_emerge.actions.action_sync() now expects "
+ "an _emerge_config instance as the first parameter",
+ DeprecationWarning, stacklevel=2)
+ emerge_config = load_emerge_config(
+ action=action, args=[], trees=trees, opts=opts)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ emergelog(xterm_titles, " === sync")
+
+ selected_repos = []
+ unknown_repo_names = []
+ missing_sync_type = []
+ if emerge_config.args:
+ for repo_name in emerge_config.args:
+ try:
+ repo = emerge_config.target_config.settings.repositories[repo_name]
+ except KeyError:
+ unknown_repo_names.append(repo_name)
+ else:
+ selected_repos.append(repo)
+ if repo.sync_type is None:
+ missing_sync_type.append(repo)
+
+ if unknown_repo_names:
+ writemsg_level("!!! %s\n" % _("Unknown repo(s): %s") %
+ " ".join(unknown_repo_names),
+ level=logging.ERROR, noiselevel=-1)
+
+ if missing_sync_type:
+ writemsg_level("!!! %s\n" %
+ _("Missing sync-type for repo(s): %s") %
+ " ".join(repo.name for repo in missing_sync_type),
+ level=logging.ERROR, noiselevel=-1)
+
+ if unknown_repo_names or missing_sync_type:
+ return 1
+
+ else:
+ selected_repos.extend(emerge_config.target_config.settings.repositories)
+
+ for repo in selected_repos:
+ if repo.sync_type is not None:
+ returncode = _sync_repo(emerge_config, repo)
+ if returncode != os.EX_OK:
+ return returncode
+
+ # Reload the whole config from scratch.
+ portage._sync_mode = False
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ if emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ mybestpv = emerge_config.target_config.trees['porttree'].dbapi.xmatch(
+ "bestmatch-visible", portage.const.PORTAGE_PACKAGE_ATOM)
+ mypvs = portage.best(
+ emerge_config.target_config.trees['vartree'].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM))
+
+ chk_updated_cfg_files(emerge_config.target_config.root,
+ portage.util.shlex_split(
+ emerge_config.target_config.settings.get("CONFIG_PROTECT", "")))
+
+ if mybestpv != mypvs and "--quiet" not in emerge_config.opts:
+ print()
+ print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+ print(warn(" * ")+"that you update portage now, before any other packages are updated.")
+ print()
+ print(warn(" * ")+"To update portage, run 'emerge --oneshot portage' now.")
+ print()
+
+ display_news_notification(emerge_config.target_config, emerge_config.opts)
+ return os.EX_OK
+
+def _sync_repo(emerge_config, repo):
+ settings, trees, mtimedb = emerge_config
+ myopts = emerge_config.opts
enter_invalid = '--ask-enter-invalid' in myopts
xterm_titles = "notitles" not in settings.features
- emergelog(xterm_titles, " === sync")
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- myportdir = portdb.porttree_root
- if not myportdir:
- myportdir = settings.get('PORTDIR', '')
- if myportdir and myportdir.strip():
- myportdir = os.path.realpath(myportdir)
- else:
- myportdir = None
+ msg = ">>> Synchronization of repository '%s' located in '%s'..." % (repo.name, repo.location)
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
out = portage.output.EOutput()
- global_config_path = GLOBAL_CONFIG_PATH
- if settings['EPREFIX']:
- global_config_path = os.path.join(settings['EPREFIX'],
- GLOBAL_CONFIG_PATH.lstrip(os.sep))
- if not myportdir:
- sys.stderr.write("!!! PORTDIR is undefined. " + \
- "Is %s/make.globals missing?\n" % global_config_path)
- sys.exit(1)
- if myportdir[-1]=="/":
- myportdir=myportdir[:-1]
try:
- st = os.stat(myportdir)
+ st = os.stat(repo.location)
except OSError:
st = None
if st is None:
- print(">>>",myportdir,"not found, creating it.")
- portage.util.ensure_dirs(myportdir, mode=0o755)
- st = os.stat(myportdir)
+ print(">>> '%s' not found, creating it." % repo.location)
+ portage.util.ensure_dirs(repo.location, mode=0o755)
+ st = os.stat(repo.location)
usersync_uid = None
spawn_kwargs = {}
@@ -1977,59 +2156,51 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if rval != os.EX_OK:
return rval
- syncuri = settings.get("SYNC", "").strip()
- if not syncuri:
- writemsg_level("!!! SYNC is undefined. " + \
- "Is %s/make.globals missing?\n" % global_config_path,
- noiselevel=-1, level=logging.ERROR)
- return 1
+ syncuri = repo.sync_uri
- vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
- vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
+ vcs_dirs = frozenset(VCS_DIRS)
+ vcs_dirs = vcs_dirs.intersection(os.listdir(repo.location))
os.umask(0o022)
dosyncuri = syncuri
updatecache_flg = False
- git = False
- if myaction == "metadata":
- print("skipping sync")
- updatecache_flg = True
- elif ".git" in vcs_dirs:
+ if repo.sync_type == "git":
# Update existing git repository, and ignore the syncuri. We are
# going to trust the user and assume that the user is in the branch
# that he/she wants updated. We'll let the user manage branches with
# git directly.
if portage.process.find_binary("git") is None:
msg = ["Command not found: git",
- "Type \"emerge dev-util/git\" to enable git support."]
+ "Type \"emerge %s\" to enable git support." % portage.const.GIT_PACKAGE_ATOM]
for l in msg:
writemsg_level("!!! %s\n" % l,
level=logging.ERROR, noiselevel=-1)
return 1
- msg = ">>> Starting git pull in %s..." % myportdir
+ msg = ">>> Starting git pull in %s..." % repo.location
emergelog(xterm_titles, msg )
writemsg_level(msg + "\n")
exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
- (portage._shell_quote(myportdir),), **spawn_kwargs)
+ (portage._shell_quote(repo.location),),
+ **portage._native_kwargs(spawn_kwargs))
if exitcode != os.EX_OK:
- msg = "!!! git pull error in %s." % myportdir
+ msg = "!!! git pull error in %s." % repo.location
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
return exitcode
- msg = ">>> Git pull in %s successful" % myportdir
+ msg = ">>> Git pull in %s successful" % repo.location
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n")
- git = True
- elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
+ elif repo.sync_type == "rsync":
for vcs_dir in vcs_dirs:
writemsg_level(("!!! %s appears to be under revision " + \
"control (contains %s).\n!!! Aborting rsync sync.\n") % \
- (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
+ (repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
return 1
- if not os.path.exists("/usr/bin/rsync"):
+ rsync_binary = portage.process.find_binary("rsync")
+ if rsync_binary is None:
print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
- print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
- sys.exit(1)
+ print("!!! Type \"emerge %s\" to enable rsync support." % portage.const.RSYNC_PACKAGE_ATOM)
+ return os.EX_UNAVAILABLE
mytimeout=180
rsync_opts = []
@@ -2041,6 +2212,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
"--safe-links", # Ignore links outside of tree
"--perms", # Preserve permissions
"--times", # Preserive mod times
+ "--omit-dir-times",
"--compress", # Compress the data transmitted
"--force", # Force deletion on non-empty dirs
"--whole-file", # Don't do block transfers, only entire files
@@ -2103,14 +2275,14 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
# Real local timestamp file.
servertimestampfile = os.path.join(
- myportdir, "metadata", "timestamp.chk")
+ repo.location, "metadata", "timestamp.chk")
content = portage.util.grabfile(servertimestampfile)
mytimestamp = 0
if content:
try:
mytimestamp = time.mktime(time.strptime(content[0],
- "%a, %d %b %Y %H:%M:%S +0000"))
+ TIMESTAMP_FORMAT))
except (OverflowError, ValueError):
pass
del content
@@ -2134,9 +2306,12 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
syncuri, maxsplit=4)[1:5]
except ValueError:
- writemsg_level("!!! SYNC is invalid: %s\n" % syncuri,
+ writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
noiselevel=-1, level=logging.ERROR)
return 1
+
+ ssh_opts = settings.get("PORTAGE_SSH_OPTS")
+
if port is None:
port=""
if user_name is None:
@@ -2252,7 +2427,10 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if mytimestamp != 0 and "--quiet" not in myopts:
print(">>> Checking server timestamp ...")
- rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
+ rsynccommand = [rsync_binary] + rsync_opts + extra_rsync_opts
+
+ if proto == 'ssh' and ssh_opts:
+ rsynccommand.append("--rsh=ssh " + ssh_opts)
if "--debug" in myopts:
print(rsynccommand)
@@ -2298,7 +2476,8 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
rsync_initial_timeout)
mypids.extend(portage.process.spawn(
- mycommand, returnpid=True, **spawn_kwargs))
+ mycommand, returnpid=True,
+ **portage._native_kwargs(spawn_kwargs)))
exitcode = os.waitpid(mypids[0], 0)[1]
if usersync_uid is not None:
portage.util.apply_permissions(tmpservertimestampfile,
@@ -2328,12 +2507,11 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
exitcode = (exitcode & 0xff) << 8
else:
exitcode = exitcode >> 8
- if mypids:
- portage.process.spawned_pids.remove(mypids[0])
+
if content:
try:
servertimestamp = time.mktime(time.strptime(
- content[0], "%a, %d %b %Y %H:%M:%S +0000"))
+ content[0], TIMESTAMP_FORMAT))
except (OverflowError, ValueError):
pass
del mycommand, mypids, content
@@ -2349,7 +2527,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
print(">>> In order to force sync, remove '%s'." % servertimestampfile)
print(">>>")
print()
- sys.exit(0)
+ return os.EX_OK
elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
emergelog(xterm_titles,
">>> Server out of date: %s" % dosyncuri)
@@ -2363,8 +2541,33 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
exitcode = SERVER_OUT_OF_DATE
elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# actual sync
- mycommand = rsynccommand + [dosyncuri+"/", myportdir]
- exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
+ mycommand = rsynccommand + [dosyncuri+"/", repo.location]
+ exitcode = None
+ try:
+ exitcode = portage.process.spawn(mycommand,
+ **portage._native_kwargs(spawn_kwargs))
+ finally:
+ if exitcode is None:
+ # interrupted
+ exitcode = 128 + signal.SIGINT
+
+ # 0 Success
+ # 1 Syntax or usage error
+ # 2 Protocol incompatibility
+ # 5 Error starting client-server protocol
+ # 35 Timeout waiting for daemon connection
+ if exitcode not in (0, 1, 2, 5, 35):
+ # If the exit code is not among those listed above,
+ # then we may have a partial/inconsistent sync
+ # state, so our previously read timestamp as well
+ # as the corresponding file can no longer be
+ # trusted.
+ mytimestamp = 0
+ try:
+ os.unlink(servertimestampfile)
+ except OSError:
+ pass
+
if exitcode in [0,1,3,4,11,14,20,21]:
break
elif exitcode in [1,3,4,11,14,20,21]:
@@ -2390,23 +2593,23 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if (exitcode==0):
emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
elif exitcode == SERVER_OUT_OF_DATE:
- sys.exit(1)
+ return 1
elif exitcode == EXCEEDED_MAX_RETRIES:
sys.stderr.write(
">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
- sys.exit(1)
+ return 1
elif (exitcode>0):
msg = []
if exitcode==1:
msg.append("Rsync has reported that there is a syntax error. Please ensure")
- msg.append("that your SYNC statement is proper.")
- msg.append("SYNC=" + settings["SYNC"])
+ msg.append("that sync-uri attribute for repository '%s' is proper." % repo.name)
+ msg.append("sync-uri: '%s'" % repo.sync_uri)
elif exitcode==11:
msg.append("Rsync has reported that there is a File IO error. Normally")
msg.append("this means your disk is full, but can be caused by corruption")
- msg.append("on the filesystem that contains PORTDIR. Please investigate")
+ msg.append("on the filesystem that contains repository '%s'. Please investigate" % repo.name)
msg.append("and try again after the problem has been fixed.")
- msg.append("PORTDIR=" + settings["PORTDIR"])
+ msg.append("Location of repository: '%s'" % repo.location)
elif exitcode==20:
msg.append("Rsync was killed before it finished.")
else:
@@ -2417,115 +2620,76 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
msg.append("(and possibly your system's filesystem) configuration.")
for line in msg:
out.eerror(line)
- sys.exit(exitcode)
- elif syncuri[:6]=="cvs://":
+ return exitcode
+ elif repo.sync_type == "cvs":
if not os.path.exists("/usr/bin/cvs"):
print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
- print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
- sys.exit(1)
- cvsroot=syncuri[6:]
- cvsdir=os.path.dirname(myportdir)
- if not os.path.exists(myportdir+"/CVS"):
+ print("!!! Type \"emerge %s\" to enable CVS support." % portage.const.CVS_PACKAGE_ATOM)
+ return os.EX_UNAVAILABLE
+ cvs_root = syncuri
+ if cvs_root.startswith("cvs://"):
+ cvs_root = cvs_root[6:]
+ if not os.path.exists(os.path.join(repo.location, "CVS")):
#initial checkout
print(">>> Starting initial cvs checkout with "+syncuri+"...")
- if os.path.exists(cvsdir+"/gentoo-x86"):
- print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
- sys.exit(1)
try:
- os.rmdir(myportdir)
+ os.rmdir(repo.location)
except OSError as e:
if e.errno != errno.ENOENT:
sys.stderr.write(
- "!!! existing '%s' directory; exiting.\n" % myportdir)
- sys.exit(1)
+ "!!! existing '%s' directory; exiting.\n" % repo.location)
+ return 1
del e
if portage.process.spawn_bash(
- "cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
- (portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
- **spawn_kwargs) != os.EX_OK:
+ "cd %s; exec cvs -z0 -d %s co -P -d %s %s" %
+ (portage._shell_quote(os.path.dirname(repo.location)), portage._shell_quote(cvs_root),
+ portage._shell_quote(os.path.basename(repo.location)), portage._shell_quote(repo.sync_cvs_repo)),
+ **portage._native_kwargs(spawn_kwargs)) != os.EX_OK:
print("!!! cvs checkout error; exiting.")
- sys.exit(1)
- os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
+ return 1
else:
#cvs update
print(">>> Starting cvs update with "+syncuri+"...")
retval = portage.process.spawn_bash(
"cd %s; exec cvs -z0 -q update -dP" % \
- (portage._shell_quote(myportdir),), **spawn_kwargs)
+ (portage._shell_quote(repo.location),),
+ **portage._native_kwargs(spawn_kwargs))
if retval != os.EX_OK:
writemsg_level("!!! cvs update error; exiting.\n",
noiselevel=-1, level=logging.ERROR)
- sys.exit(retval)
+ return retval
dosyncuri = syncuri
- else:
- writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
- noiselevel=-1, level=logging.ERROR)
- return 1
# Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- adjust_configs(myopts, trees)
- root_config = trees[settings['EROOT']]['root_config']
+ settings, trees, mtimedb = load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
portdb = trees[settings['EROOT']]['porttree'].dbapi
- if git:
+ if repo.sync_type == "git":
# NOTE: Do this after reloading the config, in case
# it did not exist prior to sync, so that the config
# and portdb properly account for its existence.
- exitcode = git_sync_timestamps(portdb, myportdir)
+ exitcode = git_sync_timestamps(portdb, repo.location)
if exitcode == os.EX_OK:
updatecache_flg = True
- if updatecache_flg and \
- myaction != "metadata" and \
- "metadata-transfer" not in settings.features:
+ if updatecache_flg and "metadata-transfer" not in settings.features:
updatecache_flg = False
if updatecache_flg and \
- os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
+ os.path.exists(os.path.join(repo.location, 'metadata', 'cache')):
- # Only update cache for myportdir since that's
+ # Only update cache for repo.location since that's
# the only one that's been synced here.
- action_metadata(settings, portdb, myopts, porttrees=[myportdir])
-
- if myopts.get('--package-moves') != 'n' and \
- _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
- mtimedb.commit()
- # Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- adjust_configs(myopts, trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- root_config = trees[settings['EROOT']]['root_config']
-
- mybestpv = portdb.xmatch("bestmatch-visible",
- portage.const.PORTAGE_PACKAGE_ATOM)
- mypvs = portage.best(
- trees[settings['EROOT']]['vartree'].dbapi.match(
- portage.const.PORTAGE_PACKAGE_ATOM))
-
- chk_updated_cfg_files(settings["EROOT"],
- portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
-
- if myaction != "metadata":
- postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
- portage.USER_CONFIG_PATH, "bin", "post_sync")
- if os.access(postsync, os.X_OK):
- retval = portage.process.spawn(
- [postsync, dosyncuri], env=settings.environ())
- if retval != os.EX_OK:
- writemsg_level(
- " %s spawn failed of %s\n" % (bad("*"), postsync,),
- level=logging.ERROR, noiselevel=-1)
+ action_metadata(settings, portdb, myopts, porttrees=[repo.location])
- if(mybestpv != mypvs) and not "--quiet" in myopts:
- print()
- print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
- print(warn(" * ")+"that you update portage now, before any other packages are updated.")
- print()
- print(warn(" * ")+"To update portage, run 'emerge portage' now.")
- print()
+ postsync = os.path.join(settings["PORTAGE_CONFIGROOT"], portage.USER_CONFIG_PATH, "bin", "post_sync")
+ if os.access(postsync, os.X_OK):
+ retval = portage.process.spawn([postsync, dosyncuri], env=settings.environ())
+ if retval != os.EX_OK:
+ writemsg_level(" %s spawn failed of %s\n" % (bad("*"), postsync,),
+ level=logging.ERROR, noiselevel=-1)
- display_news_notification(root_config, myopts)
return os.EX_OK
def action_uninstall(settings, trees, ldpath_mtimes,
@@ -2647,13 +2811,8 @@ def action_uninstall(settings, trees, ldpath_mtimes,
if owners:
for cpv in owners:
- slot = vardb.aux_get(cpv, ['SLOT'])[0]
- if not slot:
- # portage now masks packages with missing slot, but it's
- # possible that one was installed by an older version
- atom = portage.cpv_getkey(cpv)
- else:
- atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
+ pkg = vardb._pkg_str(cpv, None)
+ atom = '%s:%s' % (pkg.cp, pkg.slot)
valid_atoms.append(portage.dep.Atom(atom))
else:
writemsg_level(("!!! '%s' is not claimed " + \
@@ -2677,20 +2836,20 @@ def action_uninstall(settings, trees, ldpath_mtimes,
if action == 'deselect':
return action_deselect(settings, trees, opts, valid_atoms)
- # Create a Scheduler for calls to unmerge(), in order to cause
- # redirection of ebuild phase output to logs as required for
- # options such as --quiet.
- sched = Scheduler(settings, trees, None, opts,
- spinner, uninstall_only=True)
- sched._background = sched._background_mode()
- sched._status_display.quiet = True
-
- if sched._background:
- sched.settings.unlock()
- sched.settings["PORTAGE_BACKGROUND"] = "1"
- sched.settings.backup_changes("PORTAGE_BACKGROUND")
- sched.settings.lock()
- sched.pkgsettings[eroot] = portage.config(clone=sched.settings)
+ # Use the same logic as the Scheduler class to trigger redirection
+ # of ebuild pkg_prerm/postrm phase output to logs as appropriate
+ # for options such as --jobs, --quiet and --quiet-build.
+ max_jobs = opts.get("--jobs", 1)
+ background = (max_jobs is True or max_jobs > 1 or
+ "--quiet" in opts or opts.get("--quiet-build") == "y")
+ sched_iface = SchedulerInterface(global_event_loop(),
+ is_background=lambda: background)
+
+ if background:
+ settings.unlock()
+ settings["PORTAGE_BACKGROUND"] = "1"
+ settings.backup_changes("PORTAGE_BACKGROUND")
+ settings.lock()
if action in ('clean', 'unmerge') or \
(action == 'prune' and "--nodeps" in opts):
@@ -2698,10 +2857,11 @@ def action_uninstall(settings, trees, ldpath_mtimes,
ordered = action == 'unmerge'
rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
valid_atoms, ldpath_mtimes, ordered=ordered,
- scheduler=sched._sched_iface)
+ scheduler=sched_iface)
else:
rval = action_depclean(settings, trees, ldpath_mtimes,
- opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
+ opts, action, valid_atoms, spinner,
+ scheduler=sched_iface)
return rval
@@ -2807,6 +2967,10 @@ def adjust_config(myopts, settings):
settings["NOCOLOR"] = "true"
settings.backup_changes("NOCOLOR")
+ if "--pkg-format" in myopts:
+ settings["PORTAGE_BINPKG_FORMAT"] = myopts["--pkg-format"]
+ settings.backup_changes("PORTAGE_BINPKG_FORMAT")
+
def display_missing_pkg_set(root_config, set_name):
msg = []
@@ -3030,61 +3194,53 @@ def git_sync_timestamps(portdb, portdir):
return os.EX_OK
-def load_emerge_config(trees=None):
+class _emerge_config(SlotObject):
+
+ __slots__ = ('action', 'args', 'opts',
+ 'running_config', 'target_config', 'trees')
+
+ # Support unpack as tuple, for load_emerge_config backward compatibility.
+ def __iter__(self):
+ yield self.target_config.settings
+ yield self.trees
+ yield self.target_config.mtimedb
+
+ def __getitem__(self, index):
+ return list(self)[index]
+
+ def __len__(self):
+ return 3
+
+def load_emerge_config(emerge_config=None, **kargs):
+
+ if emerge_config is None:
+ emerge_config = _emerge_config(**kargs)
+
kwargs = {}
- for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
+ ("eprefix", "EPREFIX")):
v = os.environ.get(envvar, None)
if v and v.strip():
kwargs[k] = v
- trees = portage.create_trees(trees=trees, **kwargs)
+ emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
+ **portage._native_kwargs(kwargs))
- for root_trees in trees.values():
+ for root_trees in emerge_config.trees.values():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
- settings = trees[trees._target_eroot]['vartree'].settings
- mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
- mtimedb = portage.MtimeDB(mtimedbfile)
- QueryCommand._db = trees
- return settings, trees, mtimedb
-
-def chk_updated_cfg_files(eroot, config_protect):
- target_root = eroot
- result = list(
- portage.util.find_updated_config_files(target_root, config_protect))
-
- for x in result:
- writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
- level=logging.INFO, noiselevel=-1)
- if not x[1]: # it's a protected file
- writemsg_level( _("config file '%s' needs updating.\n") % x[0],
- level=logging.INFO, noiselevel=-1)
- else: # it's a protected dir
- if len(x[1]) == 1:
- head, tail = os.path.split(x[1][0])
- tail = tail[len("._cfg0000_"):]
- fpath = os.path.join(head, tail)
- writemsg_level(_("config file '%s' needs updating.\n") % fpath,
- level=logging.INFO, noiselevel=-1)
- else:
- writemsg_level( _("%d config files in '%s' need updating.\n") % \
- (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
-
- if result:
- print(" "+yellow("*")+ " See the "+colorize("INFORM", _("CONFIGURATION FILES"))\
- + " " + _("section of the") + " " + bold("emerge"))
- print(" "+yellow("*")+ " " + _("man page to learn how to update config files."))
-
+ target_eroot = emerge_config.trees._target_eroot
+ emerge_config.target_config = \
+ emerge_config.trees[target_eroot]['root_config']
+ emerge_config.target_config.mtimedb = portage.MtimeDB(
+ os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
+ emerge_config.running_config = emerge_config.trees[
+ emerge_config.trees._running_eroot]['root_config']
+ QueryCommand._db = emerge_config.trees
-def display_news_notification(root_config, myopts):
- if "news" not in root_config.settings.features:
- return
- portdb = root_config.trees["porttree"].dbapi
- vardb = root_config.trees["vartree"].dbapi
- news_counts = count_unread_news(portdb, vardb)
- display_news_notifications(news_counts)
+ return emerge_config
def getgccversion(chost):
"""
@@ -3140,3 +3296,771 @@ def getgccversion(chost):
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
+
# Warn about features that may confuse users and
# lead them to report invalid bugs.
_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])

def validate_ebuild_environment(trees):
	"""Validate each root's vartree config and warn (once, across all
	roots) about enabled FEATURES values that are unsafe for normal use."""
	risky = set()
	for root in trees:
		cfg = trees[root]["vartree"].settings
		cfg.validate()
		risky.update(_emerge_features_warn.intersection(cfg.features))

	if risky:
		warning = "WARNING: The FEATURES variable contains one " + \
			"or more values that should be disabled under " + \
			"normal circumstances: %s" % " ".join(risky)
		out = portage.output.EOutput()
		for line in textwrap.wrap(warning, 65):
			out.ewarn(line)
+
def check_procfs():
	"""Return os.EX_OK when /proc is mounted (or the platform is not
	Linux, where procfs is not expected); otherwise warn and return 1."""
	proc = '/proc'
	if os.path.ismount(proc) or platform.system() not in ("Linux",):
		return os.EX_OK
	warning = "It seems that %s is not mounted. You have been warned." % proc
	writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(warning, 70)),
		level=logging.ERROR, noiselevel=-1)
	return 1
+
def config_protect_check(trees):
	"""Warn for every root whose CONFIG_PROTECT is unset or empty."""
	for root, root_trees in trees.items():
		cfg = root_trees["root_config"].settings
		if cfg.get("CONFIG_PROTECT"):
			continue
		warning = "!!! CONFIG_PROTECT is empty"
		if cfg["ROOT"] != "/":
			# Identify the affected root when it is not the default.
			warning += " for '%s'" % root
		writemsg_level(warning + "\n", level=logging.WARN, noiselevel=-1)
+
def apply_priorities(settings):
	"""Apply the configured I/O priority (PORTAGE_IONICE_COMMAND) and
	then CPU niceness (PORTAGE_NICENESS) to the current process."""
	for adjust in (ionice, nice):
		adjust(settings)
+
def nice(settings):
	"""Renice the current process by PORTAGE_NICENESS (default "0"),
	reporting any failure instead of raising."""
	try:
		increment = int(settings.get("PORTAGE_NICENESS", "0"))
		os.nice(increment)
	except (OSError, ValueError) as e:
		# OSError: insufficient privileges; ValueError: non-integer setting.
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
+
def ionice(settings):
	"""Run PORTAGE_IONICE_COMMAND (with ${PID} expanded) to adjust the
	I/O priority of this process.

	Silent no-op when the variable is unset/empty or the command does
	not exist; a non-zero exit status is reported but not fatal.
	"""
	raw = settings.get("PORTAGE_IONICE_COMMAND")
	if raw:
		raw = portage.util.shlex_split(raw)
	if not raw:
		return

	substitutions = {"PID" : str(os.getpid())}
	argv = [varexpand(token, mydict=substitutions) for token in raw]

	try:
		status = portage.process.spawn(argv, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.
		return

	if status != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (status,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+
def setconfig_fallback(root_config):
	"""Rebuild root_config.sets from the default set configuration
	(used when the on-disk set configuration is missing or corrupt)."""
	sc = root_config.setconfig
	sc._create_default_config()
	sc._parse(update=True)
	root_config.sets = sc.getSets()
+
def get_missing_sets(root_config):
	"""Return the required set names absent from root_config.sets.

	emerge requires existence of "world", "selected", and "system".
	"""
	required = ("selected", "system", "world",)
	return [s for s in required if s not in root_config.sets]
+
def missing_sets_warning(root_config, missing_sets):
	"""Log an error listing the required package sets that are missing
	and where the default set configuration is normally defined."""
	if len(missing_sets) > 2:
		names = ", ".join('"%s"' % s for s in missing_sets[:-1])
		names += ', and "%s"' % missing_sets[-1]
	elif len(missing_sets) == 2:
		names = '"%s" and "%s"' % tuple(missing_sets)
	else:
		# Exactly one missing set.
		names = '"%s"' % missing_sets[-1]
	lines = ["emerge: incomplete set configuration, " + \
		"missing set(s): %s" % names]
	if root_config.sets:
		lines.append(" sets defined: %s" % ", ".join(root_config.sets))
	global_config_path = portage.const.GLOBAL_CONFIG_PATH
	if portage.const.EPREFIX:
		# Offset-prefix installs keep the global config under EPREFIX.
		global_config_path = os.path.join(portage.const.EPREFIX,
			portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
	lines.append(" This usually means that '%s'" % \
		(os.path.join(global_config_path, "sets/portage.conf"),))
	lines.append(" is missing or corrupt.")
	lines.append(" Falling back to default world and system set configuration!!!")
	for line in lines:
		writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
+
def ensure_required_sets(trees):
	"""Fall back to the default set configuration for any root that is
	missing required sets; warn only for the first affected root."""
	warned = False
	for root_trees in trees.values():
		root_config = root_trees["root_config"]
		missing = get_missing_sets(root_config)
		if not missing:
			continue
		if not warned:
			warned = True
			missing_sets_warning(root_config, missing)
		setconfig_fallback(root_config)
+
def expand_set_arguments(myfiles, myaction, root_config):
	"""Expand @set references in the emerge argument list.

	Bare "system"/"world" arguments are rewritten with SETPREFIX, any
	"@name{key=value,...}" option blocks are applied to the set config,
	and (unless expansion is deferred to the depgraph) each set is
	replaced by its member atoms.

	@param myfiles: command line arguments
	@param myaction: the requested emerge action (None for build/install)
	@param root_config: RootConfig whose setconfig is consulted
	@rtype: tuple
	@return: (expanded argument list, or None on fatal error; exit status)
	"""
	retval = os.EX_OK
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = myaction is None
	newargs = []
	for a in myfiles:
		if a in ("system", "world"):
			# Accept bare "system"/"world" as implicit set references.
			newargs.append(SETPREFIX+a)
		else:
			newargs.append(a)
	myfiles = newargs
	del newargs
	newargs = []

	# separators for set arguments
	ARG_START = "{"
	ARG_END = "}"

	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			start = 0
			end = 0
			x = myfiles[i][len(SETPREFIX):]
			newset = ""
			# Peel "name{opt=val,...}" option blocks off the set
			# reference, feeding each option dict to setconfig.update().
			while x:
				start = x.find(ARG_START)
				end = x.find(ARG_END)
				if start > 0 and start < end:
					namepart = x[:start]
					argpart = x[start+1:end]

					# TODO: implement proper quoting
					args = argpart.split(",")
					options = {}
					for a in args:
						if "=" in a:
							k, v = a.split("=", 1)
							options[k] = v
						else:
							# Valueless options act as boolean flags.
							options[a] = "True"
					setconfig.update(namepart, options)
					newset += (x[:start-len(namepart)]+namepart)
					x = x[end+len(ARG_END):]
				else:
					newset += x
					x = ""
			myfiles[i] = SETPREFIX+newset

	sets = setconfig.getSets()

	# display errors that occurred while loading the SetConfig instance
	for e in setconfig.errors:
		print(colorize("BAD", "Error during set creation: %s" % e))

	unmerge_actions = ("unmerge", "prune", "clean", "depclean")

	for a in myfiles:
		if a.startswith(SETPREFIX):
			s = a[len(SETPREFIX):]
			if s not in sets:
				display_missing_pkg_set(root_config, s)
				return (None, 1)
			if s == "installed":
				msg = ("The @installed set is deprecated and will soon be "
					"removed. Please refer to bug #387059 for details.")
				out = portage.output.EOutput()
				for line in textwrap.wrap(msg, 50):
					out.ewarn(line)
			setconfig.active.append(s)

			if do_not_expand:
				# Loading sets can be slow, so skip it here, in order
				# to allow the depgraph to indicate progress with the
				# spinner while sets are loading (bug #461412).
				newargs.append(a)
				continue

			try:
				set_atoms = setconfig.getSetAtoms(s)
			except portage.exception.PackageSetNotFound as e:
				writemsg_level(("emerge: the given set '%s' " + \
					"contains a non-existent set named '%s'.\n") % \
					(s, e), level=logging.ERROR, noiselevel=-1)
				if s in ('world', 'selected') and \
					SETPREFIX + e.value in sets['selected']:
					writemsg_level(("Use `emerge --deselect %s%s` to "
						"remove this set from world_sets.\n") %
						(SETPREFIX, e,), level=logging.ERROR,
						noiselevel=-1)
				return (None, 1)
			if myaction in unmerge_actions and \
				not sets[s].supportsOperation("unmerge"):
				writemsg_level("emerge: the given set '%s' does " % s + \
					"not support unmerge operations\n",
					level=logging.ERROR, noiselevel=-1)
				retval = 1
			elif not set_atoms:
				writemsg_level("emerge: '%s' is an empty set\n" % s,
					level=logging.INFO, noiselevel=-1)
			else:
				newargs.extend(set_atoms)
			# Report any errors recorded while loading this set.
			for error_msg in sets[s].errors:
				writemsg_level("%s\n" % (error_msg,),
					level=logging.ERROR, noiselevel=-1)
		else:
			newargs.append(a)
	return (newargs, retval)
+
def repo_name_check(trees):
	"""Warn about repositories lacking profiles/repo_name entries and
	return True when at least one such repository exists."""
	unnamed = set()
	for root_trees in trees.values():
		porttree = root_trees.get("porttree")
		if porttree:
			unnamed.update(porttree.dbapi.getMissingRepoNames())

	# Skip warnings about missing repo_name entries for
	# /usr/local/portage (see bug #248603).
	unnamed.discard('/usr/local/portage')

	if unnamed:
		lines = ["WARNING: One or more repositories " + \
			"have missing repo_name entries:", ""]
		lines.extend("\t%s/profiles/repo_name" % (p,) for p in unnamed)
		lines.append("")
		lines.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		lines.append("\n")
		writemsg_level("".join("%s\n" % l for l in lines),
			level=logging.WARNING, noiselevel=-1)

	return bool(unnamed)
+
def repo_name_duplicate_check(trees):
	"""Warn about repositories ignored because of duplicate
	profiles/repo_name entries; return True when any were ignored."""
	ignored_repos = {}
	for root, root_trees in trees.items():
		if 'porttree' not in root_trees:
			continue
		portdb = root_trees['porttree'].dbapi
		# PORTAGE_REPO_DUPLICATE_WARN="0" disables this warning.
		if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') == '0':
			continue
		for repo_name, paths in portdb.getIgnoredRepos():
			key = (root, repo_name, portdb.getRepositoryPath(repo_name))
			ignored_repos.setdefault(key, []).extend(paths)

	if ignored_repos:
		lines = ['WARNING: One or more repositories ' + \
			'have been ignored due to duplicate']
		lines.append(' profiles/repo_name entries:')
		lines.append('')
		for key in sorted(ignored_repos):
			lines.append(' %s overrides' % ", ".join(key))
			lines.extend(' %s' % (path,) for path in ignored_repos[key])
		lines.append('')
		lines.extend(' ' + x for x in textwrap.wrap(
			"All profiles/repo_name entries must be unique in order " + \
			"to avoid having duplicates ignored. " + \
			"Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
			"/etc/portage/make.conf if you would like to disable this warning."))
		lines.append("\n")
		writemsg_level(''.join('%s\n' % l for l in lines),
			level=logging.WARNING, noiselevel=-1)

	return bool(ignored_repos)
+
def run_action(emerge_config):
	"""Perform the requested emerge action.

	Applies global package moves, option/FEATURES adjustments, sanity
	and privilege checks, emergelog and signal-handler setup, then
	dispatches to the appropriate action_* function.

	@param emerge_config: parsed emerge state (.action, .opts, .args,
		.trees, .target_config)
	@rtype: int
	@return: an exit status code (os.EX_OK on success)

	Fix vs. previous revision: retval is initialized before the final
	dispatch chain, since the 'metadata' and 'search' branches fall
	through to ``return retval`` without assigning it, which raised
	UnboundLocalError.
	"""

	# skip global updates prior to sync, since it's called after sync
	if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
		emerge_config.opts.get('--package-moves') != 'n' and \
		_global_updates(emerge_config.trees,
		emerge_config.target_config.mtimedb["updates"],
		quiet=("--quiet" in emerge_config.opts)):
		emerge_config.target_config.mtimedb.commit()
		# Reload the whole config from scratch.
		load_emerge_config(emerge_config=emerge_config)

	xterm_titles = "notitles" not in \
		emerge_config.target_config.settings.features
	if xterm_titles:
		xtermTitle("emerge")

	if "--digest" in emerge_config.opts:
		os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
		# Reload the whole config from scratch so that the portdbapi internal
		# config is updated with new FEATURES.
		load_emerge_config(emerge_config=emerge_config)

	# NOTE: adjust_configs() can map options to FEATURES, so any relevant
	# options adjustments should be made prior to calling adjust_configs().
	if "--buildpkgonly" in emerge_config.opts:
		emerge_config.opts["--buildpkg"] = True

	if "getbinpkg" in emerge_config.target_config.settings.features:
		emerge_config.opts["--getbinpkg"] = True

	# --getbinpkgonly implies both fetching and using binary packages.
	if "--getbinpkgonly" in emerge_config.opts:
		emerge_config.opts["--getbinpkg"] = True
		emerge_config.opts["--usepkgonly"] = True

	if "--getbinpkg" in emerge_config.opts:
		emerge_config.opts["--usepkg"] = True

	if "--usepkgonly" in emerge_config.opts:
		emerge_config.opts["--usepkg"] = True

	if "--buildpkgonly" in emerge_config.opts:
		# --buildpkgonly will not merge anything, so
		# it cancels all binary package options.
		for opt in ("--getbinpkg", "--getbinpkgonly",
			"--usepkg", "--usepkgonly"):
			emerge_config.opts.pop(opt, None)

	adjust_configs(emerge_config.opts, emerge_config.trees)
	apply_priorities(emerge_config.target_config.settings)

	# Reject unsupported binary package formats early.
	for fmt in emerge_config.target_config.settings["PORTAGE_BINPKG_FORMAT"].split():
		if not fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
			if "--pkg-format" in emerge_config.opts:
				problematic="--pkg-format"
			else:
				problematic="PORTAGE_BINPKG_FORMAT"

			writemsg_level(("emerge: %s is not set correctly. Format " + \
				"'%s' is not supported.\n") % (problematic, fmt),
				level=logging.ERROR, noiselevel=-1)
			return 1

	if emerge_config.action == 'version':
		writemsg_stdout(getportageversion(
			emerge_config.target_config.settings["PORTDIR"],
			None,
			emerge_config.target_config.settings.profile_path,
			emerge_config.target_config.settings["CHOST"],
			emerge_config.target_config.trees['vartree'].dbapi) + '\n',
			noiselevel=-1)
		return 0
	elif emerge_config.action == 'help':
		emerge_help()
		return 0

	spinner = stdout_spinner()
	if "candy" in emerge_config.target_config.settings.features:
		spinner.update = spinner.update_scroll

	if "--quiet" not in emerge_config.opts:
		portage.deprecated_profile_check(
			settings=emerge_config.target_config.settings)
		repo_name_check(emerge_config.trees)
		repo_name_duplicate_check(emerge_config.trees)
		config_protect_check(emerge_config.trees)
	check_procfs()

	for mytrees in emerge_config.trees.values():
		mydb = mytrees["porttree"].dbapi
		# Freeze the portdbapi for performance (memoize all xmatch results).
		mydb.freeze()

		if emerge_config.action in ('search', None) and \
			"--usepkg" in emerge_config.opts:
			# Populate the bintree with current --getbinpkg setting.
			# This needs to happen before expand_set_arguments(), in case
			# any sets use the bintree.
			try:
				mytrees["bintree"].populate(
					getbinpkgs="--getbinpkg" in emerge_config.opts)
			except ParseError as e:
				writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
					% e, noiselevel=-1)
				return 1

	del mytrees, mydb

	for x in emerge_config.args:
		if x.endswith((".ebuild", ".tbz2")) and \
			os.path.exists(os.path.abspath(x)):
			print(colorize("BAD", "\n*** emerging by path is broken "
				"and may not always work!!!\n"))
			break

	if emerge_config.action == "list-sets":
		writemsg_stdout("".join("%s\n" % s for s in
			sorted(emerge_config.target_config.sets)))
		return os.EX_OK
	elif emerge_config.action == "check-news":
		news_counts = count_unread_news(
			emerge_config.target_config.trees["porttree"].dbapi,
			emerge_config.target_config.trees["vartree"].dbapi)
		if any(news_counts.values()):
			display_news_notifications(news_counts)
		elif "--quiet" not in emerge_config.opts:
			print("", colorize("GOOD", "*"), "No news items were found.")
		return os.EX_OK

	ensure_required_sets(emerge_config.trees)

	if emerge_config.action is None and \
		"--resume" in emerge_config.opts and emerge_config.args:
		writemsg("emerge: unexpected argument(s) for --resume: %s\n" %
			" ".join(emerge_config.args), noiselevel=-1)
		return 1

	# only expand sets for actions taking package arguments
	oldargs = emerge_config.args[:]
	if emerge_config.action in ("clean", "config", "depclean",
		"info", "prune", "unmerge", None):
		newargs, retval = expand_set_arguments(
			emerge_config.args, emerge_config.action,
			emerge_config.target_config)
		if retval != os.EX_OK:
			return retval

		# Need to handle empty sets specially, otherwise emerge will react
		# with the help message for empty argument lists
		if oldargs and not newargs:
			print("emerge: no targets left after set expansion")
			return 0

		emerge_config.args = newargs

	if "--tree" in emerge_config.opts and \
		"--columns" in emerge_config.opts:
		print("emerge: can't specify both of \"--tree\" and \"--columns\".")
		return 1

	if '--emptytree' in emerge_config.opts and \
		'--noreplace' in emerge_config.opts:
		writemsg_level("emerge: can't specify both of " + \
			"\"--emptytree\" and \"--noreplace\".\n",
			level=logging.ERROR, noiselevel=-1)
		return 1

	if ("--quiet" in emerge_config.opts):
		spinner.update = spinner.update_quiet
		portage.util.noiselimit = -1

	if "--fetch-all-uri" in emerge_config.opts:
		emerge_config.opts["--fetchonly"] = True

	if "--skipfirst" in emerge_config.opts and \
		"--resume" not in emerge_config.opts:
		emerge_config.opts["--resume"] = True

	# Allow -p to remove --ask
	if "--pretend" in emerge_config.opts:
		emerge_config.opts.pop("--ask", None)

	# forbid --ask when not in a terminal
	# note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
	if ("--ask" in emerge_config.opts) and (not sys.stdin.isatty()):
		portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
			noiselevel=-1)
		return 1

	if emerge_config.target_config.settings.get("PORTAGE_DEBUG", "") == "1":
		spinner.update = spinner.update_quiet
		portage.util.noiselimit = 0
		if "python-trace" in emerge_config.target_config.settings.features:
			portage.debug.set_trace(True)

	if not ("--quiet" in emerge_config.opts):
		if '--nospinner' in emerge_config.opts or \
			emerge_config.target_config.settings.get('TERM') == 'dumb' or \
			not sys.stdout.isatty():
			spinner.update = spinner.update_basic

	if "--debug" in emerge_config.opts:
		print("myaction", emerge_config.action)
		print("myopts", emerge_config.opts)

	if not emerge_config.action and not emerge_config.args and \
		"--resume" not in emerge_config.opts:
		emerge_help()
		return 1

	pretend = "--pretend" in emerge_config.opts
	fetchonly = "--fetchonly" in emerge_config.opts or \
		"--fetch-all-uri" in emerge_config.opts
	buildpkgonly = "--buildpkgonly" in emerge_config.opts

	# check if root user is the current user for the actions where emerge needs this
	if portage.data.secpass < 2:
		# We've already allowed "--version" and "--help" above.
		if "--pretend" not in emerge_config.opts and \
			emerge_config.action not in ("search", "info"):
			need_superuser = emerge_config.action in ('clean', 'depclean',
				'deselect', 'prune', 'unmerge') or not \
				(fetchonly or \
				(buildpkgonly and portage.data.secpass >= 1) or \
				emerge_config.action in ("metadata", "regen", "sync"))
			if portage.data.secpass < 1 or \
				need_superuser:
				if need_superuser:
					access_desc = "superuser"
				else:
					access_desc = "portage group"
				# Always show portage_group_warning() when only portage group
				# access is required but the user is not in the portage group.
				if "--ask" in emerge_config.opts:
					writemsg_stdout("This action requires %s access...\n" % \
						(access_desc,), noiselevel=-1)
					if portage.data.secpass < 1 and not need_superuser:
						portage.data.portage_group_warning()
					if userquery("Would you like to add --pretend to options?",
						"--ask-enter-invalid" in emerge_config.opts) == "No":
						return 128 + signal.SIGINT
					emerge_config.opts["--pretend"] = True
					emerge_config.opts.pop("--ask")
				else:
					sys.stderr.write(("emerge: %s access is required\n") \
						% access_desc)
					if portage.data.secpass < 1 and not need_superuser:
						portage.data.portage_group_warning()
					return 1

	# Disable emergelog for everything except build or unmerge operations.
	# This helps minimize parallel emerge.log entries that can confuse log
	# parsers like genlop.
	disable_emergelog = False
	for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
		if x in emerge_config.opts:
			disable_emergelog = True
			break
	if disable_emergelog:
		pass
	elif emerge_config.action in ("search", "info"):
		disable_emergelog = True
	elif portage.data.secpass < 1:
		# Without group access the log file would not be writable.
		disable_emergelog = True

	import _emerge.emergelog
	_emerge.emergelog._disable = disable_emergelog

	if not disable_emergelog:
		emerge_log_dir = \
			emerge_config.target_config.settings.get('EMERGE_LOG_DIR')
		if emerge_log_dir:
			try:
				# At least the parent needs to exist for the lock file.
				portage.util.ensure_dirs(emerge_log_dir)
			except portage.exception.PortageException as e:
				writemsg_level("!!! Error creating directory for " + \
					"EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
					(emerge_log_dir, e),
					noiselevel=-1, level=logging.ERROR)
				portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
			else:
				_emerge.emergelog._emerge_log_dir = emerge_log_dir
		else:
			_emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
				portage.const.EPREFIX.lstrip(os.sep), "var", "log")
			portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)

	if not "--pretend" in emerge_config.opts:
		time_fmt = "%b %d, %Y %H:%M:%S"
		if sys.hexversion < 0x3000000:
			time_fmt = portage._unicode_encode(time_fmt)
		time_str = time.strftime(time_fmt, time.localtime(time.time()))
		# Avoid potential UnicodeDecodeError in Python 2, since strftime
		# returns bytes in Python 2, and %b may contain non-ascii chars.
		time_str = _unicode_decode(time_str,
			encoding=_encodings['content'], errors='replace')
		emergelog(xterm_titles, "Started emerge on: %s" % time_str)
		myelogstr=""
		if emerge_config.opts:
			opt_list = []
			for opt, arg in emerge_config.opts.items():
				if arg is True:
					opt_list.append(opt)
				elif isinstance(arg, list):
					# arguments like --exclude that use 'append' action
					for x in arg:
						opt_list.append("%s=%s" % (opt, x))
				else:
					opt_list.append("%s=%s" % (opt, arg))
			myelogstr=" ".join(opt_list)
		if emerge_config.action:
			myelogstr += " --" + emerge_config.action
		if oldargs:
			myelogstr += " " + " ".join(oldargs)
		emergelog(xterm_titles, " *** emerge " + myelogstr)

	oldargs = None

	def emergeexitsig(signum, frame):
		# Ignore further SIGTERM while exiting, then terminate with the
		# conventional 128+signum status.
		signal.signal(signal.SIGTERM, signal.SIG_IGN)
		portage.util.writemsg(
			"\n\nExiting on signal %(signal)s\n" % {"signal":signum})
		sys.exit(128 + signum)

	signal.signal(signal.SIGTERM, emergeexitsig)

	def emergeexit():
		"""This gets out final log message in before we quit."""
		if "--pretend" not in emerge_config.opts:
			emergelog(xterm_titles, " *** terminating.")
		if xterm_titles:
			xtermTitleReset()
	portage.atexit_register(emergeexit)

	if emerge_config.action in ("config", "metadata", "regen", "sync"):
		if "--pretend" in emerge_config.opts:
			sys.stderr.write(("emerge: The '%s' action does " + \
				"not support '--pretend'.\n") % emerge_config.action)
			return 1

	# Default exit status for dispatch branches (e.g. "metadata",
	# "search") that fall through to the final return without setting
	# retval themselves; previously those paths raised
	# UnboundLocalError. Any earlier retval that reaches this point is
	# already os.EX_OK, so this cannot mask an error.
	retval = os.EX_OK

	if "sync" == emerge_config.action:
		return action_sync(emerge_config)
	elif "metadata" == emerge_config.action:
		action_metadata(emerge_config.target_config.settings,
			emerge_config.target_config.trees['porttree'].dbapi,
			emerge_config.opts)
	elif emerge_config.action=="regen":
		validate_ebuild_environment(emerge_config.trees)
		return action_regen(emerge_config.target_config.settings,
			emerge_config.target_config.trees['porttree'].dbapi,
			emerge_config.opts.get("--jobs"),
			emerge_config.opts.get("--load-average"))
	# CONFIG action
	elif "config" == emerge_config.action:
		validate_ebuild_environment(emerge_config.trees)
		action_config(emerge_config.target_config.settings,
			emerge_config.trees, emerge_config.opts, emerge_config.args)

	# SEARCH action
	elif "search" == emerge_config.action:
		validate_ebuild_environment(emerge_config.trees)
		action_search(emerge_config.target_config,
			emerge_config.opts, emerge_config.args, spinner)

	elif emerge_config.action in \
		('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
		validate_ebuild_environment(emerge_config.trees)
		rval = action_uninstall(emerge_config.target_config.settings,
			emerge_config.trees, emerge_config.target_config.mtimedb["ldpath"],
			emerge_config.opts, emerge_config.action,
			emerge_config.args, spinner)
		if not (emerge_config.action == 'deselect' or
			buildpkgonly or fetchonly or pretend):
			post_emerge(emerge_config.action, emerge_config.opts,
				emerge_config.args, emerge_config.target_config.root,
				emerge_config.trees, emerge_config.target_config.mtimedb, rval)
		return rval

	elif emerge_config.action == 'info':

		# Ensure atoms are valid before calling unmerge().
		vardb = emerge_config.target_config.trees['vartree'].dbapi
		portdb = emerge_config.target_config.trees['porttree'].dbapi
		bindb = emerge_config.target_config.trees['bintree'].dbapi
		valid_atoms = []
		for x in emerge_config.args:
			if is_valid_package_atom(x, allow_repo=True):
				try:
					#look at the installed files first, if there is no match
					#look at the ebuilds, since EAPI 4 allows running pkg_info
					#on non-installed packages
					valid_atom = dep_expand(x, mydb=vardb)
					if valid_atom.cp.split("/")[0] == "null":
						valid_atom = dep_expand(x, mydb=portdb)

					if valid_atom.cp.split("/")[0] == "null" and \
						"--usepkg" in emerge_config.opts:
						valid_atom = dep_expand(x, mydb=bindb)

					valid_atoms.append(valid_atom)

				except portage.exception.AmbiguousPackageName as e:
					msg = "The short ebuild name \"" + x + \
						"\" is ambiguous. Please specify " + \
						"one of the following " + \
						"fully-qualified ebuild names instead:"
					for line in textwrap.wrap(msg, 70):
						writemsg_level("!!! %s\n" % (line,),
							level=logging.ERROR, noiselevel=-1)
					for i in e.args[0]:
						writemsg_level(" %s\n" % colorize("INFORM", i),
							level=logging.ERROR, noiselevel=-1)
					writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
					return 1
				continue
			msg = []
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)
			return 1

		return action_info(emerge_config.target_config.settings,
			emerge_config.trees, emerge_config.opts, valid_atoms)

	# "update", "system", or just process files:
	else:
		validate_ebuild_environment(emerge_config.trees)

		for x in emerge_config.args:
			if x.startswith(SETPREFIX) or \
				is_valid_package_atom(x, allow_repo=True):
				continue
			if x[:1] == os.sep:
				continue
			try:
				os.lstat(x)
				continue
			except OSError:
				pass
			msg = []
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)
			return 1

		# GLEP 42 says to display news *after* an emerge --pretend
		if "--pretend" not in emerge_config.opts:
			display_news_notification(
				emerge_config.target_config, emerge_config.opts)
		retval = action_build(emerge_config.target_config.settings,
			emerge_config.trees, emerge_config.target_config.mtimedb,
			emerge_config.opts, emerge_config.action,
			emerge_config.args, spinner)
		post_emerge(emerge_config.action, emerge_config.opts,
			emerge_config.args, emerge_config.target_config.root,
			emerge_config.trees, emerge_config.target_config.mtimedb, retval)

	return retval
diff --git a/pym/_emerge/chk_updated_cfg_files.py b/pym/_emerge/chk_updated_cfg_files.py
new file mode 100644
index 000000000..9f2ab6f3e
--- /dev/null
+++ b/pym/_emerge/chk_updated_cfg_files.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage import os
+from portage.localization import _
+from portage.output import bold, colorize, yellow
+from portage.util import writemsg_level
+
def chk_updated_cfg_files(eroot, config_protect):
	"""Print notices for pending ._cfg* config file updates found under
	the CONFIG_PROTECT paths of the given EROOT."""
	updates = list(
		portage.util.find_updated_config_files(eroot, config_protect))

	for path, files in updates:
		writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
			level=logging.INFO, noiselevel=-1)
		if not files:
			# A protected file.
			writemsg_level( _("config file '%s' needs updating.\n") % path,
				level=logging.INFO, noiselevel=-1)
		elif len(files) == 1:
			# A protected dir with a single pending update: report the
			# target name by stripping the "._cfg0000_" prefix.
			head, tail = os.path.split(files[0])
			fpath = os.path.join(head, tail[len("._cfg0000_"):])
			writemsg_level(_("config file '%s' needs updating.\n") % fpath,
				level=logging.INFO, noiselevel=-1)
		else:
			# A protected dir with several pending updates.
			writemsg_level(
				_("%d config files in '%s' need updating.\n") % \
				(len(files), path), level=logging.INFO, noiselevel=-1)

	if updates:
		print(" " + yellow("*") + " See the " +
			colorize("INFORM", _("CONFIGURATION FILES")) +
			" " + _("section of the") + " " + bold("emerge"))
		print(" " + yellow("*") + " " +
			_("man page to learn how to update config files."))
diff --git a/pym/_emerge/clear_caches.py b/pym/_emerge/clear_caches.py
index 7b7c5eced..513df626f 100644
--- a/pym/_emerge/clear_caches.py
+++ b/pym/_emerge/clear_caches.py
@@ -1,8 +1,7 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import gc
-from portage.util.listdir import dircache
def clear_caches(trees):
for d in trees.values():
@@ -15,5 +14,4 @@ def clear_caches(trees):
pass
else:
d["vartree"].dbapi._linkmap._clear_cache()
- dircache.clear()
gc.collect()
diff --git a/pym/_emerge/countdown.py b/pym/_emerge/countdown.py
index 5abdc8a96..62e3c8dea 100644
--- a/pym/_emerge/countdown.py
+++ b/pym/_emerge/countdown.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -8,15 +8,15 @@ import time
from portage.output import colorize
-def countdown(secs=5, doing="Starting"):
+
+def countdown(secs=5, doing='Starting'):
if secs:
- print(">>> Waiting",secs,"seconds before starting...")
- print(">>> (Control-C to abort)...\n"+doing+" in: ", end=' ')
- ticks=list(range(secs))
- ticks.reverse()
- for sec in ticks:
- sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
+ print(
+ '>>> Waiting %s seconds before starting...\n'
+ '>>> (Control-C to abort)...\n'
+ '%s in:' % (secs, doing), end='')
+ for sec in range(secs, 0, -1):
+ sys.stdout.write(colorize('UNMERGE_WARN', ' %i' % sec))
sys.stdout.flush()
time.sleep(1)
print()
-
diff --git a/pym/_emerge/create_depgraph_params.py b/pym/_emerge/create_depgraph_params.py
index 2838e93c3..225b792b6 100644
--- a/pym/_emerge/create_depgraph_params.py
+++ b/pym/_emerge/create_depgraph_params.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -15,11 +15,11 @@ def create_depgraph_params(myopts, myaction):
# complete: completely account for all known dependencies
# remove: build graph for use in removing packages
# rebuilt_binaries: replace installed packages with rebuilt binaries
- # rebuild_if_new_slot_abi: rebuild or reinstall packages when
- # SLOT/ABI := operator dependencies can be satisfied by a newer
- # SLOT/ABI, so that older packages slots will become eligible for
+ # rebuild_if_new_slot: rebuild or reinstall packages when
+ # slot/sub-slot := operator dependencies can be satisfied by a newer
+ # slot/sub-slot, so that older packages slots will become eligible for
# removal by the --depclean action as soon as possible
- # ignore_built_slot_abi_deps: ignore the SLOT/ABI := operator parts
+ # ignore_built_slot_operator_deps: ignore the slot/sub-slot := operator parts
# of dependencies that have been recorded when packages where built
myparams = {"recurse" : True}
@@ -27,9 +27,9 @@ def create_depgraph_params(myopts, myaction):
if bdeps is not None:
myparams["bdeps"] = bdeps
- ignore_built_slot_abi_deps = myopts.get("--ignore-built-slot-abi-deps")
- if ignore_built_slot_abi_deps is not None:
- myparams["ignore_built_slot_abi_deps"] = ignore_built_slot_abi_deps
+ ignore_built_slot_operator_deps = myopts.get("--ignore-built-slot-operator-deps")
+ if ignore_built_slot_operator_deps is not None:
+ myparams["ignore_built_slot_operator_deps"] = ignore_built_slot_operator_deps
dynamic_deps = myopts.get("--dynamic-deps")
if dynamic_deps is not None:
@@ -41,11 +41,12 @@ def create_depgraph_params(myopts, myaction):
myparams["selective"] = True
return myparams
- rebuild_if_new_slot_abi = myopts.get('--rebuild-if-new-slot-abi')
- if rebuild_if_new_slot_abi is not None:
- myparams['rebuild_if_new_slot_abi'] = rebuild_if_new_slot_abi
+ rebuild_if_new_slot = myopts.get('--rebuild-if-new-slot')
+ if rebuild_if_new_slot is not None:
+ myparams['rebuild_if_new_slot'] = rebuild_if_new_slot
if "--update" in myopts or \
+ "--newrepo" in myopts or \
"--newuse" in myopts or \
"--reinstall" in myopts or \
"--noreplace" in myopts or \
diff --git a/pym/_emerge/create_world_atom.py b/pym/_emerge/create_world_atom.py
index 35fb7c4bd..ac994cc04 100644
--- a/pym/_emerge/create_world_atom.py
+++ b/pym/_emerge/create_world_atom.py
@@ -1,7 +1,15 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import sys
+
from portage.dep import _repo_separator
+from portage.exception import InvalidData
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
def create_world_atom(pkg, args_set, root_config):
"""Create a new atom for the world file if one does not exist. If the
@@ -35,16 +43,15 @@ def create_world_atom(pkg, args_set, root_config):
for cpv in portdb.match(cp):
for repo in repos:
try:
- available_slots.add(portdb.aux_get(cpv, ["SLOT"],
- myrepo=repo)[0])
- except KeyError:
+ available_slots.add(portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
pass
slotted = len(available_slots) > 1 or \
(len(available_slots) == 1 and "0" not in available_slots)
if not slotted:
# check the vdb in case this is multislot
- available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
+ available_slots = set(vardb._pkg_str(cpv, None).slot \
for cpv in vardb.match(cp))
slotted = len(available_slots) > 1 or \
(len(available_slots) == 1 and "0" not in available_slots)
@@ -83,14 +90,14 @@ def create_world_atom(pkg, args_set, root_config):
matched_slots = set()
if mydb is vardb:
for cpv in matches:
- matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+ matched_slots.add(mydb._pkg_str(cpv, None).slot)
else:
for cpv in matches:
for repo in repos:
try:
- matched_slots.add(portdb.aux_get(cpv, ["SLOT"],
- myrepo=repo)[0])
- except KeyError:
+ matched_slots.add(
+ portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
pass
if len(matched_slots) == 1:
diff --git a/pym/_emerge/depgraph.py b/pym/_emerge/depgraph.py
index 0f3bc9389..abb70a769 100644
--- a/pym/_emerge/depgraph.py
+++ b/pym/_emerge/depgraph.py
@@ -1,34 +1,38 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
-import difflib
+import collections
import errno
import io
import logging
import stat
import sys
import textwrap
+import warnings
from collections import deque
from itertools import chain
import portage
from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
-from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._similar_name_search import similar_name_search
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
check_required_use, human_readable_required_use, match_from_list, \
_repo_separator
-from portage.dep._slot_abi import ignore_built_slot_abi_deps
-from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
-from portage.exception import (InvalidAtom, InvalidDependString,
+from portage.dep._slot_operator import ignore_built_slot_operator_deps
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
+ _get_eapi_attrs
+from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
PackageNotFound, PortageException)
from portage.output import colorize, create_color_func, \
darkgreen, green
bad = create_color_func("BAD")
+from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
@@ -38,13 +42,16 @@ from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
-from portage.util.listdir import _ignorecvs_dirs
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
from portage.versions import catpkgsplit
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
+from .chk_updated_cfg_files import chk_updated_cfg_files
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
@@ -52,6 +59,7 @@ from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import insert_category_into_atom, \
@@ -68,9 +76,10 @@ from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.userquery import userquery
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
-from _emerge.resolver.output import Display
+from _emerge.resolver.output import Display, format_unmatched_atom
if sys.hexversion >= 0x3000000:
basestring = str
@@ -115,8 +124,8 @@ class _frozen_depgraph_config(object):
self._pkg_cache = {}
self._highest_license_masked = {}
dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
- ignore_built_slot_abi_deps = myopts.get(
- "--ignore-built-slot-abi-deps", "n") == "y"
+ ignore_built_slot_operator_deps = myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
for myroot in trees:
self.trees[myroot] = {}
# Create a RootConfig instance that references
@@ -132,7 +141,7 @@ class _frozen_depgraph_config(object):
pkg_cache=self._pkg_cache,
pkg_root_config=self.roots[myroot],
dynamic_deps=dynamic_deps,
- ignore_built_slot_abi_deps=ignore_built_slot_abi_deps)
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
self.pkgsettings[myroot] = portage.config(
clone=self.trees[myroot]["vartree"].settings)
@@ -267,13 +276,12 @@ class _rebuild_config(object):
return True
elif (parent.installed and
root_slot not in self.reinstall_list):
- inst_build_time = parent.metadata.get("BUILD_TIME")
try:
bin_build_time, = bindb.aux_get(parent.cpv,
["BUILD_TIME"])
except KeyError:
continue
- if bin_build_time != inst_build_time:
+ if bin_build_time != _unicode(parent.build_time):
# 2) Remote binary package is valid, and local package
# is not up to date. Force reinstall.
reinstall = True
@@ -335,11 +343,8 @@ class _dynamic_depgraph_config(object):
self.myparams = myparams.copy()
self._vdb_loaded = False
self._allow_backtracking = allow_backtracking
- # Maps slot atom to package for each Package added to the graph.
- self._slot_pkg_map = {}
# Maps nodes to the reasons they were selected for reinstallation.
self._reinstall_nodes = {}
- self.mydbapi = {}
# Contains a filtered view of preferred packages that are selected
# from available repositories.
self._filtered_trees = {}
@@ -374,14 +379,6 @@ class _dynamic_depgraph_config(object):
 # This is used to check if we have accounted for blockers
# relevant to a package.
self._traversed_pkg_deps = set()
- # This should be ordered such that the backtracker will
- # attempt to solve conflicts which occurred earlier first,
- # since an earlier conflict can be the cause of a conflict
- # which occurs later.
- self._slot_collision_info = OrderedDict()
- # Slot collision nodes are not allowed to block other packages since
- # blocker validation is only able to account for one package per slot.
- self._slot_collision_nodes = set()
self._parent_atoms = {}
self._slot_conflict_handler = None
self._circular_dependency_handler = None
@@ -412,28 +409,31 @@ class _dynamic_depgraph_config(object):
self._needed_license_changes = backtrack_parameters.needed_license_changes
self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
- self._slot_abi_replace_installed = backtrack_parameters.slot_abi_replace_installed
+ self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
+ self._prune_rebuilds = backtrack_parameters.prune_rebuilds
self._need_restart = False
# For conditions that always require user intervention, such as
# unsatisfied REQUIRED_USE (currently has no autounmask support).
self._skip_restart = False
self._backtrack_infos = {}
+ self._buildpkgonly_deps_unsatisfied = False
self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
self._success_without_autounmask = False
self._traverse_ignored_deps = False
self._complete_mode = False
- self._slot_abi_deps = {}
+ self._slot_operator_deps = {}
+ self._package_tracker = PackageTracker()
+ # Track missed updates caused by solved conflicts.
+ self._conflict_missed_update = collections.defaultdict(dict)
for myroot in depgraph._frozen_config.trees:
self.sets[myroot] = _depgraph_sets()
- self._slot_pkg_map[myroot] = {}
vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
# This dbapi instance will model the state that the vdb will
# have after new packages have been installed.
- fakedb = PackageVirtualDbapi(vardb.settings)
+ fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)
- self.mydbapi[myroot] = fakedb
def graph_tree():
pass
graph_tree.dbapi = fakedb
@@ -446,6 +446,7 @@ class _dynamic_depgraph_config(object):
self._graph_trees[myroot]["vartree"] = graph_tree
self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
self._graph_trees[myroot]["graph"] = self.digraph
+ self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
def filtered_tree():
pass
filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
@@ -472,6 +473,7 @@ class _dynamic_depgraph_config(object):
self._filtered_trees[myroot]["graph"] = self.digraph
self._filtered_trees[myroot]["vartree"] = \
depgraph._frozen_config.trees[myroot]["vartree"]
+ self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
dbs = []
# (db, pkg_type, built, installed, db_keys)
@@ -502,8 +504,6 @@ class depgraph(object):
pkg_tree_map = RootConfig.pkg_tree_map
- _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
-
def __init__(self, settings, trees, myopts, myparams, spinner,
frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
if frozen_config is None:
@@ -517,6 +517,9 @@ class depgraph(object):
self._select_atoms = self._select_atoms_highest_available
self._select_package = self._select_pkg_highest_available
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+
def _load_vdb(self):
"""
Load installed package metadata if appropriate. This used to be called
@@ -535,10 +538,6 @@ class depgraph(object):
preload_installed_pkgs = \
"--nodeps" not in self._frozen_config.myopts
- if self._frozen_config.myopts.get("--root-deps") is not None and \
- myroot != self._frozen_config.target_root:
- continue
-
fake_vartree = self._frozen_config.trees[myroot]["vartree"]
if not fake_vartree.dbapi:
# This needs to be called for the first depgraph, but not for
@@ -552,24 +551,157 @@ class depgraph(object):
if preload_installed_pkgs:
vardb = fake_vartree.dbapi
- fakedb = self._dynamic_config._graph_trees[
- myroot]["vartree"].dbapi
- for pkg in vardb:
- self._spinner_update()
- if dynamic_deps:
- # This causes FakeVartree to update the
- # Package instance dependencies via
- # PackageVirtualDbapi.aux_update()
- vardb.aux_get(pkg.cpv, [])
- fakedb.cpv_inject(pkg)
+ if not dynamic_deps:
+ for pkg in vardb:
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ else:
+ max_jobs = self._frozen_config.myopts.get("--jobs")
+ max_load = self._frozen_config.myopts.get("--load-average")
+ scheduler = TaskScheduler(
+ self._dynamic_deps_preload(fake_vartree),
+ max_jobs=max_jobs,
+ max_load=max_load,
+ event_loop=fake_vartree._portdb._event_loop)
+ scheduler.start()
+ scheduler.wait()
self._dynamic_config._vdb_loaded = True
+ def _dynamic_deps_preload(self, fake_vartree):
+ portdb = fake_vartree._portdb
+ for pkg in fake_vartree.dbapi:
+ self._spinner_update()
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ ebuild_path, repo_path = \
+ portdb.findname2(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ fake_vartree.dynamic_deps_preload(pkg, None)
+ continue
+ metadata, ebuild_hash = portdb._pull_valid_cache(
+ pkg.cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ fake_vartree.dynamic_deps_preload(pkg, metadata)
+ else:
+ proc = EbuildMetadataPhase(cpv=pkg.cpv,
+ ebuild_hash=ebuild_hash,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings)
+ proc.addExitListener(
+ self._dynamic_deps_proc_exit(pkg, fake_vartree))
+ yield proc
+
+ class _dynamic_deps_proc_exit(object):
+
+ __slots__ = ('_pkg', '_fake_vartree')
+
+ def __init__(self, pkg, fake_vartree):
+ self._pkg = pkg
+ self._fake_vartree = fake_vartree
+
+ def __call__(self, proc):
+ metadata = None
+ if proc.returncode == os.EX_OK:
+ metadata = proc.metadata
+ self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
+
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
+ def _compute_abi_rebuild_info(self):
+ """
+ Fill self._forced_rebuilds with packages that cause rebuilds.
+ """
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ # Get all atoms that might have caused a forced rebuild.
+ atoms = {}
+ for s in self._dynamic_config._initial_arg_list:
+ if s.force_reinstall:
+ root = s.root_config.root
+ atoms.setdefault(root, set()).update(s.pset)
+
+ if debug:
+ writemsg_level("forced reinstall atoms:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in atoms:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for atom in atoms[root]:
+ writemsg_level(" atom: %s\n" % atom,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Go through all slot operator deps and check if one of these deps
+ # has a parent that is matched by one of the atoms from above.
+ forced_rebuilds = {}
+ for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
+ rebuild_atoms = atoms.get(root, set())
+
+ for dep in deps:
+ if getattr(dep.parent, "installed", False) or dep.child.installed or \
+ dep.parent.slot_atom not in rebuild_atoms:
+ continue
+
+ # Make sure the child's slot/subslot has changed. If it hasn't,
+ # then another child has forced this rebuild.
+ installed_pkg = self._select_pkg_from_installed(root, dep.child.slot_atom)[0]
+ if installed_pkg and installed_pkg.slot == dep.child.slot and \
+ installed_pkg.sub_slot == dep.child.sub_slot:
+ continue
+
+ # The child has forced a rebuild of the parent
+ forced_rebuilds.setdefault(root, {}).setdefault(dep.child, set()).add(dep.parent)
+
+ if debug:
+ writemsg_level("slot operator dependencies:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
+ writemsg_level(" (%s, %s)\n" % \
+ (root, slot_atom), level=logging.DEBUG, noiselevel=-1)
+ for dep in deps:
+ writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)
+
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+
+ writemsg_level("forced rebuilds:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in forced_rebuilds:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for child in forced_rebuilds[root]:
+ writemsg_level(" child: %s\n" % child,
+ level=logging.DEBUG, noiselevel=-1)
+ for parent in forced_rebuilds[root][child]:
+ writemsg_level(" parent: %s\n" % parent,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ self._forced_rebuilds = forced_rebuilds
+
+ def _show_abi_rebuild_info(self):
+
+ if not self._forced_rebuilds:
+ return
+
+ writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)
+
+ for root in self._forced_rebuilds:
+ for child in self._forced_rebuilds[root]:
+ writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
+ for parent in self._forced_rebuilds[root][child]:
+ writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
+
def _show_ignored_binaries(self):
"""
Show binaries that have been ignored because their USE didn't
@@ -583,26 +715,23 @@ class depgraph(object):
for pkg in list(self._dynamic_config.ignored_binaries):
- selected_pkg = self._dynamic_config.mydbapi[pkg.root
- ].match_pkgs(pkg.slot_atom)
+ selected_pkg = list()
- if not selected_pkg:
- continue
+ for selected_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
- selected_pkg = selected_pkg[-1]
- if selected_pkg > pkg:
- self._dynamic_config.ignored_binaries.pop(pkg)
- continue
+ if selected_pkg > pkg:
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
- if selected_pkg.installed and \
- selected_pkg.cpv == pkg.cpv and \
- selected_pkg.metadata.get('BUILD_TIME') == \
- pkg.metadata.get('BUILD_TIME'):
- # We don't care about ignored binaries when an
- # identical installed instance is selected to
- # fill the slot.
- self._dynamic_config.ignored_binaries.pop(pkg)
- continue
+ if selected_pkg.installed and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.build_time == pkg.build_time:
+ # We don't care about ignored binaries when an
+ # identical installed instance is selected to
+ # fill the slot.
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
if not self._dynamic_config.ignored_binaries:
return
@@ -613,11 +742,17 @@ class depgraph(object):
"due to non matching USE:\n\n", noiselevel=-1)
for pkg, flags in self._dynamic_config.ignored_binaries.items():
- writemsg(" =%s" % pkg.cpv, noiselevel=-1)
+ flag_display = []
+ for flag in sorted(flags):
+ if flag not in pkg.use.enabled:
+ flag = "-" + flag
+ flag_display.append(flag)
+ flag_display = " ".join(flag_display)
+ # The user can paste this line into package.use
+ writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
- writemsg(" for %s" % (pkg.root,), noiselevel=-1)
- writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
- noiselevel=-1)
+ writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
msg = [
"",
@@ -631,31 +766,44 @@ class depgraph(object):
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
- def _show_missed_update(self):
+ def _get_missed_updates(self):
# In order to minimize noise, show only the highest
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in \
- self._dynamic_config._runtime_pkg_mask.items():
+ chain(self._dynamic_config._runtime_pkg_mask.items(),
+ self._dynamic_config._conflict_missed_update.items()):
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
continue
- chosen_pkg = self._dynamic_config.mydbapi[pkg.root
- ].match_pkgs(pkg.slot_atom)
- if not chosen_pkg or chosen_pkg[-1] >= pkg:
- continue
- k = (pkg.root, pkg.slot_atom)
- if k in missed_updates:
- other_pkg, mask_type, parent_atoms = missed_updates[k]
- if other_pkg > pkg:
- continue
- for mask_type, parent_atoms in mask_reasons.items():
- if not parent_atoms:
- continue
- missed_updates[k] = (pkg, mask_type, parent_atoms)
- break
+ missed_update = True
+ any_selected = False
+ for chosen_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
+ any_selected = True
+ if chosen_pkg > pkg or (not chosen_pkg.installed and \
+ chosen_pkg.version == pkg.version):
+ missed_update = False
+ break
+ if any_selected and missed_update:
+ k = (pkg.root, pkg.slot_atom)
+ if k in missed_updates:
+ other_pkg, mask_type, parent_atoms = missed_updates[k]
+ if other_pkg > pkg:
+ continue
+ for mask_type, parent_atoms in mask_reasons.items():
+ if not parent_atoms:
+ continue
+ missed_updates[k] = (pkg, mask_type, parent_atoms)
+ break
+
+ return missed_updates
+
+ def _show_missed_update(self):
+
+ missed_updates = self._get_missed_updates()
if not missed_updates:
return
@@ -726,7 +874,7 @@ class depgraph(object):
self._show_merge_list()
msg = []
- msg.append("\nWARNING: One or more updates have been " + \
+ msg.append("\nWARNING: One or more updates/rebuilds have been " + \
"skipped due to a dependency conflict:\n\n")
indent = " "
@@ -736,22 +884,29 @@ class depgraph(object):
msg.append(" for %s" % (pkg.root,))
msg.append("\n\n")
- for parent, atom in parent_atoms:
- msg.append(indent)
- msg.append(str(pkg))
+ msg.append(indent)
+ msg.append(str(pkg))
+ msg.append(" conflicts with\n")
- msg.append(" conflicts with\n")
- msg.append(2*indent)
+ for parent, atom in parent_atoms:
if isinstance(parent,
(PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
+ msg.append(2*indent)
msg.append(str(parent))
+ msg.append("\n")
else:
# Display the specific atom from SetArg or
# Package types.
- msg.append("%s required by %s" % (atom, parent))
- msg.append("\n")
+ atom, marker = format_unmatched_atom(
+ pkg, atom, self._pkg_use_enabled)
+
+ msg.append(2*indent)
+ msg.append("%s required by %s\n" % (atom, parent))
+ msg.append(2*indent)
+ msg.append(marker)
+ msg.append("\n")
msg.append("\n")
writemsg("".join(msg), noiselevel=-1)
@@ -764,7 +919,7 @@ class depgraph(object):
cases.
"""
- if not self._dynamic_config._slot_collision_info:
+ if not any(self._dynamic_config._package_tracker.slot_conflicts()):
return
self._show_merge_list()
@@ -774,7 +929,7 @@ class depgraph(object):
conflict = handler.get_conflict()
writemsg(conflict, noiselevel=-1)
-
+
explanation = handler.get_explanation()
if explanation:
writemsg(explanation, noiselevel=-1)
@@ -813,6 +968,239 @@ class depgraph(object):
writemsg(line + '\n', noiselevel=-1)
writemsg('\n', noiselevel=-1)
+ def _solve_non_slot_operator_slot_conflicts(self):
+ """
+ This function solves slot conflicts which can
+ be solved by simply choosing one of the conflicting
+ packages and removing all the other ones.
+ It is able to solve somewhat more complex cases where
+ conflicts can only be solved simultaneously.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+
+ # List all conflicts. Ignore those that involve slot operator rebuilds
+ # as the logic there needs special slot conflict behavior which isn't
+ # provided by this function.
+ conflicts = []
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ slot_key = conflict.root, conflict.atom
+ if slot_key not in self._dynamic_config._slot_operator_replace_installed:
+ conflicts.append(conflict)
+
+ if not conflicts:
+ return
+
+ # Get a set of all conflicting packages.
+ conflict_pkgs = set()
+ for conflict in conflicts:
+ conflict_pkgs.update(conflict)
+
+ # Get the list of other packages which are only
+ # required by conflict packages.
+ indirect_conflict_candidates = set()
+ for pkg in conflict_pkgs:
+ indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg))
+ indirect_conflict_candidates.difference_update(conflict_pkgs)
+
+ indirect_conflict_pkgs = set()
+ while indirect_conflict_candidates:
+ pkg = indirect_conflict_candidates.pop()
+
+ only_conflict_parents = True
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
+ only_conflict_parents = False
+ break
+ if not only_conflict_parents:
+ continue
+
+ indirect_conflict_pkgs.add(pkg)
+ for child in self._dynamic_config.digraph.child_nodes(pkg):
+ if child in conflict_pkgs or child in indirect_conflict_pkgs:
+ continue
+ indirect_conflict_candidates.add(child)
+
+ # Create a graph containing the conflict packages
+ # and a special 'non_conflict_node' that represents
+ # all non-conflict packages.
+ conflict_graph = digraph()
+
+ non_conflict_node = "(non-conflict package)"
+ conflict_graph.add(non_conflict_node, None)
+
+ for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
+ conflict_graph.add(pkg, None)
+
+ # Add parent->child edges for each conflict package.
+ # Parents, which aren't conflict packages are represented
+ # by 'non_conflict_node'.
+ # If several conflicting packages are matched, but not all,
+ # add a tuple with the matched packages to the graph.
+ class or_tuple(tuple):
+ """
+ Helper class for debug printing.
+ """
+ def __str__(self):
+ return "(%s)" % ",".join(str(pkg) for pkg in self)
+
+ for conflict in conflicts:
+ all_parent_atoms = set()
+ for pkg in conflict:
+ all_parent_atoms.update(
+ self._dynamic_config._parent_atoms.get(pkg, []))
+
+ for parent, atom in all_parent_atoms:
+ is_arg_parent = isinstance(parent, AtomArg)
+
+ if parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs:
+ parent = non_conflict_node
+
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+
+ matched = []
+ for pkg in conflict:
+ if atom_set.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)) and \
+ not (is_arg_parent and pkg.installed):
+ matched.append(pkg)
+ if len(matched) == len(conflict):
+ # All packages match.
+ continue
+ elif len(matched) == 1:
+ conflict_graph.add(matched[0], parent)
+ else:
+ # More than one package matched, but not all.
+ conflict_graph.add(or_tuple(matched), parent)
+
+ for pkg in indirect_conflict_pkgs:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs:
+ parent = non_conflict_node
+ conflict_graph.add(pkg, parent)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict graph:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ conflict_graph.debug_print()
+
+ # Now select required packages. Collect them in the
+ # 'forced' set.
+ forced = set([non_conflict_node])
+ unexplored = set([non_conflict_node])
+ # or_tuples get special handling. We first explore
+ # all packages in the hope of having forced one of
+ # the packages in the tuple. This way we don't have
+ # to choose one.
+ unexplored_tuples = set()
+
+ while unexplored:
+ # Handle all unexplored packages.
+ while unexplored:
+ node = unexplored.pop()
+ for child in conflict_graph.child_nodes(node):
+ if child in forced:
+ continue
+ forced.add(child)
+ if isinstance(child, Package):
+ unexplored.add(child)
+ else:
+ unexplored_tuples.add(child)
+
+ # Now handle unexplored or_tuples. Move on with packages
+ # once we had to choose one.
+ while unexplored_tuples:
+ nodes = unexplored_tuples.pop()
+ if any(node in forced for node in nodes):
+ # At least one of the packages in the
+ # tuple is already forced, which means the
+ # dependency represented by this tuple
+ # is satisfied.
+ continue
+
+ # We now have to choose one of packages in the tuple.
+ # In theory one could solve more conflicts if we'd be
+ # able to try different choices here, but that has lots
+ # of other problems. For now choose the package that was
+ # pulled first, as this should be the most desirable choice
+ # (otherwise it wouldn't have been the first one).
+ forced.add(nodes[0])
+ unexplored.add(nodes[0])
+ break
+
+ # Remove 'non_conflict_node' and or_tuples from 'forced'.
+ forced = set(pkg for pkg in forced if isinstance(pkg, Package))
+ non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict solution:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ for conflict in conflicts:
+ writemsg_level(
+ " Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
+ level=logging.DEBUG, noiselevel=-1)
+ for pkg in conflict:
+ if pkg in forced:
+ writemsg_level(
+ " keep: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ writemsg_level(
+ " remove: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ broken_packages = set()
+ for pkg in non_forced:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if isinstance(parent, Package) and parent not in non_forced:
+ # Non-forcing set args are expected to be a parent of all
+ # packages in the conflict.
+ broken_packages.add(parent)
+ self._remove_pkg(pkg)
+
+ # Process the dependencies of chosen conflict packages
+ # again to properly account for blockers.
+ broken_packages.update(forced)
+
+ # Filter out broken packages which have been removed during
+ # recursive removal in self._remove_pkg.
+ broken_packages = list(pkg for pkg in broken_packages if pkg in broken_packages \
+ if self._dynamic_config._package_tracker.contains(pkg, installed=False))
+
+ self._dynamic_config._dep_stack.extend(broken_packages)
+
+ if broken_packages:
+ # Process dependencies. This cannot fail because we just ensured that
+ # the remaining packages satisfy all dependencies.
+ self._create_graph()
+
+ # Record missed updates.
+ for conflict in conflicts:
+ if not any(pkg in non_forced for pkg in conflict):
+ continue
+ for pkg in conflict:
+ if pkg not in non_forced:
+ continue
+
+ for other in conflict:
+ if other is pkg:
+ continue
+
+ for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ self._dynamic_config._conflict_missed_update[pkg].setdefault(
+ "slot conflict", set())
+ self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
+ (parent, atom))
+
+
def _process_slot_conflicts(self):
"""
If there are any slot conflicts and backtracking is enabled,
@@ -820,16 +1208,21 @@ class depgraph(object):
is called, so that all relevant reverse dependencies are
available for use in backtracking decisions.
"""
- for (slot_atom, root), slot_nodes in \
- self._dynamic_config._slot_collision_info.items():
- self._process_slot_conflict(root, slot_atom, slot_nodes)
- def _process_slot_conflict(self, root, slot_atom, slot_nodes):
+ self._solve_non_slot_operator_slot_conflicts()
+
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ self._process_slot_conflict(conflict)
+
+ def _process_slot_conflict(self, conflict):
"""
Process slot conflict data to identify specific atoms which
lead to conflict. These atoms only match a subset of the
packages that have been pulled into a given slot.
"""
+ root = conflict.root
+ slot_atom = conflict.atom
+ slot_nodes = conflict.pkgs
debug = "--debug" in self._frozen_config.myopts
@@ -897,21 +1290,13 @@ class depgraph(object):
all_parents, conflict_pkgs):
debug = "--debug" in self._frozen_config.myopts
- existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ root, slot_atom, installed=False))
+ # In order to avoid a missed update, first mask lower versions
+ # that conflict with higher versions (the backtracker visits
+ # these in reverse order).
+ conflict_pkgs.sort(reverse=True)
backtrack_data = []
- # The ordering of backtrack_data can make
- # a difference here, because both mask actions may lead
- # to valid, but different, solutions and the one with
- # 'existing_node' masked is usually the better one. Because
- # of that, we choose an order such that
- # the backtracker will first explore the choice with
- # existing_node masked. The backtracker reverses the
- # order, so the order it uses is the reverse of the
- # order shown here. See bug #339606.
- if existing_node in conflict_pkgs and \
- existing_node is not conflict_pkgs[-1]:
- conflict_pkgs.remove(existing_node)
- conflict_pkgs.append(existing_node)
for to_be_masked in conflict_pkgs:
# For missed update messages, find out which
# atoms matched to_be_selected that did not
@@ -922,19 +1307,6 @@ class depgraph(object):
if parent_atom not in parent_atoms)
backtrack_data.append((to_be_masked, conflict_atoms))
- if len(backtrack_data) > 1:
- # NOTE: Generally, we prefer to mask the higher
- # version since this solves common cases in which a
- # lower version is needed so that all dependencies
- # will be satisfied (bug #337178). However, if
- # existing_node happens to be installed then we
- # mask that since this is a common case that is
- # triggered when --update is not enabled.
- if existing_node.installed:
- pass
- elif any(pkg > existing_node for pkg in conflict_pkgs):
- backtrack_data.reverse()
-
to_be_masked = backtrack_data[-1][0]
self._dynamic_config._backtrack_infos.setdefault(
@@ -956,7 +1328,7 @@ class depgraph(object):
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
"""
- If one or more conflict atoms have a SLOT/ABI dep that can be resolved
+ If one or more conflict atoms have a slot/sub-slot dep that can be resolved
by rebuilding the parent package, then schedule the rebuild via
backtracking, and return True. Otherwise, return False.
"""
@@ -964,7 +1336,7 @@ class depgraph(object):
found_update = False
for parent_atom, conflict_pkgs in conflict_atoms.items():
parent, atom = parent_atom
- if atom.slot_abi_op != "=" or not parent.built:
+ if atom.slot_operator != "=" or not parent.built:
continue
if pkg not in conflict_pkgs:
@@ -977,13 +1349,96 @@ class depgraph(object):
dep = Dependency(atom=atom, child=other_pkg,
parent=parent, root=pkg.root)
- if self._slot_abi_update_probe(dep):
- self._slot_abi_update_backtrack(dep)
+ new_dep = \
+ self._slot_operator_update_probe_slot_conflict(dep)
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_dep=new_dep)
found_update = True
return found_update
- def _slot_abi_update_backtrack(self, dep, new_child_slot=None):
+ def _slot_change_probe(self, dep):
+ """
+ @rtype: bool
+ @return: True if dep.child should be rebuilt due to a change
+ in sub-slot (without revbump, as in bug #456208).
+ """
+ if not (isinstance(dep.parent, Package) and \
+ not dep.parent.built and dep.child.built):
+ return None
+
+ root_config = self._frozen_config.roots[dep.root]
+ matches = []
+ try:
+ matches.append(self._pkg(dep.child.cpv, "ebuild",
+ root_config, myrepo=dep.child.repo))
+ except PackageNotFound:
+ pass
+
+ for unbuilt_child in chain(matches,
+ self._iter_match_pkgs(root_config, "ebuild",
+ Atom("=%s" % (dep.child.cpv,)))):
+ if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
+ continue
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ unbuilt_child,
+ modified_use=self._pkg_use_enabled(unbuilt_child)):
+ continue
+ if not self._pkg_visibility_check(unbuilt_child):
+ continue
+ break
+ else:
+ return None
+
+ if unbuilt_child.slot == dep.child.slot and \
+ unbuilt_child.sub_slot == dep.child.sub_slot:
+ return None
+
+ return unbuilt_child
+
+ def _slot_change_backtrack(self, dep, new_child_slot):
+ child = dep.child
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot/sub-slot change:")
+ msg.append(" child package: %s" % child)
+ msg.append(" child slot: %s/%s" %
+ (child.slot, child.sub_slot))
+ msg.append(" new child: %s" % new_child_slot)
+ msg.append(" new child slot: %s/%s" %
+ (new_child_slot.slot, new_child_slot.sub_slot))
+ msg.append(" parent package: %s" % dep.parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not child.installed:
+ masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if child.installed:
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ reinstalls.add((child.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
+ new_dep=None):
if new_child_slot is None:
child = dep.child
else:
@@ -997,6 +1452,8 @@ class depgraph(object):
if new_child_slot is not None:
msg.append(" new child slot package: %s" % new_child_slot)
msg.append(" parent package: %s" % dep.parent)
+ if new_dep is not None:
+ msg.append(" new parent pkg: %s" % new_dep.parent)
msg.append(" atom: %s" % dep.atom)
msg.append("")
writemsg_level("\n".join(msg),
@@ -1008,28 +1465,54 @@ class depgraph(object):
abi_masks = {}
if new_child_slot is None:
if not child.installed:
- abi_masks.setdefault(child, {})["slot_abi_mask_built"] = None
+ abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
if not dep.parent.installed:
- abi_masks.setdefault(dep.parent, {})["slot_abi_mask_built"] = None
+ abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
if abi_masks:
- config.setdefault("slot_abi_mask_built", {}).update(abi_masks)
+ config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
# trigger replacement of installed packages if necessary
abi_reinstalls = set()
if dep.parent.installed:
- abi_reinstalls.add((dep.parent.root, dep.parent.slot_atom))
+ if new_dep is not None:
+ replacement_atom = new_dep.parent.slot_atom
+ else:
+ replacement_atom = self._replace_installed_atom(dep.parent)
+ if replacement_atom is not None:
+ abi_reinstalls.add((dep.parent.root, replacement_atom))
if new_child_slot is None and child.installed:
- abi_reinstalls.add((child.root, child.slot_atom))
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ abi_reinstalls.add((child.root, replacement_atom))
if abi_reinstalls:
- config.setdefault("slot_abi_replace_installed",
+ config.setdefault("slot_operator_replace_installed",
set()).update(abi_reinstalls)
self._dynamic_config._need_restart = True
- def _slot_abi_update_probe(self, dep, new_child_slot=False):
+ def _slot_operator_update_probe_slot_conflict(self, dep):
+ new_dep = self._slot_operator_update_probe(dep, slot_conflict=True)
+
+ if new_dep is not None:
+ return new_dep
+
+ if self._dynamic_config._autounmask is True:
+
+ for autounmask_level in self._autounmask_levels():
+
+ new_dep = self._slot_operator_update_probe(dep,
+ slot_conflict=True, autounmask_level=autounmask_level)
+
+ if new_dep is not None:
+ return new_dep
+
+ return None
+
+ def _slot_operator_update_probe(self, dep, new_child_slot=False,
+ slot_conflict=False, autounmask_level=None):
"""
- SLOT/ABI := operators tend to prevent updates from getting pulled in,
- since installed packages pull in packages with the SLOT/ABI that they
+ slot/sub-slot := operators tend to prevent updates from getting pulled in,
+ since installed packages pull in packages with the slot/sub-slot that they
were built against. Detect this case so that we can schedule rebuilds
and reinstalls when appropriate.
NOTE: This function only searches for updates that involve upgrades
@@ -1048,20 +1531,70 @@ class depgraph(object):
return None
debug = "--debug" in self._frozen_config.myopts
+ selective = "selective" in self._dynamic_config.myparams
want_downgrade = None
+ def check_reverse_dependencies(existing_pkg, candidate_pkg):
+ """
+ Check if candidate_pkg satisfies all of existing_pkg's non-
+ slot operator parents.
+ """
+ for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
+ if atom.slot_operator == "=" and parent.built:
+ continue
+
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if not atom_set.findAtomForPackage(candidate_pkg,
+ modified_use=self._pkg_use_enabled(candidate_pkg)):
+ return False
+ return True
+
+
for replacement_parent in self._iter_similar_available(dep.parent,
- dep.parent.slot_atom):
+ dep.parent.slot_atom, autounmask_level=autounmask_level):
- for atom in replacement_parent.validated_atoms:
- if not atom.slot_abi_op == "=" or \
- atom.blocker or \
+ if not check_reverse_dependencies(dep.parent, replacement_parent):
+ continue
+
+ selected_atoms = None
+
+ atoms = set()
+ invalid_metadata = False
+ for dep_key in ("DEPEND", "HDEPEND", "RDEPEND", "PDEPEND"):
+ dep_string = replacement_parent._metadata[dep_key]
+ if not dep_string:
+ continue
+
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(replacement_parent),
+ is_valid_flag=replacement_parent.iuse.is_valid_flag,
+ flat=True, token_class=Atom,
+ eapi=replacement_parent.eapi)
+ except portage.exception.InvalidDependString:
+ invalid_metadata = True
+ break
+
+ atoms.update(token for token in dep_string if isinstance(token, Atom))
+
+ if invalid_metadata:
+ continue
+
+ # List of list of child,atom pairs for each atom.
+ replacement_candidates = []
+ # Set of all packages all atoms can agree on.
+ all_candidate_pkgs = None
+
+ for atom in atoms:
+ if atom.blocker or \
atom.cp != dep.atom.cp:
continue
# Discard USE deps, we're only searching for an approximate
# pattern, and dealing with USE states is too complex for
# this purpose.
+ unevaluated_atom = atom.unevaluated_atom
atom = atom.without_use
if replacement_parent.built and \
@@ -1071,11 +1604,13 @@ class depgraph(object):
# parent and search for another.
break
+ candidate_pkg_atoms = []
+ candidate_pkgs = []
for pkg in self._iter_similar_available(
dep.child, atom):
if pkg.slot == dep.child.slot and \
- pkg.slot_abi == dep.child.slot_abi:
- # If SLOT/ABI is identical, then there's
+ pkg.sub_slot == dep.child.sub_slot:
+ # If slot/sub-slot is identical, then there's
# no point in updating.
continue
if new_child_slot:
@@ -1093,39 +1628,192 @@ class depgraph(object):
want_downgrade = self._downgrade_probe(dep.child)
# be careful not to trigger a rebuild when
# the only version available with a
- # different slot_abi is an older version
+ # different slot_operator is an older version
if not want_downgrade:
continue
+ insignificant = False
+ if not slot_conflict and \
+ selective and \
+ dep.parent.installed and \
+ dep.child.installed and \
+ dep.parent >= replacement_parent and \
+ dep.child.cpv == pkg.cpv:
+ # Then can happen if the child's sub-slot changed
+ # without a revision bump. The sub-slot change is
+ # considered insignificant until one of its parent
+ # packages needs to be rebuilt (which may trigger a
+ # slot conflict).
+ insignificant = True
+
+ if not insignificant:
+ # Evaluate USE conditionals and || deps, in order
+ # to see if this atom is really desirable, since
+ # otherwise we may trigger an undesirable rebuild
+ # as in bug #460304.
+ if selected_atoms is None:
+ selected_atoms = self._select_atoms_probe(
+ dep.child.root, replacement_parent)
+ if unevaluated_atom not in selected_atoms:
+ continue
+
+ if not insignificant and \
+ check_reverse_dependencies(dep.child, pkg):
+
+ candidate_pkg_atoms.append((pkg, unevaluated_atom))
+ candidate_pkgs.append(pkg)
+ replacement_candidates.append(candidate_pkg_atoms)
+ if all_candidate_pkgs is None:
+ all_candidate_pkgs = set(candidate_pkgs)
+ else:
+ all_candidate_pkgs.intersection_update(candidate_pkgs)
+
+ if not all_candidate_pkgs:
+ # If the atoms that connect parent and child can't agree on
+ # any replacement child, we can't do anything.
+ continue
+
+ # Now select one of the pkgs as replacement. This is as easy as
+ # selecting the highest version.
+ # The more complicated part is to choose an atom for the
+ # new Dependency object. Choose the one which ranked the selected
+ # parent highest.
+ selected = None
+ for candidate_pkg_atoms in replacement_candidates:
+ for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
+ if pkg not in all_candidate_pkgs:
+ continue
+ if selected is None or \
+ selected[0] < pkg or \
+ (selected[0] is pkg and i < selected[2]):
+ selected = (pkg, atom, i)
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % selected[0])
+ msg.append(" new parent package: %s" % replacement_parent)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return Dependency(parent=replacement_parent,
+ child=selected[0], atom=selected[1])
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % None)
+ msg.append(" new parent package: %s" % None)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return None
+
+ def _slot_operator_unsatisfied_probe(self, dep):
+
+ if dep.parent.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
+ modified_use=self._pkg_use_enabled(dep.parent)):
+ return False
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ for replacement_parent in self._iter_similar_available(dep.parent,
+ dep.parent.slot_atom):
+
+ for atom in replacement_parent.validated_atoms:
+ if not atom.slot_operator == "=" or \
+ atom.blocker or \
+ atom.cp != dep.atom.cp:
+ continue
+
+ # Discard USE deps, we're only searching for an approximate
+ # pattern, and dealing with USE states is too complex for
+ # this purpose.
+ atom = atom.without_use
+
+ pkg, existing_node = self._select_package(dep.root, atom,
+ onlydeps=dep.onlydeps)
+
+ if pkg is not None:
+
if debug:
msg = []
msg.append("")
msg.append("")
- msg.append("slot_abi_update_probe:")
- msg.append(" existing child package: %s" % dep.child)
+ msg.append("slot_operator_unsatisfied_probe:")
msg.append(" existing parent package: %s" % dep.parent)
- msg.append(" new child package: %s" % pkg)
+ msg.append(" existing parent atom: %s" % dep.atom)
msg.append(" new parent package: %s" % replacement_parent)
+ msg.append(" new child package: %s" % pkg)
msg.append("")
writemsg_level("\n".join(msg),
noiselevel=-1, level=logging.DEBUG)
- return pkg
+ return True
if debug:
msg = []
msg.append("")
msg.append("")
- msg.append("slot_abi_update_probe:")
- msg.append(" existing child package: %s" % dep.child)
+ msg.append("slot_operator_unsatisfied_probe:")
msg.append(" existing parent package: %s" % dep.parent)
- msg.append(" new child package: %s" % None)
+ msg.append(" existing parent atom: %s" % dep.atom)
msg.append(" new parent package: %s" % None)
+ msg.append(" new child package: %s" % None)
msg.append("")
writemsg_level("\n".join(msg),
noiselevel=-1, level=logging.DEBUG)
- return None
+ return False
+
+ def _slot_operator_unsatisfied_backtrack(self, dep):
+
+ parent = dep.parent
+
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied "
+ "built slot-operator dep:")
+ msg.append(" parent package: %s" % parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not parent.installed:
+ masks.setdefault(parent, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if parent.installed:
+ replacement_atom = self._replace_installed_atom(parent)
+ if replacement_atom is not None:
+ reinstalls.add((parent.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
def _downgrade_probe(self, pkg):
"""
@@ -1142,7 +1830,19 @@ class depgraph(object):
return available_pkg is not None
- def _iter_similar_available(self, graph_pkg, atom):
+ def _select_atoms_probe(self, root, pkg):
+ selected_atoms = []
+ use = self._pkg_use_enabled(pkg)
+ for k in pkg._dep_keys:
+ v = pkg._metadata.get(k)
+ if not v:
+ continue
+ selected_atoms.extend(self._select_atoms(
+ root, v, myuse=use, parent=pkg)[pkg])
+ return frozenset(x.unevaluated_atom for
+ x in selected_atoms)
+
+ def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
"""
Given a package that's in the graph, do a rough check to
see if a similar package is available to install. The given
@@ -1166,49 +1866,91 @@ class depgraph(object):
if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
modified_use=self._pkg_use_enabled(pkg)):
continue
- if not self._pkg_visibility_check(pkg):
- continue
if pkg.built:
if self._equiv_binary_installed(pkg):
continue
if not (not use_ebuild_visibility and
(usepkgonly or useoldpkg_atoms.findAtomForPackage(
pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
- not self._equiv_ebuild_visible(pkg):
+ not self._equiv_ebuild_visible(pkg,
+ autounmask_level=autounmask_level):
continue
+ if not self._pkg_visibility_check(pkg,
+ autounmask_level=autounmask_level):
+ continue
yield pkg
- def _slot_abi_trigger_reinstalls(self):
+ def _replace_installed_atom(self, inst_pkg):
+ """
+ Given an installed package, generate an atom suitable for
+ slot_operator_replace_installed backtracking info. The replacement
+ SLOT may differ from the installed SLOT, so first search by cpv.
"""
- Search for packages with slot-abi deps on older slots, and schedule
+ built_pkgs = []
+ for pkg in self._iter_similar_available(inst_pkg,
+ Atom("=%s" % inst_pkg.cpv)):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ if built_pkgs:
+ best_version = None
+ for pkg in built_pkgs:
+ if best_version is None or pkg > best_version:
+ best_version = pkg
+ return best_version.slot_atom
+
+ return None
+
+ def _slot_operator_trigger_reinstalls(self):
+ """
+ Search for packages with slot-operator deps on older slots, and schedule
rebuilds if they can link to a newer slot that's in the graph.
"""
- rebuild_if_new_slot_abi = self._dynamic_config.myparams.get(
- "rebuild_if_new_slot_abi", "y") == "y"
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
- for slot_key, slot_info in self._dynamic_config._slot_abi_deps.items():
+ for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
for dep in slot_info:
- if not (dep.child.built and dep.parent and
+
+ atom = dep.atom
+ if atom.slot_operator is None:
+ continue
+
+ if not atom.slot_operator_built:
+ new_child_slot = self._slot_change_probe(dep)
+ if new_child_slot is not None:
+ self._slot_change_backtrack(dep, new_child_slot)
+ continue
+
+ if not (dep.parent and
isinstance(dep.parent, Package) and dep.parent.built):
continue
# Check for slot update first, since we don't want to
# trigger reinstall of the child package when a newer
# slot will be used instead.
- if rebuild_if_new_slot_abi:
- new_child = self._slot_abi_update_probe(dep,
+ if rebuild_if_new_slot:
+ new_dep = self._slot_operator_update_probe(dep,
new_child_slot=True)
- if new_child:
- self._slot_abi_update_backtrack(dep,
- new_child_slot=new_child)
- break
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_child_slot=new_dep.child)
if dep.want_update:
- if self._slot_abi_update_probe(dep):
- self._slot_abi_update_backtrack(dep)
- break
+ if self._slot_operator_update_probe(dep):
+ self._slot_operator_update_backtrack(dep)
def _reinstall_for_flags(self, pkg, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
@@ -1222,18 +1964,22 @@ class depgraph(object):
in ("y", "auto"))
newuse = "--newuse" in self._frozen_config.myopts
changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+ feature_flags = _get_feature_flags(
+ _get_eapi_attrs(pkg.eapi))
if newuse or (binpkg_respect_use and not changed_use):
flags = set(orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags))
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
if flags:
return flags
elif changed_use or binpkg_respect_use:
- flags = orig_iuse.intersection(orig_use).symmetric_difference(
- cur_iuse.intersection(cur_use))
+ flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
if flags:
return flags
return None
@@ -1319,11 +2065,16 @@ class depgraph(object):
buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
nodeps = "--nodeps" in self._frozen_config.myopts
if dep.blocker:
+
+ # Slot collision nodes are not allowed to block other packages since
+ # blocker validation is only able to account for one package per slot.
+ is_slot_conflict_parent = any(dep.parent in conflict.pkgs[1:] for conflict in \
+ self._dynamic_config._package_tracker.slot_conflicts())
if not buildpkgonly and \
not nodeps and \
not dep.collapsed_priority.ignored and \
not dep.collapsed_priority.optional and \
- dep.parent not in self._dynamic_config._slot_collision_nodes:
+ not is_slot_conflict_parent:
if dep.parent.onlydeps:
# It's safe to ignore blockers if the
# parent is an --onlydeps node.
@@ -1331,7 +2082,7 @@ class depgraph(object):
# The blocker applies to the root where
# the parent is or will be installed.
blocker = Blocker(atom=dep.atom,
- eapi=dep.parent.metadata["EAPI"],
+ eapi=dep.parent.eapi,
priority=dep.priority, root=dep.parent.root)
self._dynamic_config._blocker_parents.add(blocker, dep.parent)
return 1
@@ -1343,8 +2094,8 @@ class depgraph(object):
# The caller has selected a specific package
# via self._minimize_packages().
dep_pkg = dep.child
- existing_node = self._dynamic_config._slot_pkg_map[
- dep.root].get(dep_pkg.slot_atom)
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ dep.root, dep_pkg.slot_atom, installed=False), None)
if not dep_pkg:
if (dep.collapsed_priority.optional or
@@ -1368,9 +2119,17 @@ class depgraph(object):
(dep.parent,
self._dynamic_config._runtime_pkg_mask[
dep.parent]), noiselevel=-1)
- elif not self.need_restart():
+ elif dep.atom.slot_operator_built and \
+ self._slot_operator_unsatisfied_probe(dep):
+ self._slot_operator_unsatisfied_backtrack(dep)
+ return 1
+ else:
# Do not backtrack if only USE have to be changed in
- # order to satisfy the dependency.
+ # order to satisfy the dependency. Note that when
+ # want_restart_for_use_change sets the need_restart
+ # flag, it causes _select_pkg_highest_available to
+ # return None, and eventually we come through here
+ # and skip the "missing dependency" backtracking path.
dep_pkg, existing_node = \
self._select_package(dep.root, dep.atom.without_use,
onlydeps=dep.onlydeps)
@@ -1401,7 +2160,9 @@ class depgraph(object):
return 1
def _check_slot_conflict(self, pkg, atom):
- existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom, installed=False), None)
+
matches = None
if existing_node:
matches = pkg.cpv == existing_node.cpv
@@ -1477,12 +2238,13 @@ class depgraph(object):
# package selection, since we want to prompt the user
# for USE adjustment rather than have REQUIRED_USE
# affect package selection and || dep choices.
- if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
+ if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
required_use_is_sat = check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag)
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi)
if not required_use_is_sat:
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
@@ -1505,30 +2267,29 @@ class depgraph(object):
existing_node, existing_node_matches = \
self._check_slot_conflict(pkg, dep.atom)
- slot_collision = False
if existing_node:
if existing_node_matches:
# The existing node can be reused.
- if arg_atoms:
- for parent_atom in arg_atoms:
- parent, atom = parent_atom
- self._dynamic_config.digraph.add(existing_node, parent,
- priority=priority)
- self._add_parent_atom(existing_node, parent_atom)
- # If a direct circular dependency is not an unsatisfied
- # buildtime dependency then drop it here since otherwise
- # it can skew the merge order calculation in an unwanted
- # way.
- if existing_node != myparent or \
- (priority.buildtime and not priority.satisfied):
- self._dynamic_config.digraph.addnode(existing_node, myparent,
- priority=priority)
- if dep.atom is not None and dep.parent is not None:
- self._add_parent_atom(existing_node,
- (dep.parent, dep.atom))
- return 1
+ if pkg != existing_node:
+ pkg = existing_node
+ previously_added = True
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before
+ # it was selected
+ raise
+
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Re-used Child:".ljust(15),
+ pkg, pkg_use_display(pkg,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+
else:
- self._add_slot_conflict(pkg)
if debug:
writemsg_level(
"%s%s %s\n" % ("Slot Conflict:".ljust(15),
@@ -1537,23 +2298,8 @@ class depgraph(object):
modified_use=self._pkg_use_enabled(existing_node))),
level=logging.DEBUG, noiselevel=-1)
- slot_collision = True
-
- if slot_collision:
- # Now add this node to the graph so that self.display()
- # can show use flags and --tree portage.output. This node is
- # only being partially added to the graph. It must not be
- # allowed to interfere with the other nodes that have been
- # added. Do not overwrite data for existing nodes in
- # self._dynamic_config.mydbapi since that data will be used for blocker
- # validation.
- # Even though the graph is now invalid, continue to process
- # dependencies so that things like --fetchonly can still
- # function despite collisions.
- pass
- elif not previously_added:
- self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
- self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
+ if not previously_added:
+ self._dynamic_config._package_tracker.add_pkg(pkg)
self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
self._dynamic_config._highest_pkg_cache.clear()
self._check_masks(pkg)
@@ -1563,11 +2309,11 @@ class depgraph(object):
# doesn't already. Any pre-existing providers will be preferred
# over this one.
try:
- pkgsettings.setinst(pkg.cpv, pkg.metadata)
+ pkgsettings.setinst(pkg.cpv, pkg._metadata)
# For consistency, also update the global virtuals.
settings = self._frozen_config.roots[pkg.root].settings
settings.unlock()
- settings.setinst(pkg.cpv, pkg.metadata)
+ settings.setinst(pkg.cpv, pkg._metadata)
settings.lock()
except portage.exception.InvalidDependString:
if not pkg.installed:
@@ -1577,12 +2323,19 @@ class depgraph(object):
if arg_atoms:
self._dynamic_config._set_nodes.add(pkg)
- # Do this even when addme is False (--onlydeps) so that the
+ # Do this even for onlydeps, so that the
# parent/child relationship is always known in case
# self._show_slot_collision_notice() needs to be called later.
- self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
- if dep.atom is not None and dep.parent is not None:
- self._add_parent_atom(pkg, (dep.parent, dep.atom))
+ # If a direct circular dependency is not an unsatisfied
+ # buildtime dependency then drop it here since otherwise
+ # it can skew the merge order calculation in an unwanted
+ # way.
+ if pkg != dep.parent or \
+ (priority.buildtime and not priority.satisfied):
+ self._dynamic_config.digraph.add(pkg,
+ dep.parent, priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
if arg_atoms:
for parent_atom in arg_atoms:
@@ -1612,9 +2365,9 @@ class depgraph(object):
not (deep is not True and depth > deep))
dep.child = pkg
- if (not pkg.onlydeps and pkg.built and
- dep.atom and dep.atom.slot_abi_built):
- self._add_slot_abi_dep(dep)
+ if (not pkg.onlydeps and
+ dep.atom and dep.atom.slot_operator is not None):
+ self._add_slot_operator_dep(dep)
recurse = deep is True or depth + 1 <= deep
dep_stack = self._dynamic_config._dep_stack
@@ -1629,6 +2382,64 @@ class depgraph(object):
dep_stack.append(pkg)
return 1
+
+ def _remove_pkg(self, pkg):
+ """
+ Remove a package and all its then parentless digraph
+ children from all depgraph datastructures.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+ if debug:
+ writemsg_level(
+ "Removing package: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ try:
+ children = [child for child in self._dynamic_config.digraph.child_nodes(pkg) \
+ if child is not pkg]
+ self._dynamic_config.digraph.remove(pkg)
+ except KeyError:
+ children = []
+
+ self._dynamic_config._package_tracker.discard_pkg(pkg)
+
+ self._dynamic_config._parent_atoms.pop(pkg, None)
+ self._dynamic_config._set_nodes.discard(pkg)
+
+ for child in children:
+ try:
+ self._dynamic_config._parent_atoms[child] = set((parent, atom) \
+ for (parent, atom) in self._dynamic_config._parent_atoms[child] \
+ if parent is not pkg)
+ except KeyError:
+ pass
+
+ # Remove slot operator dependencies.
+ slot_key = (pkg.root, pkg.slot_atom)
+ if slot_key in self._dynamic_config._slot_operator_deps:
+ self._dynamic_config._slot_operator_deps[slot_key] = \
+ [dep for dep in self._dynamic_config._slot_operator_deps[slot_key] \
+ if dep.child is not pkg]
+ if not self._dynamic_config._slot_operator_deps[slot_key]:
+ del self._dynamic_config._slot_operator_deps[slot_key]
+
+ # Remove blockers.
+ self._dynamic_config._blocker_parents.discard(pkg)
+ self._dynamic_config._irrelevant_blockers.discard(pkg)
+ self._dynamic_config._unsolvable_blockers.discard(pkg)
+ self._dynamic_config._blocked_pkgs.discard(pkg)
+ self._dynamic_config._blocked_world_pkgs.pop(pkg, None)
+
+ for child in children:
+ if child in self._dynamic_config.digraph and \
+ not self._dynamic_config.digraph.parent_nodes(child):
+ self._remove_pkg(child)
+
+ # Clear caches.
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._dynamic_config._highest_pkg_cache.clear()
+
+
def _check_masks(self, pkg):
slot_key = (pkg.root, pkg.slot_atom)
@@ -1647,33 +2458,23 @@ class depgraph(object):
self._dynamic_config._parent_atoms[pkg] = parent_atoms
parent_atoms.add(parent_atom)
- def _add_slot_abi_dep(self, dep):
+ def _add_slot_operator_dep(self, dep):
slot_key = (dep.root, dep.child.slot_atom)
- slot_info = self._dynamic_config._slot_abi_deps.get(slot_key)
+ slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
if slot_info is None:
slot_info = []
- self._dynamic_config._slot_abi_deps[slot_key] = slot_info
+ self._dynamic_config._slot_operator_deps[slot_key] = slot_info
slot_info.append(dep)
- def _add_slot_conflict(self, pkg):
- self._dynamic_config._slot_collision_nodes.add(pkg)
- slot_key = (pkg.slot_atom, pkg.root)
- slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
- if slot_nodes is None:
- slot_nodes = set()
- slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
- self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
- slot_nodes.add(pkg)
-
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
myroot = pkg.root
- metadata = pkg.metadata
+ metadata = pkg._metadata
removal_action = "remove" in self._dynamic_config.myparams
+ eapi_attrs = _get_eapi_attrs(pkg.eapi)
edepend={}
- depkeys = ["DEPEND","RDEPEND","PDEPEND"]
- for k in depkeys:
+ for k in Package._dep_keys:
edepend[k] = metadata[k]
if not pkg.built and \
@@ -1700,31 +2501,44 @@ class depgraph(object):
# Removal actions never traverse ignored buildtime
# dependencies, so it's safe to discard them early.
edepend["DEPEND"] = ""
+ edepend["HDEPEND"] = ""
ignore_build_time_deps = True
+ ignore_depend_deps = ignore_build_time_deps
+ ignore_hdepend_deps = ignore_build_time_deps
+
if removal_action:
depend_root = myroot
else:
- depend_root = self._frozen_config._running_root.root
- root_deps = self._frozen_config.myopts.get("--root-deps")
- if root_deps is not None:
- if root_deps is True:
- depend_root = myroot
- elif root_deps == "rdeps":
- ignore_build_time_deps = True
+ if eapi_attrs.hdepend:
+ depend_root = myroot
+ else:
+ depend_root = self._frozen_config._running_root.root
+ root_deps = self._frozen_config.myopts.get("--root-deps")
+ if root_deps is not None:
+ if root_deps is True:
+ depend_root = myroot
+ elif root_deps == "rdeps":
+ ignore_depend_deps = True
# If rebuild mode is not enabled, it's safe to discard ignored
# build-time dependencies. If you want these deps to be traversed
# in "complete" mode then you need to specify --with-bdeps=y.
- if ignore_build_time_deps and \
- not self._rebuild.rebuild:
- edepend["DEPEND"] = ""
+ if not self._rebuild.rebuild:
+ if ignore_depend_deps:
+ edepend["DEPEND"] = ""
+ if ignore_hdepend_deps:
+ edepend["HDEPEND"] = ""
deps = (
(depend_root, edepend["DEPEND"],
self._priority(buildtime=True,
- optional=(pkg.built or ignore_build_time_deps),
- ignored=ignore_build_time_deps)),
+ optional=(pkg.built or ignore_depend_deps),
+ ignored=ignore_depend_deps)),
+ (self._frozen_config._running_root.root, edepend["HDEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_hdepend_deps),
+ ignored=ignore_hdepend_deps)),
(myroot, edepend["RDEPEND"],
self._priority(runtime=True)),
(myroot, edepend["PDEPEND"],
@@ -1749,7 +2563,7 @@ class depgraph(object):
uselist=self._pkg_use_enabled(pkg),
is_valid_flag=pkg.iuse.is_valid_flag,
opconvert=True, token_class=Atom,
- eapi=pkg.metadata['EAPI'])
+ eapi=pkg.eapi)
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
@@ -1763,7 +2577,7 @@ class depgraph(object):
dep_string = portage.dep.use_reduce(dep_string,
uselist=self._pkg_use_enabled(pkg),
opconvert=True, token_class=Atom,
- eapi=pkg.metadata['EAPI'])
+ eapi=pkg.eapi)
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
del e
@@ -1806,6 +2620,37 @@ class depgraph(object):
finally:
self._dynamic_config._autounmask = _autounmask_backup
+ def _ignore_dependency(self, atom, pkg, child, dep, mypriority, recurse_satisfied):
+ """
+ In some cases, dep_check will return deps that shouldn't
+ be processed any further, so they are identified and
+ discarded here. Try to discard as few as possible since
+ discarded dependencies reduce the amount of information
+ available for optimization of merge order.
+ Don't ignore dependencies if pkg has a slot operator dependency on the child
+ and the child has changed slot/sub_slot.
+ """
+ if not mypriority.satisfied:
+ return False
+ slot_operator_rebuild = False
+ if atom.slot_operator == '=' and \
+ (pkg.root, pkg.slot_atom) in self._dynamic_config._slot_operator_replace_installed and \
+ mypriority.satisfied is not child and \
+ mypriority.satisfied.installed and \
+ child and \
+ not child.installed and \
+ (child.slot != mypriority.satisfied.slot or child.sub_slot != mypriority.satisfied.sub_slot):
+ slot_operator_rebuild = True
+
+ return not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ not any(self._dynamic_config._package_tracker.match(
+ dep.child.root, dep.child.slot_atom, installed=False)) and \
+ not slot_operator_rebuild
+
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
dep_string, allow_unsatisfied):
depth = pkg.depth + 1
@@ -1864,6 +2709,13 @@ class depgraph(object):
mypriority = dep_priority.copy()
if not atom.blocker:
+
+ if atom.slot_operator == "=":
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
inst_pkgs = [inst_pkg for inst_pkg in
reversed(vardb.match_pkgs(atom))
if not reinstall_atoms.findAtomForPackage(inst_pkg,
@@ -1883,19 +2735,12 @@ class depgraph(object):
priority=mypriority, root=dep_root)
# In some cases, dep_check will return deps that shouldn't
- # be proccessed any further, so they are identified and
+ # be processed any further, so they are identified and
# discarded here. Try to discard as few as possible since
# discarded dependencies reduce the amount of information
# available for optimization of merge order.
ignored = False
- if not atom.blocker and \
- not recurse_satisfied and \
- mypriority.satisfied and \
- mypriority.satisfied.visible and \
- dep.child is not None and \
- not dep.child.installed and \
- self._dynamic_config._slot_pkg_map[dep.child.root].get(
- dep.child.slot_atom) is None:
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
myarg = None
try:
myarg = next(self._iter_atoms_for_pkg(dep.child), None)
@@ -1998,14 +2843,7 @@ class depgraph(object):
collapsed_parent=pkg, collapsed_priority=dep_priority)
ignored = False
- if not atom.blocker and \
- not recurse_satisfied and \
- mypriority.satisfied and \
- mypriority.satisfied.visible and \
- dep.child is not None and \
- not dep.child.installed and \
- self._dynamic_config._slot_pkg_map[dep.child.root].get(
- dep.child.slot_atom) is None:
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
myarg = None
try:
myarg = next(self._iter_atoms_for_pkg(dep.child), None)
@@ -2053,7 +2891,7 @@ class depgraph(object):
yield (atom, None)
continue
dep_pkg, existing_node = self._select_package(
- root_config.root, atom)
+ root_config.root, atom, parent=parent)
if dep_pkg is None:
yield (atom, None)
continue
@@ -2105,12 +2943,12 @@ class depgraph(object):
# Yield ~, =*, < and <= atoms first, since those are more likely to
# cause slot conflicts, and we want those atoms to be displayed
# in the resulting slot conflict message (see bug #291142).
- # Give similar treatment to SLOT/ABI atoms.
+ # Give similar treatment to slot/sub-slot atoms.
conflict_atoms = []
normal_atoms = []
abi_atoms = []
for atom in cp_atoms:
- if atom.slot_abi_built:
+ if atom.slot_operator_built:
abi_atoms.append(atom)
continue
conflict = False
@@ -2135,7 +2973,7 @@ class depgraph(object):
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
"""
Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
- Yields non-disjunctive deps. Raises InvalidDependString when
+ Yields non-disjunctive deps. Raises InvalidDependString when
necessary.
"""
for x in dep_struct:
@@ -2242,9 +3080,24 @@ class depgraph(object):
continue
yield arg, atom
- def select_files(self, myfiles):
+ def select_files(self, args):
+ # Use the global event loop for spinner progress
+ # indication during file owner lookups (bug #461412).
+ spinner_id = None
+ try:
+ spinner = self._frozen_config.spinner
+ if spinner is not None and \
+ spinner.update is not spinner.update_quiet:
+ spinner_id = self._event_loop.idle_add(
+ self._frozen_config.spinner.update)
+ return self._select_files(args)
+ finally:
+ if spinner_id is not None:
+ self._event_loop.source_remove(spinner_id)
+
+ def _select_files(self, myfiles):
"""Given a list of .tbz2s, .ebuilds sets, and deps, populate
- self._dynamic_config._initial_arg_list and call self._resolve to create the
+ self._dynamic_config._initial_arg_list and call self._resolve to create the
appropriate depgraph and return a favorite list."""
self._load_vdb()
debug = "--debug" in self._frozen_config.myopts
@@ -2277,8 +3130,18 @@ class depgraph(object):
writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
return 0, myfavorites
mytbz2=portage.xpak.tbz2(x)
- mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
- if os.path.realpath(x) != \
+ mykey = None
+ cat = mytbz2.getfile("CATEGORY")
+ if cat is not None:
+ cat = _unicode_decode(cat.strip(),
+ encoding=_encodings['repo.content'])
+ mykey = cat + "/" + os.path.basename(x)[:-5]
+
+ if mykey is None:
+ writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ elif os.path.realpath(x) != \
os.path.realpath(bindb.bintree.getname(mykey)):
writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
self._dynamic_config._skip_restart = True
@@ -2293,15 +3156,16 @@ class depgraph(object):
pkgdir = os.path.dirname(ebuild_path)
tree_root = os.path.dirname(os.path.dirname(pkgdir))
cp = pkgdir[len(tree_root)+1:]
- e = portage.exception.PackageNotFound(
- ("%s is not in a valid portage tree " + \
- "hierarchy or does not exist") % x)
+ error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
+ "hierarchy or does not exist\n") % x
if not portage.isvalidatom(cp):
- raise e
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
cat = portage.catsplit(cp)[0]
mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
if not portage.isvalidatom("="+mykey):
- raise e
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
ebuild_path = portdb.findname(mykey)
if ebuild_path:
if ebuild_path != os.path.join(os.path.realpath(tree_root),
@@ -2317,8 +3181,8 @@ class depgraph(object):
countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
"Continuing...")
else:
- raise portage.exception.PackageNotFound(
- "%s is not in a valid portage tree hierarchy or does not exist" % x)
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
pkg = self._pkg(mykey, "ebuild", root_config,
onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
@@ -2351,6 +3215,30 @@ class depgraph(object):
raise portage.exception.PackageSetNotFound(s)
if s in depgraph_sets.sets:
continue
+
+ try:
+ set_atoms = root_config.setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level("\n\n", level=logging.ERROR,
+ noiselevel=-1)
+ for pset in list(depgraph_sets.sets.values()) + [sets[s]]:
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
+ writemsg_level(("emerge: the given set '%s' "
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR,
+ noiselevel=-1)
+ return False, myfavorites
+
pset = sets[s]
depgraph_sets.sets[s] = pset
args.append(SetArg(arg=x, pset=pset,
@@ -2370,7 +3258,7 @@ class depgraph(object):
# came from, if any.
# 2) It takes away freedom from the resolver to choose other
# possible expansions when necessary.
- if "/" in x:
+ if "/" in x.split(":")[0]:
args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
root_config=root_config))
continue
@@ -2471,13 +3359,8 @@ class depgraph(object):
return 0, []
for cpv in owners:
- slot = vardb.aux_get(cpv, ["SLOT"])[0]
- if not slot:
- # portage now masks packages with missing slot, but it's
- # possible that one was installed by an older version
- atom = Atom(portage.cpv_getkey(cpv))
- else:
- atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
+ pkg = vardb._pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
args.append(AtomArg(arg=atom, atom=atom,
root_config=root_config))
@@ -2542,7 +3425,7 @@ class depgraph(object):
# Order needs to be preserved since a feature of --nodeps
# is to allow the user to force a specific merge order.
self._dynamic_config._initial_arg_list = args[:]
-
+
return self._resolve(myfavorites)
def _gen_reinstall_sets(self):
@@ -2552,8 +3435,8 @@ class depgraph(object):
atom_list.append((root, '__auto_rebuild__', atom))
for root, atom in self._rebuild.reinstall_list:
atom_list.append((root, '__auto_reinstall__', atom))
- for root, atom in self._dynamic_config._slot_abi_replace_installed:
- atom_list.append((root, '__auto_slot_abi_replace_installed__', atom))
+ for root, atom in self._dynamic_config._slot_operator_replace_installed:
+ atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
set_dict = {}
for root, set_name, atom in atom_list:
@@ -2572,8 +3455,8 @@ class depgraph(object):
root_config=self._frozen_config.roots[root])
def _resolve(self, myfavorites):
- """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
- call self._creategraph to process theier deps and return
+ """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
+ call self._creategraph to process their deps and return
a favorite list."""
debug = "--debug" in self._frozen_config.myopts
onlydeps = "--onlydeps" in self._frozen_config.myopts
@@ -2624,6 +3507,16 @@ class depgraph(object):
if pprovided_match:
continue
+ excluded = False
+ for any_match in self._iter_match_pkgs_any(
+ self._frozen_config.roots[myroot], atom):
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ any_match, modified_use=self._pkg_use_enabled(any_match)):
+ excluded = True
+ break
+ if excluded:
+ continue
+
if not (isinstance(arg, SetArg) and \
arg.name in ("selected", "system", "world")):
self._dynamic_config._unsatisfied_deps_for_display.append(
@@ -2692,7 +3585,8 @@ class depgraph(object):
except self._unknown_internal_error:
return False, myfavorites
- if (self._dynamic_config._slot_collision_info and
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if (have_slot_conflict and
not self._accept_blocker_conflicts()) or \
(self._dynamic_config._allow_backtracking and
"slot conflict" in self._dynamic_config._backtrack_infos):
@@ -2707,11 +3601,47 @@ class depgraph(object):
return False, myfavorites
if "config" in self._dynamic_config._backtrack_infos and \
- ("slot_abi_mask_built" in self._dynamic_config._backtrack_infos["config"] or
- "slot_abi_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
+ ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
+ "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
self.need_restart():
return False, myfavorites
+ if not self._dynamic_config._prune_rebuilds and \
+ self._dynamic_config._slot_operator_replace_installed and \
+ self._get_missed_updates():
+ # When there are missed updates, we might have triggered
+ # some unnecessary rebuilds (see bug #439688). So, prune
+ # all the rebuilds and backtrack with the problematic
+ # updates masked. The next backtrack run should pull in
+ # any rebuilds that are really needed, and this
+ # prune_rebuilds path should never be entered more than
+ # once in a series of backtracking nodes (in order to
+ # avoid a backtracking loop).
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config["prune_rebuilds"] = True
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ if self.need_restart():
+ # want_restart_for_use_change triggers this
+ return False, myfavorites
+
+ if "--fetchonly" not in self._frozen_config.myopts and \
+ "--buildpkgonly" in self._frozen_config.myopts:
+ graph_copy = self._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ self._dynamic_config._buildpkgonly_deps_unsatisfied = True
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+
# Any failures except those due to autounmask *alone* should return
# before this point, since the success_without_autounmask flag that's
# set below is reserved for cases where there are *zero* other
@@ -2773,8 +3703,8 @@ class depgraph(object):
if refs is None:
refs = []
atom_arg_map[atom_key] = refs
- if arg not in refs:
- refs.append(arg)
+ if arg not in refs:
+ refs.append(arg)
for root in self._dynamic_config.sets:
depgraph_sets = self._dynamic_config.sets[root]
@@ -2804,14 +3734,15 @@ class depgraph(object):
slots = set()
for cpv in vardb.match(atom):
# don't mix new virtuals with old virtuals
- if portage.cpv_getkey(cpv) == highest_pkg.cp:
- slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
+ pkg = vardb._pkg_str(cpv, None)
+ if pkg.cp == highest_pkg.cp:
+ slots.add(pkg.slot)
- slots.add(highest_pkg.metadata["SLOT"])
+ slots.add(highest_pkg.slot)
if len(slots) == 1:
return []
greedy_pkgs = []
- slots.remove(highest_pkg.metadata["SLOT"])
+ slots.remove(highest_pkg.slot)
while slots:
slot = slots.pop()
slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
@@ -2825,9 +3756,9 @@ class depgraph(object):
return [pkg.slot_atom for pkg in greedy_pkgs]
blockers = {}
- blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
+ blocker_dep_keys = Package._dep_keys
for pkg in greedy_pkgs + [highest_pkg]:
- dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
+ dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
try:
selected_atoms = self._select_atoms(
pkg.root, dep_str, self._pkg_use_enabled(pkg),
@@ -2879,7 +3810,8 @@ class depgraph(object):
not been scheduled for replacement.
"""
kwargs["trees"] = self._dynamic_config._graph_trees
- return self._select_atoms_highest_available(*pargs, **kwargs)
+ return self._select_atoms_highest_available(*pargs,
+ **portage._native_kwargs(kwargs))
def _select_atoms_highest_available(self, root, depstring,
myuse=None, parent=None, strict=True, trees=None, priority=None):
@@ -2890,7 +3822,7 @@ class depgraph(object):
eapi = None
is_valid_flag = None
if parent is not None:
- eapi = parent.metadata['EAPI']
+ eapi = parent.eapi
if not parent.installed:
is_valid_flag = parent.iuse.is_valid_flag
depstring = portage.dep.use_reduce(depstring,
@@ -2898,9 +3830,9 @@ class depgraph(object):
is_valid_flag=is_valid_flag, eapi=eapi)
if (self._dynamic_config.myparams.get(
- "ignore_built_slot_abi_deps", "n") == "y" and
+ "ignore_built_slot_operator_deps", "n") == "y" and
parent and parent.built):
- ignore_built_slot_abi_deps(depstring)
+ ignore_built_slot_operator_deps(depstring)
pkgsettings = self._frozen_config.pkgsettings[root]
if trees is None:
@@ -3005,35 +3937,37 @@ class depgraph(object):
def _expand_virt_from_graph(self, root, atom):
if not isinstance(atom, Atom):
atom = Atom(atom)
- graphdb = self._dynamic_config.mydbapi[root]
- match = graphdb.match_pkgs(atom)
- if not match:
- yield atom
- return
- pkg = match[-1]
- if not pkg.cpv.startswith("virtual/"):
- yield atom
- return
- try:
- rdepend = self._select_atoms_from_graph(
- pkg.root, pkg.metadata.get("RDEPEND", ""),
- myuse=self._pkg_use_enabled(pkg),
- parent=pkg, strict=False)
- except InvalidDependString as e:
- writemsg_level("!!! Invalid RDEPEND in " + \
- "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
- (pkg.root, pkg.cpv, e),
- noiselevel=-1, level=logging.ERROR)
+
+ if not atom.cp.startswith("virtual/"):
yield atom
return
- for atoms in rdepend.values():
- for atom in atoms:
- if hasattr(atom, "_orig_atom"):
- # Ignore virtual atoms since we're only
- # interested in expanding the real atoms.
- continue
- yield atom
+ any_match = False
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ try:
+ rdepend = self._select_atoms_from_graph(
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, strict=False)
+ except InvalidDependString as e:
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ continue
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if hasattr(atom, "_orig_atom"):
+ # Ignore virtual atoms since we're only
+ # interested in expanding the real atoms.
+ continue
+ yield atom
+
+ any_match = True
+
+ if not any_match:
+ yield atom
def _virt_deps_visible(self, pkg, ignore_use=False):
"""
@@ -3044,7 +3978,7 @@ class depgraph(object):
"""
try:
rdepend = self._select_atoms(
- pkg.root, pkg.metadata.get("RDEPEND", ""),
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
myuse=self._pkg_use_enabled(pkg),
parent=pkg, priority=self._priority(runtime=True))
except InvalidDependString as e:
@@ -3083,19 +4017,29 @@ class depgraph(object):
child = None
all_parents = self._dynamic_config._parent_atoms
graph = self._dynamic_config.digraph
+ verbose_main_repo_display = "--verbose-main-repo-display" in \
+ self._frozen_config.myopts
+
+ def format_pkg(pkg):
+ pkg_name = "%s" % (pkg.cpv,)
+ if verbose_main_repo_display or pkg.repo != \
+ pkg.root_config.settings.repositories.mainRepo().name:
+ pkg_name += _repo_separator + pkg.repo
+ return pkg_name
if target_atom is not None and isinstance(node, Package):
affecting_use = set()
- for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
+ for dep_str in Package._dep_keys:
try:
affecting_use.update(extract_affecting_use(
- node.metadata[dep_str], target_atom,
- eapi=node.metadata["EAPI"]))
+ node._metadata[dep_str], target_atom,
+ eapi=node.eapi))
except InvalidDependString:
if not node.installed:
raise
affecting_use.difference_update(node.use.mask, node.use.force)
- pkg_name = _unicode_decode("%s") % (node.cpv,)
+ pkg_name = format_pkg(node)
+
if affecting_use:
usedep = []
for flag in affecting_use:
@@ -3150,7 +4094,7 @@ class depgraph(object):
node_type = "set"
else:
node_type = "argument"
- dep_chain.append((_unicode_decode("%s") % (node,), node_type))
+ dep_chain.append(("%s" % (node,), node_type))
elif node is not start_node:
for ppkg, patom in all_parents[child]:
@@ -3167,23 +4111,23 @@ class depgraph(object):
if priorities is None:
# This edge comes from _parent_atoms and was not added to
# the graph, and _parent_atoms does not contain priorities.
- dep_strings.add(node.metadata["DEPEND"])
- dep_strings.add(node.metadata["RDEPEND"])
- dep_strings.add(node.metadata["PDEPEND"])
+ for k in Package._dep_keys:
+ dep_strings.add(node._metadata[k])
else:
for priority in priorities:
if priority.buildtime:
- dep_strings.add(node.metadata["DEPEND"])
+ for k in Package._buildtime_keys:
+ dep_strings.add(node._metadata[k])
if priority.runtime:
- dep_strings.add(node.metadata["RDEPEND"])
+ dep_strings.add(node._metadata["RDEPEND"])
if priority.runtime_post:
- dep_strings.add(node.metadata["PDEPEND"])
+ dep_strings.add(node._metadata["PDEPEND"])
affecting_use = set()
for dep_str in dep_strings:
try:
affecting_use.update(extract_affecting_use(
- dep_str, atom, eapi=node.metadata["EAPI"]))
+ dep_str, atom, eapi=node.eapi))
except InvalidDependString:
if not node.installed:
raise
@@ -3192,7 +4136,7 @@ class depgraph(object):
affecting_use.difference_update(node.use.mask, \
node.use.force)
- pkg_name = _unicode_decode("%s") % (node.cpv,)
+ pkg_name = format_pkg(node)
if affecting_use:
usedep = []
for flag in affecting_use:
@@ -3244,8 +4188,7 @@ class depgraph(object):
if self._dynamic_config.digraph.parent_nodes(parent_arg):
selected_parent = parent_arg
else:
- dep_chain.append(
- (_unicode_decode("%s") % (parent_arg,), "argument"))
+ dep_chain.append(("%s" % (parent_arg,), "argument"))
selected_parent = None
node = selected_parent
@@ -3260,7 +4203,7 @@ class depgraph(object):
else:
display_list.append("required by %s" % node)
- msg = "#" + ", ".join(display_list) + "\n"
+ msg = "# " + "\n# ".join(display_list) + "\n"
return msg
@@ -3281,7 +4224,7 @@ class depgraph(object):
if arg:
xinfo='"%s"' % arg
if isinstance(myparent, AtomArg):
- xinfo = _unicode_decode('"%s"') % (myparent,)
+ xinfo = '"%s"' % (myparent,)
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
if root != self._frozen_config._running_root.root:
@@ -3326,9 +4269,9 @@ class depgraph(object):
repo = metadata.get('repository')
pkg = self._pkg(cpv, pkg_type, root_config,
installed=installed, myrepo=repo)
- # pkg.metadata contains calculated USE for ebuilds,
+ # pkg._metadata contains calculated USE for ebuilds,
# required later for getMissingLicenses.
- metadata = pkg.metadata
+ metadata = pkg._metadata
if pkg.invalid:
# Avoid doing any operations with packages that
# have invalid metadata. It would be unsafe at
@@ -3367,12 +4310,13 @@ class depgraph(object):
raise
if not mreasons and \
not pkg.built and \
- pkg.metadata.get("REQUIRED_USE") and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
+ pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
if not check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag):
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi):
required_use_unsatisfied.append(pkg)
continue
root_slot = (pkg.root, pkg.slot_atom)
@@ -3422,7 +4366,7 @@ class depgraph(object):
continue
missing_use_adjustable.add(pkg)
- required_use = pkg.metadata.get("REQUIRED_USE")
+ required_use = pkg._metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(pkg)
@@ -3431,8 +4375,10 @@ class depgraph(object):
new_use.add(flag)
for flag in need_disable:
new_use.discard(flag)
- if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ if check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
+ and not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
required_use_warning = ", this change violates use flag constraints " + \
"defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
@@ -3470,7 +4416,7 @@ class depgraph(object):
if any(x in untouchable_flags for x in involved_flags):
continue
- required_use = myparent.metadata.get("REQUIRED_USE")
+ required_use = myparent._metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(myparent)
@@ -3480,8 +4426,12 @@ class depgraph(object):
new_use.discard(flag)
else:
new_use.add(flag)
- if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
+ if check_required_use(required_use, old_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi) and \
+ not check_required_use(required_use, new_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi):
required_use_warning = ", this change violates use flag constraints " + \
"defined by %s: '%s'" % (myparent.cpv, \
human_readable_required_use(required_use))
@@ -3568,14 +4518,15 @@ class depgraph(object):
writemsg("\n The following REQUIRED_USE flag constraints " + \
"are unsatisfied:\n", noiselevel=-1)
reduced_noise = check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag).tounicode()
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi).tounicode()
writemsg(" %s\n" % \
human_readable_required_use(reduced_noise),
noiselevel=-1)
normalized_required_use = \
- " ".join(pkg.metadata["REQUIRED_USE"].split())
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
if reduced_noise != normalized_required_use:
writemsg("\n The above constraints " + \
"are a subset of the following complete expression:\n",
@@ -3620,57 +4571,17 @@ class depgraph(object):
not cp_exists and \
self._frozen_config.myopts.get(
"--misspell-suggestions", "y") != "n":
- cp = myparent.atom.cp.lower()
- cat, pkg = portage.catsplit(cp)
- if cat == "null":
- cat = None
writemsg("\nemerge: searching for similar names..."
, noiselevel=-1)
- all_cp = set()
- all_cp.update(vardb.cp_all())
+ dbs = [vardb]
if "--usepkgonly" not in self._frozen_config.myopts:
- all_cp.update(portdb.cp_all())
+ dbs.append(portdb)
if "--usepkg" in self._frozen_config.myopts:
- all_cp.update(bindb.cp_all())
- # discard dir containing no ebuilds
- all_cp.discard(cp)
-
- orig_cp_map = {}
- for cp_orig in all_cp:
- orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
- all_cp = set(orig_cp_map)
-
- if cat:
- matches = difflib.get_close_matches(cp, all_cp)
- else:
- pkg_to_cp = {}
- for other_cp in list(all_cp):
- other_pkg = portage.catsplit(other_cp)[1]
- if other_pkg == pkg:
- # Check for non-identical package that
- # differs only by upper/lower case.
- identical = True
- for cp_orig in orig_cp_map[other_cp]:
- if portage.catsplit(cp_orig)[1] != \
- portage.catsplit(atom.cp)[1]:
- identical = False
- break
- if identical:
- # discard dir containing no ebuilds
- all_cp.discard(other_cp)
- continue
- pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
- pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
- matches = []
- for pkg_match in pkg_matches:
- matches.extend(pkg_to_cp[pkg_match])
+ dbs.append(bindb)
- matches_orig_case = []
- for cp in matches:
- matches_orig_case.extend(orig_cp_map[cp])
- matches = matches_orig_case
+ matches = similar_name_search(dbs, atom)
if len(matches) == 1:
writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
@@ -3691,8 +4602,7 @@ class depgraph(object):
dep_chain = self._get_dep_chain(myparent, atom)
for node, node_type in dep_chain:
msg.append('(dependency required by "%s" [%s])' % \
- (colorize('INFORM', _unicode_decode("%s") % \
- (node)), node_type))
+ (colorize('INFORM', "%s" % (node)), node_type))
if msg:
writemsg("\n".join(msg), noiselevel=-1)
@@ -3770,7 +4680,8 @@ class depgraph(object):
# the newly built package still won't have the expected slot.
# Therefore, assume that such SLOT dependencies are already
# satisfied rather than forcing a rebuild.
- if not matched_something and installed and atom.slot is not None:
+ if not matched_something and installed and \
+ atom.slot is not None and not atom.slot_operator_built:
if "remove" in self._dynamic_config.myparams:
# We need to search the portdbapi, which is not in our
@@ -3794,11 +4705,11 @@ class depgraph(object):
for other_db, other_type, other_built, \
other_installed, other_keys in dbs:
try:
- if atom.slot == \
- other_db.aux_get(cpv, ["SLOT"])[0]:
+ if portage.dep._match_slot(atom,
+ other_db._pkg_str(_unicode(cpv), None)):
slot_available = True
break
- except KeyError:
+ except (KeyError, InvalidData):
pass
if not slot_available:
continue
@@ -3810,12 +4721,12 @@ class depgraph(object):
yield inst_pkg
return
- def _select_pkg_highest_available(self, root, atom, onlydeps=False):
+ def _select_pkg_highest_available(self, root, atom, onlydeps=False, parent=None):
cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
if ret is not None:
return ret
- ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+ ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps, parent=parent)
self._dynamic_config._highest_pkg_cache[cache_key] = ret
pkg, existing = ret
if pkg is not None:
@@ -3847,6 +4758,36 @@ class depgraph(object):
return not arg
+ def _want_update_pkg(self, parent, pkg):
+
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return False
+
+ arg_atoms = None
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ depth = parent.depth or 0
+ depth += 1
+
+ if arg_atoms:
+ for arg, atom in arg_atoms:
+ if arg.reset_depth:
+ depth = 0
+ break
+
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ update = "--update" in self._frozen_config.myopts
+
+ return (not self._dynamic_config._complete_mode and
+ (arg_atoms or update) and
+ not (deep is not True and depth > deep))
+
def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
try:
pkg_eb = self._pkg(
@@ -3867,7 +4808,7 @@ class depgraph(object):
return True
def _equiv_binary_installed(self, pkg):
- build_time = pkg.metadata.get('BUILD_TIME')
+ build_time = pkg.build_time
if not build_time:
return False
@@ -3877,7 +4818,7 @@ class depgraph(object):
except PackageNotFound:
return False
- return build_time == inst_pkg.metadata.get('BUILD_TIME')
+ return build_time == inst_pkg.build_time
class _AutounmaskLevel(object):
__slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
@@ -3898,8 +4839,9 @@ class depgraph(object):
1. USE + license
2. USE + ~arch + license
3. USE + ~arch + license + missing keywords
- 4. USE + ~arch + license + masks
- 5. USE + ~arch + license + missing keywords + masks
+ 4. USE + license + masks
+ 5. USE + ~arch + license + masks
+ 6. USE + ~arch + license + missing keywords + masks
Some thoughts:
* Do least invasive changes first.
@@ -3919,15 +4861,25 @@ class depgraph(object):
autounmask_level.allow_license_changes = True
yield autounmask_level
- for only_use_changes in (False,):
+ autounmask_level.allow_unstable_keywords = True
+ yield autounmask_level
- autounmask_level.allow_unstable_keywords = (not only_use_changes)
- autounmask_level.allow_license_changes = (not only_use_changes)
+ if not autounmask_keep_masks:
- for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
+ autounmask_level.allow_missing_keywords = True
+ yield autounmask_level
- if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
- break
+ # 4. USE + license + masks
+ # Try to respect keywords while discarding
+ # package.mask (see bug #463394).
+ autounmask_level.allow_unstable_keywords = False
+ autounmask_level.allow_missing_keywords = False
+ autounmask_level.allow_unmasks = True
+ yield autounmask_level
+
+ autounmask_level.allow_unstable_keywords = True
+
+ for missing_keyword, unmask in ((False, True), (True, True)):
autounmask_level.allow_missing_keywords = missing_keyword
autounmask_level.allow_unmasks = unmask
@@ -3935,33 +4887,42 @@ class depgraph(object):
yield autounmask_level
- def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
- pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+ def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False, parent=None):
+ pkg, existing = self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps, parent=parent)
default_selection = (pkg, existing)
- def reset_pkg(pkg):
+ if self._dynamic_config._autounmask is True:
if pkg is not None and \
pkg.installed and \
not self._want_installed_pkg(pkg):
pkg = None
- if self._dynamic_config._autounmask is True:
- reset_pkg(pkg)
+ # Temporarily reset _need_restart state, in order to
+ # avoid interference as reported in bug #459832.
+ earlier_need_restart = self._dynamic_config._need_restart
+ self._dynamic_config._need_restart = False
+ try:
+ for autounmask_level in self._autounmask_levels():
+ if pkg is not None:
+ break
- for autounmask_level in self._autounmask_levels():
- if pkg is not None:
- break
+ pkg, existing = \
+ self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps,
+ autounmask_level=autounmask_level, parent=parent)
- pkg, existing = \
- self._wrapped_select_pkg_highest_available_imp(
- root, atom, onlydeps=onlydeps,
- autounmask_level=autounmask_level)
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
- reset_pkg(pkg)
-
- if self._dynamic_config._need_restart:
- return None, None
+ if self._dynamic_config._need_restart:
+ return None, None
+ finally:
+ if earlier_need_restart:
+ self._dynamic_config._need_restart = True
if pkg is None:
# This ensures that we can fall back to an installed package
@@ -4091,25 +5052,29 @@ class depgraph(object):
new_changes = {}
for flag, state in target_use.items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
if state:
- if flag not in old_use:
- if new_changes.get(flag) == False:
+ if real_flag not in old_use:
+ if new_changes.get(real_flag) == False:
return old_use
- new_changes[flag] = True
+ new_changes[real_flag] = True
new_use.add(flag)
else:
- if flag in old_use:
- if new_changes.get(flag) == True:
+ if real_flag in old_use:
+ if new_changes.get(real_flag) == True:
return old_use
- new_changes[flag] = False
+ new_changes[real_flag] = False
new_use.update(old_use.difference(target_use))
def want_restart_for_use_change(pkg, new_use):
if pkg not in self._dynamic_config.digraph.nodes:
return False
- for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
- dep = pkg.metadata[key]
+ for key in Package._dep_keys + ("LICENSE",):
+ dep = pkg._metadata[key]
old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
@@ -4132,9 +5097,11 @@ class depgraph(object):
if new_changes != old_changes:
#Don't do the change if it violates REQUIRED_USE.
- required_use = pkg.metadata.get("REQUIRED_USE")
- if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ if required_use and check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
+ not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
return old_use
if any(x in pkg.use.mask for x in new_changes) or \
@@ -4150,14 +5117,13 @@ class depgraph(object):
self._dynamic_config._need_restart = True
return new_use
- def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
+ def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None, parent=None):
root_config = self._frozen_config.roots[root]
pkgsettings = self._frozen_config.pkgsettings[root]
dbs = self._dynamic_config._filtered_trees[root]["dbs"]
vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
# List of acceptable packages, ordered by type preference.
matched_packages = []
- matched_pkgs_ignore_use = []
highest_version = None
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom)
@@ -4209,7 +5175,7 @@ class depgraph(object):
# Ignore USE deps for the initial match since we want to
# ensure that updates aren't missed solely due to the user's
# USE configuration.
- for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
+ for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
onlydeps=onlydeps):
if pkg.cp != atom_cp and have_new_virt:
# pull in a new-style virtual instead
@@ -4295,8 +5261,8 @@ class depgraph(object):
for selected_pkg in matched_packages:
if selected_pkg.type_name == "binary" and \
selected_pkg.cpv == pkg.cpv and \
- selected_pkg.metadata.get('BUILD_TIME') == \
- pkg.metadata.get('BUILD_TIME'):
+ selected_pkg.build_time == \
+ pkg.build_time:
identical_binary = True
break
@@ -4339,7 +5305,6 @@ class depgraph(object):
if atom.use:
- matched_pkgs_ignore_use.append(pkg)
if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
target_use = {}
for flag in atom.use.enabled:
@@ -4352,8 +5317,11 @@ class depgraph(object):
use_match = True
can_adjust_use = not pkg.built
- missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
- missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
+ is_valid_flag = pkg.iuse.is_valid_flag
+ missing_enabled = frozenset(x for x in
+ atom.use.missing_enabled if not is_valid_flag(x))
+ missing_disabled = frozenset(x for x in
+ atom.use.missing_disabled if not is_valid_flag(x))
if atom.use.enabled:
if any(x in atom.use.enabled for x in missing_disabled):
@@ -4406,7 +5374,9 @@ class depgraph(object):
# will always end with a break statement below
# this point.
if find_existing_node:
- e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ e_pkg = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
if not e_pkg:
break
@@ -4427,50 +5397,56 @@ class depgraph(object):
break
# Compare built package to current config and
# reject the built package if necessary.
- if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
- ("--newuse" in self._frozen_config.myopts or \
- "--reinstall" in self._frozen_config.myopts or \
- (not installed and self._dynamic_config.myparams.get(
- "binpkg_respect_use") in ("y", "auto"))):
- iuses = pkg.iuse.all
- old_use = self._pkg_use_enabled(pkg)
- if myeb:
- pkgsettings.setcpv(myeb)
- else:
- pkgsettings.setcpv(pkg)
- now_use = pkgsettings["PORTAGE_USE"].split()
- forced_flags = set()
- forced_flags.update(pkgsettings.useforce)
- forced_flags.update(pkgsettings.usemask)
- cur_iuse = iuses
- if myeb and not usepkgonly and not useoldpkg:
- cur_iuse = myeb.iuse.all
- reinstall_for_flags = self._reinstall_for_flags(pkg,
- forced_flags, old_use, iuses, now_use, cur_iuse)
- if reinstall_for_flags:
- if not pkg.installed:
- self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
+ reinstall_use = ("--newuse" in self._frozen_config.myopts or \
+ "--reinstall" in self._frozen_config.myopts)
+ respect_use = self._dynamic_config.myparams.get("binpkg_respect_use") in ("y", "auto")
+ if built and not useoldpkg and \
+ (not installed or matched_packages) and \
+ not (installed and
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg))):
+ if myeb and "--newrepo" in self._frozen_config.myopts and myeb.repo != pkg.repo:
break
+ elif reinstall_use or (not installed and respect_use):
+ iuses = pkg.iuse.all
+ old_use = self._pkg_use_enabled(pkg)
+ if myeb:
+ pkgsettings.setcpv(myeb)
+ else:
+ pkgsettings.setcpv(pkg)
+ now_use = pkgsettings["PORTAGE_USE"].split()
+ forced_flags = set()
+ forced_flags.update(pkgsettings.useforce)
+ forced_flags.update(pkgsettings.usemask)
+ cur_iuse = iuses
+ if myeb and not usepkgonly and not useoldpkg:
+ cur_iuse = myeb.iuse.all
+ reinstall_for_flags = self._reinstall_for_flags(pkg,
+ forced_flags, old_use, iuses, now_use, cur_iuse)
+ if reinstall_for_flags:
+ if not pkg.installed:
+ self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
+ break
# Compare current config to installed package
# and do not reinstall if possible.
- if not installed and not useoldpkg and \
- ("--newuse" in self._frozen_config.myopts or \
- "--reinstall" in self._frozen_config.myopts) and \
- cpv in vardb.match(atom):
- forced_flags = set()
- forced_flags.update(pkg.use.force)
- forced_flags.update(pkg.use.mask)
+ if not installed and not useoldpkg and cpv in vardb.match(atom):
inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
- old_use = inst_pkg.use.enabled
- old_iuse = inst_pkg.iuse.all
- cur_use = self._pkg_use_enabled(pkg)
- cur_iuse = pkg.iuse.all
- reinstall_for_flags = \
- self._reinstall_for_flags(pkg,
- forced_flags, old_use, old_iuse,
- cur_use, cur_iuse)
- if reinstall_for_flags:
+ if "--newrepo" in self._frozen_config.myopts and pkg.repo != inst_pkg.repo:
reinstall = True
+ elif reinstall_use:
+ forced_flags = set()
+ forced_flags.update(pkg.use.force)
+ forced_flags.update(pkg.use.mask)
+ old_use = inst_pkg.use.enabled
+ old_iuse = inst_pkg.iuse.all
+ cur_use = self._pkg_use_enabled(pkg)
+ cur_iuse = pkg.iuse.all
+ reinstall_for_flags = \
+ self._reinstall_for_flags(pkg,
+ forced_flags, old_use, old_iuse,
+ cur_use, cur_iuse)
+ if reinstall_for_flags:
+ reinstall = True
if reinstall_atoms.findAtomForPackage(pkg, \
modified_use=self._pkg_use_enabled(pkg)):
reinstall = True
@@ -4512,6 +5488,26 @@ class depgraph(object):
return existing_node, existing_node
if len(matched_packages) > 1:
+ if parent is not None and \
+ (parent.root, parent.slot_atom) in self._dynamic_config._slot_operator_replace_installed:
+ # We're forcing a rebuild of the parent because we missed some
+ # update because of a slot operator dep.
+ if atom.slot_operator == "=" and atom.sub_slot is None:
+ # This one is a slot operator dep. Exclude the installed packages if a newer non-installed
+ # pkg exists.
+ highest_installed = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ if highest_installed is None or pkg.version > highest_installed.version:
+ highest_installed = pkg
+
+ if highest_installed:
+ non_installed = [pkg for pkg in matched_packages \
+ if not pkg.installed and pkg.version > highest_installed.version]
+
+ if non_installed:
+ matched_packages = non_installed
+
if rebuilt_binaries:
inst_pkg = None
built_pkg = None
@@ -4529,15 +5525,8 @@ class depgraph(object):
# non-empty, in order to avoid cases like to
# bug #306659 where BUILD_TIME fields are missing
# in local and/or remote Packages file.
- try:
- built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
- except (KeyError, ValueError):
- built_timestamp = 0
-
- try:
- installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
- except (KeyError, ValueError):
- installed_timestamp = 0
+ built_timestamp = built_pkg.build_time
+ installed_timestamp = inst_pkg.build_time
if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
pass
@@ -4584,7 +5573,7 @@ class depgraph(object):
# ordered by type preference ("ebuild" type is the last resort)
return matched_packages[-1], existing_node
- def _select_pkg_from_graph(self, root, atom, onlydeps=False):
+ def _select_pkg_from_graph(self, root, atom, onlydeps=False, parent=None):
"""
Select packages that have already been added to the graph or
those that are installed and have not been scheduled for
@@ -4594,11 +5583,18 @@ class depgraph(object):
matches = graph_db.match_pkgs(atom)
if not matches:
return None, None
- pkg = matches[-1] # highest match
- in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
- return pkg, in_graph
- def _select_pkg_from_installed(self, root, atom, onlydeps=False):
+ # There may be multiple matches, and they may
+ # conflict with eachother, so choose the highest
+ # version that has already been added to the graph.
+ for pkg in reversed(matches):
+ if pkg in self._dynamic_config.digraph:
+ return pkg, pkg
+
+ # Fall back to installed packages
+ return self._select_pkg_from_installed(root, atom, onlydeps=onlydeps, parent=parent)
+
+ def _select_pkg_from_installed(self, root, atom, onlydeps=False, parent=None):
"""
Select packages that are installed.
"""
@@ -4621,8 +5617,18 @@ class depgraph(object):
unmasked = [pkg for pkg in matches if not pkg.masks]
if unmasked:
matches = unmasked
+ if len(matches) > 1:
+ # Now account for packages for which existing
+ # ebuilds are masked or unavailable (bug #445506).
+ unmasked = [pkg for pkg in matches if
+ self._equiv_ebuild_visible(pkg)]
+ if unmasked:
+ matches = unmasked
+
pkg = matches[-1] # highest match
- in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ in_graph = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
return pkg, in_graph
def _complete_graph(self, required_sets=None):
@@ -4649,9 +5655,9 @@ class depgraph(object):
"complete_if_new_use", "y") == "y"
complete_if_new_ver = self._dynamic_config.myparams.get(
"complete_if_new_ver", "y") == "y"
- rebuild_if_new_slot_abi = self._dynamic_config.myparams.get(
- "rebuild_if_new_slot_abi", "y") == "y"
- complete_if_new_slot = rebuild_if_new_slot_abi
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+ complete_if_new_slot = rebuild_if_new_slot
if "complete" not in self._dynamic_config.myparams and \
(complete_if_new_use or
@@ -4670,10 +5676,16 @@ class depgraph(object):
inst_pkg = vardb.match_pkgs(node.slot_atom)
if inst_pkg and inst_pkg[0].cp == node.cp:
inst_pkg = inst_pkg[0]
- if complete_if_new_ver and \
- (inst_pkg < node or node < inst_pkg):
- version_change = True
- break
+ if complete_if_new_ver:
+ if inst_pkg < node or node < inst_pkg:
+ version_change = True
+ break
+ elif not (inst_pkg.slot == node.slot and
+ inst_pkg.sub_slot == node.sub_slot):
+ # slot/sub-slot change without revbump gets
+ # similar treatment to a version change
+ version_change = True
+ break
# Intersect enabled USE with IUSE, in order to
# ignore forced USE from implicit IUSE flags, since
@@ -4689,7 +5701,8 @@ class depgraph(object):
if complete_if_new_slot:
cp_list = vardb.match_pkgs(Atom(node.cp))
if (cp_list and cp_list[0].cp == node.cp and
- not any(node.slot == pkg.slot for pkg in cp_list)):
+ not any(node.slot == pkg.slot and
+ node.sub_slot == pkg.sub_slot for pkg in cp_list)):
version_change = True
break
@@ -4795,7 +5808,7 @@ class depgraph(object):
return 0
return 1
- def _pkg(self, cpv, type_name, root_config, installed=False,
+ def _pkg(self, cpv, type_name, root_config, installed=False,
onlydeps=False, myrepo = None):
"""
Get a package instance from the cache, or create a new
@@ -4813,10 +5826,14 @@ class depgraph(object):
installed=installed, onlydeps=onlydeps))
if pkg is None and onlydeps and not installed:
# Maybe it already got pulled in as a "merge" node.
- pkg = self._dynamic_config.mydbapi[root_config.root].get(
- Package._gen_hash_key(cpv=cpv, type_name=type_name,
- repo_name=myrepo, root_config=root_config,
- installed=installed, onlydeps=False))
+ for candidate in self._dynamic_config._package_tracker.match(
+ root_config.root, Atom("="+cpv)):
+ if candidate.type_name == type_name and \
+ candidate.repo_name == myrepo and \
+ candidate.root_config is root_config and \
+ candidate.installed == installed and \
+ not candidate.onlydeps:
+ pkg = candidate
if pkg is None:
tree_type = self.pkg_tree_map[type_name]
@@ -4866,7 +5883,7 @@ class depgraph(object):
# For installed packages, always ignore blockers from DEPEND since
# only runtime dependencies should be relevant for packages that
# are already built.
- dep_keys = ["RDEPEND", "PDEPEND"]
+ dep_keys = Package._runtime_keys
for myroot in self._frozen_config.trees:
if self._frozen_config.myopts.get("--root-deps") is not None and \
@@ -4876,7 +5893,8 @@ class depgraph(object):
vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[myroot]
root_config = self._frozen_config.roots[myroot]
- final_db = self._dynamic_config.mydbapi[myroot]
+ final_db = PackageTrackerDbapiWrapper(
+ myroot, self._dynamic_config._package_tracker)
blocker_cache = BlockerCache(myroot, vardb)
stale_cache = set(blocker_cache)
@@ -4893,7 +5911,7 @@ class depgraph(object):
# the merge process or by --depclean. Always warn about
# packages masked by license, since the user likely wants
# to adjust ACCEPT_LICENSE.
- if pkg in final_db:
+ if pkg in self._dynamic_config._package_tracker:
if not self._pkg_visibility_check(pkg,
trust_graph=False) and \
(pkg_in_graph or 'LICENSE' in pkg.masks):
@@ -4928,7 +5946,7 @@ class depgraph(object):
self._spinner_update()
blocker_data = blocker_cache.get(cpv)
if blocker_data is not None and \
- blocker_data.counter != long(pkg.metadata["COUNTER"]):
+ blocker_data.counter != pkg.counter:
blocker_data = None
# If blocker data from the graph is available, use
@@ -4945,9 +5963,8 @@ class depgraph(object):
blockers is not None:
# Re-use the blockers from the graph.
blocker_atoms = sorted(blockers)
- counter = long(pkg.metadata["COUNTER"])
blocker_data = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
blocker_cache[pkg.cpv] = blocker_data
continue
@@ -4972,13 +5989,14 @@ class depgraph(object):
# matches (this can happen if an atom lacks a
# category).
show_invalid_depstring_notice(
- pkg, depstr, _unicode_decode("%s") % (e,))
+ pkg, depstr, "%s" % (e,))
del e
raise
if not success:
- replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
- if replacement_pkg and \
- replacement_pkg[0].operation == "merge":
+ replacement_pkgs = self._dynamic_config._package_tracker.match(
+ myroot, pkg.slot_atom)
+ if any(replacement_pkg[0].operation == "merge" for \
+ replacement_pkg in replacement_pkgs):
# This package is being replaced anyway, so
# ignore invalid dependencies so as not to
# annoy the user too much (otherwise they'd be
@@ -4989,22 +6007,20 @@ class depgraph(object):
blocker_atoms = [myatom for myatom in atoms \
if myatom.blocker]
blocker_atoms.sort()
- counter = long(pkg.metadata["COUNTER"])
blocker_cache[cpv] = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
if blocker_atoms:
try:
for atom in blocker_atoms:
blocker = Blocker(atom=atom,
- eapi=pkg.metadata["EAPI"],
+ eapi=pkg.eapi,
priority=self._priority(runtime=True),
root=myroot)
self._dynamic_config._blocker_parents.add(blocker, pkg)
except portage.exception.InvalidAtom as e:
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
show_invalid_depstring_notice(
- pkg, depstr,
- _unicode_decode("Invalid Atom: %s") % (e,))
+ pkg, depstr, "Invalid Atom: %s" % (e,))
return False
for cpv in stale_cache:
del blocker_cache[cpv]
@@ -5025,8 +6041,7 @@ class depgraph(object):
virtuals = root_config.settings.getvirtuals()
myroot = blocker.root
initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
- final_db = self._dynamic_config.mydbapi[myroot]
-
+
provider_virtual = False
if blocker.cp in virtuals and \
not self._have_new_virt(blocker.root, blocker.cp):
@@ -5053,7 +6068,7 @@ class depgraph(object):
blocked_final = set()
for atom in atoms:
- for pkg in final_db.match_pkgs(atom):
+ for pkg in self._dynamic_config._package_tracker.match(myroot, atom):
if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
blocked_final.add(pkg)
@@ -5137,7 +6152,7 @@ class depgraph(object):
for inst_pkg, inst_task in depends_on_order:
uninst_task = Package(built=inst_pkg.built,
cpv=inst_pkg.cpv, installed=inst_pkg.installed,
- metadata=inst_pkg.metadata,
+ metadata=inst_pkg._metadata,
operation="uninstall",
root_config=inst_pkg.root_config,
type_name=inst_pkg.type_name)
@@ -5203,7 +6218,12 @@ class depgraph(object):
mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
- def altlist(self, reversed=False):
+ def altlist(self, reversed=DeprecationWarning):
+
+ if reversed is not DeprecationWarning:
+ warnings.warn("The reversed parameter of "
+ "_emerge.depgraph.depgraph.altlist() is deprecated",
+ DeprecationWarning, stacklevel=2)
while self._dynamic_config._serialized_tasks_cache is None:
self._resolve_conflicts()
@@ -5213,9 +6233,13 @@ class depgraph(object):
except self._serialize_tasks_retry:
pass
- retlist = self._dynamic_config._serialized_tasks_cache[:]
- if reversed:
+ retlist = self._dynamic_config._serialized_tasks_cache
+ if reversed is not DeprecationWarning and reversed:
+ # TODO: remove the "reversed" parameter (builtin name collision)
+ retlist = list(retlist)
retlist.reverse()
+ retlist = tuple(retlist)
+
return retlist
def _implicit_libc_deps(self, mergelist, graph):
@@ -5226,19 +6250,15 @@ class depgraph(object):
libc_pkgs = {}
implicit_libc_roots = (self._frozen_config._running_root.root,)
for root in implicit_libc_roots:
- graphdb = self._dynamic_config.mydbapi[root]
vardb = self._frozen_config.trees[root]["vartree"].dbapi
for atom in self._expand_virt_from_graph(root,
portage.const.LIBC_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- libc_pkgs.setdefault(pkg.root, set()).add(pkg)
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.setdefault(pkg.root, set()).add(pkg)
if not libc_pkgs:
return
@@ -5326,7 +6346,7 @@ class depgraph(object):
if "complete" not in self._dynamic_config.myparams and \
self._dynamic_config._allow_backtracking and \
- self._dynamic_config._slot_collision_nodes and \
+ any(self._dynamic_config._package_tracker.slot_conflicts()) and \
not self._accept_blocker_conflicts():
self._dynamic_config.myparams["complete"] = True
@@ -5335,10 +6355,13 @@ class depgraph(object):
self._process_slot_conflicts()
- self._slot_abi_trigger_reinstalls()
+ if self._dynamic_config._allow_backtracking:
+ self._slot_operator_trigger_reinstalls()
if not self._validate_blockers():
- self._dynamic_config._skip_restart = True
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
raise self._unknown_internal_error()
def _serialize_tasks(self):
@@ -5436,8 +6459,8 @@ class depgraph(object):
initial_atoms=[PORTAGE_PACKAGE_ATOM])
running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
PORTAGE_PACKAGE_ATOM)
- replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
- PORTAGE_PACKAGE_ATOM)
+ replacement_portage = list(self._dynamic_config._package_tracker.match(
+ running_root, Atom(PORTAGE_PACKAGE_ATOM)))
if running_portage:
running_portage = running_portage[0]
@@ -5455,7 +6478,7 @@ class depgraph(object):
if running_portage is not None:
try:
portage_rdepend = self._select_atoms_highest_available(
- running_root, running_portage.metadata["RDEPEND"],
+ running_root, running_portage._metadata["RDEPEND"],
myuse=self._pkg_use_enabled(running_portage),
parent=running_portage, strict=False)
except portage.exception.InvalidDependString as e:
@@ -5474,18 +6497,15 @@ class depgraph(object):
for root in implicit_libc_roots:
libc_pkgs = set()
vardb = self._frozen_config.trees[root]["vartree"].dbapi
- graphdb = self._dynamic_config.mydbapi[root]
for atom in self._expand_virt_from_graph(root,
portage.const.LIBC_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- libc_pkgs.add(pkg)
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.add(pkg)
if libc_pkgs:
# If there's also an os-headers upgrade, we need to
@@ -5494,13 +6514,11 @@ class depgraph(object):
portage.const.OS_HEADERS_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- asap_nodes.append(pkg)
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ asap_nodes.append(pkg)
asap_nodes.extend(libc_pkgs)
@@ -5803,8 +6821,7 @@ class depgraph(object):
other_version = None
for pkg in vardb.match_pkgs(atom):
if pkg.cpv == task.cpv and \
- pkg.metadata["COUNTER"] == \
- task.metadata["COUNTER"]:
+ pkg.counter == task.counter:
continue
other_version = pkg
break
@@ -5843,13 +6860,12 @@ class depgraph(object):
# For packages in the world set, go ahead an uninstall
# when necessary, as long as the atom will be satisfied
# in the final state.
- graph_db = self._dynamic_config.mydbapi[task.root]
skip = False
try:
for atom in root_config.sets[
"selected"].iterAtomsForPackage(task):
satisfied = False
- for pkg in graph_db.match_pkgs(atom):
+ for pkg in self._dynamic_config._package_tracker.match(task.root, atom):
if pkg == inst_pkg:
continue
satisfied = True
@@ -5931,12 +6947,11 @@ class depgraph(object):
# node unnecessary (due to occupying the same SLOT),
# and we want to avoid executing a separate uninstall
# task in that case.
- slot_node = self._dynamic_config.mydbapi[uninst_task.root
- ].match_pkgs(uninst_task.slot_atom)
- if slot_node and \
- slot_node[0].operation == "merge":
- mygraph.add(slot_node[0], uninst_task,
- priority=BlockerDepPriority.instance)
+ for slot_node in self._dynamic_config._package_tracker.match(
+ uninst_task.root, uninst_task.slot_atom):
+ if slot_node.operation == "merge":
+ mygraph.add(slot_node, uninst_task,
+ priority=BlockerDepPriority.instance)
# Reset the state variables for leaf node selection and
# continue trying to select leaf nodes.
@@ -6011,7 +7026,7 @@ class depgraph(object):
inst_pkg = inst_pkg[0]
uninst_task = Package(built=inst_pkg.built,
cpv=inst_pkg.cpv, installed=inst_pkg.installed,
- metadata=inst_pkg.metadata,
+ metadata=inst_pkg._metadata,
operation="uninstall",
root_config=inst_pkg.root_config,
type_name=inst_pkg.type_name)
@@ -6083,17 +7098,22 @@ class depgraph(object):
for blocker in unsolvable_blockers:
retlist.append(blocker)
+ retlist = tuple(retlist)
+
if unsolvable_blockers and \
not self._accept_blocker_conflicts():
self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
- self._dynamic_config._serialized_tasks_cache = retlist[:]
+ self._dynamic_config._serialized_tasks_cache = retlist
self._dynamic_config._scheduler_graph = scheduler_graph
- self._dynamic_config._skip_restart = True
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
raise self._unknown_internal_error()
- if self._dynamic_config._slot_collision_info and \
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict and \
not self._accept_blocker_conflicts():
- self._dynamic_config._serialized_tasks_cache = retlist[:]
+ self._dynamic_config._serialized_tasks_cache = retlist
self._dynamic_config._scheduler_graph = scheduler_graph
raise self._unknown_internal_error()
@@ -6147,13 +7167,8 @@ class depgraph(object):
def _show_merge_list(self):
if self._dynamic_config._serialized_tasks_cache is not None and \
not (self._dynamic_config._displayed_list is not None and \
- (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
- self._dynamic_config._displayed_list == \
- list(reversed(self._dynamic_config._serialized_tasks_cache)))):
- display_list = self._dynamic_config._serialized_tasks_cache[:]
- if "--tree" in self._frozen_config.myopts:
- display_list.reverse()
- self.display(display_list)
+ self._dynamic_config._displayed_list is self._dynamic_config._serialized_tasks_cache):
+ self.display(self._dynamic_config._serialized_tasks_cache)
def _show_unsatisfied_blockers(self, blockers):
self._show_merge_list()
@@ -6175,6 +7190,18 @@ class depgraph(object):
for blocker in blockers:
for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
self._dynamic_config._blocker_parents.parent_nodes(blocker)):
+
+ is_slot_conflict_pkg = False
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ if conflict.root == pkg.root and conflict.atom == pkg.slot_atom:
+ is_slot_conflict_pkg = True
+ break
+ if is_slot_conflict_pkg:
+ # The slot conflict display has better noise reduction
+ # than the unsatisfied blockers display, so skip
+ # unsatisfied blockers display for packages involved
+ # directly in slot conflicts (see bug #385391).
+ continue
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if not parent_atoms:
atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
@@ -6232,7 +7259,14 @@ class depgraph(object):
else:
# Display the specific atom from SetArg or
# Package types.
- msg.append("%s required by %s" % (atom, parent))
+ if atom != atom.unevaluated_atom:
+ # Show the unevaluated atom, since it can reveal
+ # issues with conditional use-flags missing
+ # from IUSE.
+ msg.append("%s (%s) required by %s" %
+ (atom.unevaluated_atom, atom, parent))
+ else:
+ msg.append("%s required by %s" % (atom, parent))
msg.append("\n")
msg.append("\n")
@@ -6248,6 +7282,10 @@ class depgraph(object):
# redundantly displaying this exact same merge list
# again via _show_merge_list().
self._dynamic_config._displayed_list = mylist
+
+ if "--tree" in self._frozen_config.myopts:
+ mylist = tuple(reversed(mylist))
+
display = Display()
return display(self, mylist, favorites, verbosity)
@@ -6320,7 +7358,7 @@ class depgraph(object):
if is_latest:
unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
elif is_latest_in_slot:
- unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
else:
unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
else:
@@ -6343,7 +7381,7 @@ class depgraph(object):
keyword = reason.unmask_hint.value
comment, filename = portage.getmaskingreason(
- pkg.cpv, metadata=pkg.metadata,
+ pkg.cpv, metadata=pkg._metadata,
settings=pkgsettings,
portdb=pkg.root_config.trees["porttree"].dbapi,
return_location=True)
@@ -6360,7 +7398,7 @@ class depgraph(object):
if is_latest:
p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
elif is_latest_in_slot:
- p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
else:
p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
else:
@@ -6385,7 +7423,7 @@ class depgraph(object):
if is_latest:
use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
elif is_latest_in_slot:
- use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
+ use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
else:
use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
@@ -6402,7 +7440,7 @@ class depgraph(object):
if is_latest:
license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
elif is_latest_in_slot:
- license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
+ license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
else:
license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
@@ -6442,7 +7480,7 @@ class depgraph(object):
if stat.S_ISREG(st.st_mode):
last_file_path = p
elif stat.S_ISDIR(st.st_mode):
- if os.path.basename(p) in _ignorecvs_dirs:
+ if os.path.basename(p) in VCS_DIRS:
continue
try:
contents = os.listdir(p)
@@ -6511,24 +7549,25 @@ class depgraph(object):
if len(roots) > 1:
writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+ def _writemsg(reason, file):
+ writemsg(('\nThe following %s are necessary to proceed:\n'
+ ' (see "%s" in the portage(5) man page for more details)\n')
+ % (colorize('BAD', reason), file), noiselevel=-1)
+
if root in unstable_keyword_msg:
- writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('keyword changes', 'package.accept_keywords')
writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
if root in p_mask_change_msg:
- writemsg("\nThe following " + colorize("BAD", "mask changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('mask changes', 'package.unmask')
writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
if root in use_changes_msg:
- writemsg("\nThe following " + colorize("BAD", "USE changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('USE changes', 'package.use')
writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
if root in license_msg:
- writemsg("\nThe following " + colorize("BAD", "license changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('license changes', 'package.license')
writemsg(format_msg(license_msg[root]), noiselevel=-1)
protect_obj = {}
@@ -6542,11 +7581,12 @@ class depgraph(object):
def write_changes(root, changes, file_to_write_to):
file_contents = None
try:
- file_contents = io.open(
+ with io.open(
_unicode_encode(file_to_write_to,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'],
- errors='replace').readlines()
+ errors='replace') as f:
+ file_contents = f.readlines()
except IOError as e:
if e.errno == errno.ENOENT:
file_contents = []
@@ -6612,10 +7652,16 @@ class depgraph(object):
noiselevel=-1)
writemsg("".join(problems), noiselevel=-1)
elif write_to_file and roots:
- writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
+ writemsg("\nAutounmask changes successfully written.\n",
noiselevel=-1)
+ for root in roots:
+ chk_updated_cfg_files(root,
+ [os.path.join(os.sep, USER_CONFIG_PATH)])
elif not pretend and not autounmask_write and roots:
- writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+ writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
+ "CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
+ "paying special attention to mask or keyword changes that may expose\n"
+ "experimental or unstable packages.\n",
noiselevel=-1)
@@ -6632,21 +7678,35 @@ class depgraph(object):
self._show_circular_deps(
self._dynamic_config._circular_deps_for_display)
- # The slot conflict display has better noise reduction than
- # the unsatisfied blockers display, so skip unsatisfied blockers
- # display if there are slot conflicts (see bug #385391).
- if self._dynamic_config._slot_collision_info:
+ unresolved_conflicts = False
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict:
+ unresolved_conflicts = True
self._show_slot_collision_notice()
- elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ unresolved_conflicts = True
self._show_unsatisfied_blockers(
self._dynamic_config._unsatisfied_blockers_for_display)
- else:
+
+ # Only show missed updates if there are no unresolved conflicts,
+ # since they may be irrelevant after the conflicts are solved.
+ if not unresolved_conflicts:
self._show_missed_update()
+ if self._frozen_config.myopts.get("--verbose-slot-rebuilds", 'y') != 'n':
+ self._compute_abi_rebuild_info()
+ self._show_abi_rebuild_info()
+
self._show_ignored_binaries()
self._display_autounmask()
+ for depgraph_sets in self._dynamic_config.sets.values():
+ for pset in depgraph_sets.sets.values():
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
# TODO: Add generic support for "set problem" handlers so that
# the below warnings aren't special cases for world only.
@@ -6722,7 +7782,7 @@ class depgraph(object):
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
masked_packages.append((root_config, pkgsettings,
- pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
if masked_packages:
writemsg("\n" + colorize("BAD", "!!!") + \
" The following updates are masked by LICENSE changes:\n",
@@ -6737,7 +7797,7 @@ class depgraph(object):
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
masked_packages.append((root_config, pkgsettings,
- pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
if masked_packages:
writemsg("\n" + colorize("BAD", "!!!") + \
" The following installed packages are masked:\n",
@@ -6747,7 +7807,15 @@ class depgraph(object):
writemsg("\n", noiselevel=-1)
for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
- self._show_unsatisfied_dep(*pargs, **kwargs)
+ self._show_unsatisfied_dep(*pargs,
+ **portage._native_kwargs(kwargs))
+
+ if self._dynamic_config._buildpkgonly_deps_unsatisfied:
+ self._show_merge_list()
+ writemsg("\n!!! --buildpkgonly requires all "
+ "dependencies to be merged.\n", noiselevel=-1)
+ writemsg("!!! Cannot merge requested packages. "
+ "Merge deps and try again.\n\n", noiselevel=-1)
def saveNomergeFavorites(self):
"""Find atoms in favorites that are not in the mergelist and add them
@@ -6808,16 +7876,31 @@ class depgraph(object):
all_added.append(SETPREFIX + k)
all_added.extend(added_favorites)
all_added.sort()
- for a in all_added:
- if a.startswith(SETPREFIX):
- filename = "world_sets"
- else:
- filename = "world"
- writemsg_stdout(
- ">>> Recording %s in \"%s\" favorites file...\n" %
- (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
if all_added:
- world_set.update(all_added)
+ skip = False
+ if "--ask" in self._frozen_config.myopts:
+ writemsg_stdout("\n", noiselevel=-1)
+ for a in all_added:
+ writemsg_stdout(" %s %s\n" % (colorize("GOOD", "*"), a),
+ noiselevel=-1)
+ writemsg_stdout("\n", noiselevel=-1)
+ prompt = "Would you like to add these packages to your world " \
+ "favorites?"
+ enter_invalid = '--ask-enter-invalid' in \
+ self._frozen_config.myopts
+ if userquery(prompt, enter_invalid) == "No":
+ skip = True
+
+ if not skip:
+ for a in all_added:
+ if a.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
+ writemsg_stdout(
+ ">>> Recording %s in \"%s\" favorites file...\n" %
+ (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
+ world_set.update(all_added)
if world_locked:
world_set.unlock()
@@ -6844,7 +7927,6 @@ class depgraph(object):
else:
args = []
- fakedb = self._dynamic_config.mydbapi
serialized_tasks = []
masked_tasks = []
for x in mergelist:
@@ -6902,7 +7984,7 @@ class depgraph(object):
self._dynamic_config._unsatisfied_deps_for_display.append(
((pkg.root, "="+pkg.cpv), {"myparent":None}))
- fakedb[myroot].cpv_inject(pkg)
+ self._dynamic_config._package_tracker.add_pkg(pkg)
serialized_tasks.append(pkg)
self._spinner_update()
@@ -7092,14 +8174,15 @@ class depgraph(object):
try:
for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
self._show_unsatisfied_dep(
- *pargs, check_autounmask_breakage=True, **kwargs)
+ *pargs, check_autounmask_breakage=True,
+ **portage._native_kwargs(kwargs))
except self._autounmask_breakage:
return True
return False
def get_backtrack_infos(self):
return self._dynamic_config._backtrack_infos
-
+
class _dep_check_composite_db(dbapi):
"""
@@ -7214,8 +8297,9 @@ class _dep_check_composite_db(dbapi):
elif not self._depgraph._equiv_ebuild_visible(pkg):
return False
- in_graph = self._depgraph._dynamic_config._slot_pkg_map[
- self._root].get(pkg.slot_atom)
+ in_graph = next(self._depgraph._dynamic_config._package_tracker.match(
+ self._root, pkg.slot_atom, installed=False), None)
+
if in_graph is None:
# Mask choices for packages which are not the highest visible
# version within their slot (since they usually trigger slot
@@ -7234,7 +8318,7 @@ class _dep_check_composite_db(dbapi):
return True
def aux_get(self, cpv, wants):
- metadata = self._cpv_pkg_map[cpv].metadata
+ metadata = self._cpv_pkg_map[cpv]._metadata
return [metadata.get(x, "") for x in wants]
def match_pkgs(self, atom):
@@ -7308,14 +8392,14 @@ def _spinner_stop(spinner):
portage.writemsg_stdout("... done!\n")
-def backtrack_depgraph(settings, trees, myopts, myparams,
+def backtrack_depgraph(settings, trees, myopts, myparams,
myaction, myfiles, spinner):
"""
Raises PackageSetNotFound if myfiles contains a missing package set.
"""
_spinner_start(spinner, myopts)
try:
- return _backtrack_depgraph(settings, trees, myopts, myparams,
+ return _backtrack_depgraph(settings, trees, myopts, myparams,
myaction, myfiles, spinner)
finally:
_spinner_stop(spinner)
@@ -7412,7 +8496,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
skip_masked = True
skip_unsatisfied = True
mergelist = mtimedb["resume"]["mergelist"]
- dropped_tasks = set()
+ dropped_tasks = {}
frozen_config = _frozen_depgraph_config(settings, trees,
myopts, spinner)
while True:
@@ -7426,12 +8510,21 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
raise
graph = mydepgraph._dynamic_config.digraph
- unsatisfied_parents = dict((dep.parent, dep.parent) \
- for dep in e.value)
+ unsatisfied_parents = {}
traversed_nodes = set()
- unsatisfied_stack = list(unsatisfied_parents)
+ unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
while unsatisfied_stack:
- pkg = unsatisfied_stack.pop()
+ pkg, atom = unsatisfied_stack.pop()
+ if atom is not None and \
+ mydepgraph._select_pkg_from_installed(
+ pkg.root, atom)[0] is not None:
+ continue
+ atoms = unsatisfied_parents.get(pkg)
+ if atoms is None:
+ atoms = []
+ unsatisfied_parents[pkg] = atoms
+ if atom is not None:
+ atoms.append(atom)
if pkg in traversed_nodes:
continue
traversed_nodes.add(pkg)
@@ -7440,7 +8533,8 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# package scheduled for merge, removing this
# package may cause the the parent package's
# dependency to become unsatisfied.
- for parent_node in graph.parent_nodes(pkg):
+ for parent_node, atom in \
+ mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
if not isinstance(parent_node, Package) \
or parent_node.operation not in ("merge", "nomerge"):
continue
@@ -7448,8 +8542,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# ensure that a package with an unsatisfied depenedency
# won't get pulled in, even indirectly via a soft
# dependency.
- unsatisfied_parents[parent_node] = parent_node
- unsatisfied_stack.append(parent_node)
+ unsatisfied_stack.append((parent_node, atom))
unsatisfied_tuples = frozenset(tuple(parent_node)
for parent_node in unsatisfied_parents
@@ -7470,8 +8563,8 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# Exclude installed packages that have been removed from the graph due
# to failure to build/install runtime dependencies after the dependent
# package has already been installed.
- dropped_tasks.update(pkg for pkg in \
- unsatisfied_parents if pkg.operation != "nomerge")
+ dropped_tasks.update((pkg, atoms) for pkg, atoms in \
+ unsatisfied_parents.items() if pkg.operation != "nomerge")
del e, graph, traversed_nodes, \
unsatisfied_parents, unsatisfied_stack
@@ -7557,9 +8650,11 @@ def show_masked_packages(masked_packages):
shown_comments.add(comment)
portdb = root_config.trees["porttree"].dbapi
for l in missing_licenses:
- l_path = portdb.findLicensePath(l)
if l in shown_licenses:
continue
+ l_path = portdb.findLicensePath(l)
+ if l_path is None:
+ continue
msg = ("A copy of the '%s' license" + \
" is located at '%s'.\n\n") % (l, l_path)
writemsg(msg, noiselevel=-1)
@@ -7586,9 +8681,9 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
if not pkg.installed:
- if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
+ if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
- pkg.metadata["CHOST"]))
+ pkg._metadata["CHOST"]))
if pkg.invalid:
for msgs in pkg.invalid.values():
@@ -7596,7 +8691,7 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
mreasons.append(
_MaskReason("invalid", "invalid: %s" % (msg,)))
- if not pkg.metadata["SLOT"]:
+ if not pkg._metadata["SLOT"]:
mreasons.append(
_MaskReason("invalid", "SLOT: undefined"))
diff --git a/pym/_emerge/emergelog.py b/pym/_emerge/emergelog.py
index b1b093f52..aea94f74e 100644
--- a/pym/_emerge/emergelog.py
+++ b/pym/_emerge/emergelog.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import unicode_literals
import io
import sys
@@ -20,10 +20,6 @@ from portage.output import xtermTitle
_disable = True
_emerge_log_dir = '/var/log'
-# Coerce to unicode, in order to prevent TypeError when writing
-# raw bytes to TextIOWrapper with python2.
-_log_fmt = _unicode_decode("%.0f: %s\n")
-
def emergelog(xterm_titles, mystr, short_msg=None):
if _disable:
@@ -51,10 +47,10 @@ def emergelog(xterm_titles, mystr, short_msg=None):
mode=0o660)
mylock = portage.locks.lockfile(file_path)
try:
- mylogfile.write(_log_fmt % (time.time(), mystr))
+ mylogfile.write("%.0f: %s\n" % (time.time(), mystr))
mylogfile.close()
finally:
portage.locks.unlockfile(mylock)
except (IOError,OSError,portage.exception.PortageException) as e:
if secpass >= 1:
- print("emergelog():",e, file=sys.stderr)
+ portage.util.writemsg("emergelog(): %s\n" % (e,), noiselevel=-1)
diff --git a/pym/_emerge/getloadavg.py b/pym/_emerge/getloadavg.py
index e9babf13e..6a2794fb1 100644
--- a/pym/_emerge/getloadavg.py
+++ b/pym/_emerge/getloadavg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
@@ -11,7 +11,8 @@ if getloadavg is None:
Raises OSError if the load average was unobtainable.
"""
try:
- loadavg_str = open('/proc/loadavg').readline()
+ with open('/proc/loadavg') as f:
+ loadavg_str = f.readline()
except IOError:
# getloadavg() is only supposed to raise OSError, so convert
raise OSError('unknown')
diff --git a/pym/_emerge/help.py b/pym/_emerge/help.py
index a1dbb37cc..8e241a85c 100644
--- a/pym/_emerge/help.py
+++ b/pym/_emerge/help.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -9,15 +9,15 @@ def help():
print(bold("emerge:")+" the other white meat (command-line interface to the Portage system)")
print(bold("Usage:"))
print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
- print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >")
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("@system")+" | "+turquoise("@world")+" >")
print(" "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >")
print(" "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]")
- print(" "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("--verbose")+" ] ")
- print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvV")+"]")
+ print(" "+turquoise("emerge")+" "+turquoise("--help"))
+ print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvVw")+"]")
print(" [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]")
print(" [ "+green("--complete-graph")+" ] [ "+green("--deep")+" ]")
print(" [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + " ]")
- print(" [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
+ print(" [ "+green("--newrepo")+" ] [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
print(" [ "+green("--oneshot")+" ] [ "+green("--onlydeps")+" ] [ "+ green("--quiet-build")+" [ " + turquoise("y") + " | "+ turquoise("n")+" ] ]")
print(" [ "+green("--reinstall ")+turquoise("changed-use")+" ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]")
print(bold("Actions:")+" [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+" ]")
diff --git a/pym/_emerge/is_valid_package_atom.py b/pym/_emerge/is_valid_package_atom.py
index 7cb2a5bb1..112afc1ec 100644
--- a/pym/_emerge/is_valid_package_atom.py
+++ b/pym/_emerge/is_valid_package_atom.py
@@ -1,11 +1,12 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import re
from portage.dep import isvalidatom
def insert_category_into_atom(atom, category):
- alphanum = re.search(r'\w', atom)
+ # Handle '*' character for "extended syntax" wildcard support.
+ alphanum = re.search(r'[\*\w]', atom, re.UNICODE)
if alphanum:
ret = atom[:alphanum.start()] + "%s/" % category + \
atom[alphanum.start():]
@@ -14,7 +15,7 @@ def insert_category_into_atom(atom, category):
return ret
def is_valid_package_atom(x, allow_repo=False):
- if "/" not in x:
+ if "/" not in x.split(":")[0]:
x2 = insert_category_into_atom(x, 'cat')
if x2 != None:
x = x2
diff --git a/pym/_emerge/main.py b/pym/_emerge/main.py
index f19994c46..cfe133264 100644
--- a/pym/_emerge/main.py
+++ b/pym/_emerge/main.py
@@ -1,53 +1,24 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
-import logging
-import signal
-import stat
-import subprocess
-import sys
-import textwrap
import platform
+import sys
+
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.news:count_unread_news,display_news_notifications',
- 'portage.emaint.modules.logs.logs:CleanLogs',
+ 'logging',
+ 'portage.dep:Atom',
+ 'portage.util:writemsg_level',
+ 'textwrap',
+ '_emerge.actions:load_emerge_config,run_action,' + \
+ 'validate_ebuild_environment',
+ '_emerge.help:help@emerge_help',
+ '_emerge.is_valid_package_atom:insert_category_into_atom'
)
from portage import os
-from portage import _encodings
-from portage import _unicode_decode
-import _emerge.help
-import portage.xpak, errno, re, time
-from portage.output import colorize, xtermTitle, xtermTitleReset
-from portage.output import create_color_func
-good = create_color_func("GOOD")
-bad = create_color_func("BAD")
-
-from portage.const import _ENABLE_DYN_LINK_MAP
-import portage.elog
-import portage.util
-import portage.locks
-import portage.exception
-from portage.data import secpass
-from portage.dbapi.dep_expand import dep_expand
-from portage.util import normalize_path as normpath
-from portage.util import (shlex_split, varexpand,
- writemsg_level, writemsg_stdout)
-from portage._sets import SETPREFIX
-from portage._global_updates import _global_updates
-
-from _emerge.actions import action_config, action_sync, action_metadata, \
- action_regen, action_search, action_uninstall, action_info, action_build, \
- adjust_configs, chk_updated_cfg_files, display_missing_pkg_set, \
- display_news_notification, getportageversion, load_emerge_config
-import _emerge
-from _emerge.emergelog import emergelog
-from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
-from _emerge.is_valid_package_atom import is_valid_package_atom
-from _emerge.stdout_spinner import stdout_spinner
-from _emerge.userquery import userquery
+from portage.util._argparse import ArgumentParser
if sys.hexversion >= 0x3000000:
long = int
@@ -61,9 +32,11 @@ options=[
"--debug",
"--digest",
"--emptytree",
+"--verbose-conflicts",
"--fetchonly", "--fetch-all-uri",
"--ignore-default-opts",
"--noconfmem",
+"--newrepo",
"--newuse",
"--nodeps", "--noreplace",
"--nospinner", "--oneshot",
@@ -76,7 +49,6 @@ options=[
"--tree",
"--unordered-display",
"--update",
-"--verbose",
"--verbose-main-repo-display",
]
@@ -97,7 +69,7 @@ shortmapping={
"s":"--search", "S":"--searchdesc",
"t":"--tree",
"u":"--update",
-"v":"--verbose", "V":"--version"
+"V":"--version"
}
COWSAY_MOO = """
@@ -109,331 +81,12 @@ COWSAY_MOO = """
-----------------------
\ ^__^
\ (oo)\_______
- (__)\ )\/\
+ (__)\ )\/\\
||----w |
|| ||
"""
-def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
-
- if os.path.exists("/usr/bin/install-info"):
- out = portage.output.EOutput()
- regen_infodirs=[]
- for z in infodirs:
- if z=='':
- continue
- inforoot=normpath(root+z)
- if os.path.isdir(inforoot) and \
- not [x for x in os.listdir(inforoot) \
- if x.startswith('.keepinfodir')]:
- infomtime = os.stat(inforoot)[stat.ST_MTIME]
- if inforoot not in prev_mtimes or \
- prev_mtimes[inforoot] != infomtime:
- regen_infodirs.append(inforoot)
-
- if not regen_infodirs:
- portage.writemsg_stdout("\n")
- if portage.util.noiselimit >= 0:
- out.einfo("GNU info directory index is up-to-date.")
- else:
- portage.writemsg_stdout("\n")
- if portage.util.noiselimit >= 0:
- out.einfo("Regenerating GNU info directory index...")
-
- dir_extensions = ("", ".gz", ".bz2")
- icount=0
- badcount=0
- errmsg = ""
- for inforoot in regen_infodirs:
- if inforoot=='':
- continue
-
- if not os.path.isdir(inforoot) or \
- not os.access(inforoot, os.W_OK):
- continue
-
- file_list = os.listdir(inforoot)
- file_list.sort()
- dir_file = os.path.join(inforoot, "dir")
- moved_old_dir = False
- processed_count = 0
- for x in file_list:
- if x.startswith(".") or \
- os.path.isdir(os.path.join(inforoot, x)):
- continue
- if x.startswith("dir"):
- skip = False
- for ext in dir_extensions:
- if x == "dir" + ext or \
- x == "dir" + ext + ".old":
- skip = True
- break
- if skip:
- continue
- if processed_count == 0:
- for ext in dir_extensions:
- try:
- os.rename(dir_file + ext, dir_file + ext + ".old")
- moved_old_dir = True
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- processed_count += 1
- try:
- proc = subprocess.Popen(
- ['/usr/bin/install-info',
- '--dir-file=%s' % os.path.join(inforoot, "dir"),
- os.path.join(inforoot, x)],
- env=dict(os.environ, LANG="C", LANGUAGE="C"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- except OSError:
- myso = None
- else:
- myso = _unicode_decode(
- proc.communicate()[0]).rstrip("\n")
- proc.wait()
- existsstr="already exists, for file `"
- if myso:
- if re.search(existsstr,myso):
- # Already exists... Don't increment the count for this.
- pass
- elif myso[:44]=="install-info: warning: no info dir entry in ":
- # This info file doesn't contain a DIR-header: install-info produces this
- # (harmless) warning (the --quiet switch doesn't seem to work).
- # Don't increment the count for this.
- pass
- else:
- badcount=badcount+1
- errmsg += myso + "\n"
- icount=icount+1
-
- if moved_old_dir and not os.path.exists(dir_file):
- # We didn't generate a new dir file, so put the old file
- # back where it was originally found.
- for ext in dir_extensions:
- try:
- os.rename(dir_file + ext + ".old", dir_file + ext)
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
-
- # Clean dir.old cruft so that they don't prevent
- # unmerge of otherwise empty directories.
- for ext in dir_extensions:
- try:
- os.unlink(dir_file + ext + ".old")
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
-
- #update mtime so we can potentially avoid regenerating.
- prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
-
- if badcount:
- out.eerror("Processed %d info files; %d errors." % \
- (icount, badcount))
- writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
- else:
- if icount > 0 and portage.util.noiselimit >= 0:
- out.einfo("Processed %d info files." % (icount,))
-
-def display_preserved_libs(vardbapi, myopts):
- MAX_DISPLAY = 3
-
- if vardbapi._linkmap is None or \
- vardbapi._plib_registry is None:
- # preserve-libs is entirely disabled
- return
-
- # Explicitly load and prune the PreservedLibsRegistry in order
- # to ensure that we do not display stale data.
- vardbapi._plib_registry.load()
-
- if vardbapi._plib_registry.hasEntries():
- if "--quiet" in myopts:
- print()
- print(colorize("WARN", "!!!") + " existing preserved libs found")
- return
- else:
- print()
- print(colorize("WARN", "!!!") + " existing preserved libs:")
-
- plibdata = vardbapi._plib_registry.getPreservedLibs()
- linkmap = vardbapi._linkmap
- consumer_map = {}
- owners = {}
-
- try:
- linkmap.rebuild()
- except portage.exception.CommandNotFound as e:
- writemsg_level("!!! Command Not Found: %s\n" % (e,),
- level=logging.ERROR, noiselevel=-1)
- del e
- else:
- search_for_owners = set()
- for cpv in plibdata:
- internal_plib_keys = set(linkmap._obj_key(f) \
- for f in plibdata[cpv])
- for f in plibdata[cpv]:
- if f in consumer_map:
- continue
- consumers = []
- for c in linkmap.findConsumers(f):
- # Filter out any consumers that are also preserved libs
- # belonging to the same package as the provider.
- if linkmap._obj_key(c) not in internal_plib_keys:
- consumers.append(c)
- consumers.sort()
- consumer_map[f] = consumers
- search_for_owners.update(consumers[:MAX_DISPLAY+1])
-
- owners = {}
- for f in search_for_owners:
- owner_set = set()
- for owner in linkmap.getOwners(f):
- owner_dblink = vardbapi._dblink(owner)
- if owner_dblink.exists():
- owner_set.add(owner_dblink)
- if owner_set:
- owners[f] = owner_set
-
- for cpv in plibdata:
- print(colorize("WARN", ">>>") + " package: %s" % cpv)
- samefile_map = {}
- for f in plibdata[cpv]:
- obj_key = linkmap._obj_key(f)
- alt_paths = samefile_map.get(obj_key)
- if alt_paths is None:
- alt_paths = set()
- samefile_map[obj_key] = alt_paths
- alt_paths.add(f)
-
- for alt_paths in samefile_map.values():
- alt_paths = sorted(alt_paths)
- for p in alt_paths:
- print(colorize("WARN", " * ") + " - %s" % (p,))
- f = alt_paths[0]
- consumers = consumer_map.get(f, [])
- for c in consumers[:MAX_DISPLAY]:
- print(colorize("WARN", " * ") + " used by %s (%s)" % \
- (c, ", ".join(x.mycpv for x in owners.get(c, []))))
- if len(consumers) == MAX_DISPLAY + 1:
- print(colorize("WARN", " * ") + " used by %s (%s)" % \
- (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
- for x in owners.get(consumers[MAX_DISPLAY], []))))
- elif len(consumers) > MAX_DISPLAY:
- print(colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY))
- print("Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries")
-
-def post_emerge(myaction, myopts, myfiles,
- target_root, trees, mtimedb, retval):
- """
- Misc. things to run at the end of a merge session.
-
- Update Info Files
- Update Config Files
- Update News Items
- Commit mtimeDB
- Display preserved libs warnings
-
- @param myaction: The action returned from parse_opts()
- @type myaction: String
- @param myopts: emerge options
- @type myopts: dict
- @param myfiles: emerge arguments
- @type myfiles: list
- @param target_root: The target EROOT for myaction
- @type target_root: String
- @param trees: A dictionary mapping each ROOT to it's package databases
- @type trees: dict
- @param mtimedb: The mtimeDB to store data needed across merge invocations
- @type mtimedb: MtimeDB class instance
- @param retval: Emerge's return value
- @type retval: Int
- """
-
- root_config = trees[target_root]["root_config"]
- vardbapi = trees[target_root]['vartree'].dbapi
- settings = vardbapi.settings
- info_mtimes = mtimedb["info"]
-
- # Load the most current variables from ${ROOT}/etc/profile.env
- settings.unlock()
- settings.reload()
- settings.regenerate()
- settings.lock()
-
- config_protect = shlex_split(settings.get("CONFIG_PROTECT", ""))
- infodirs = settings.get("INFOPATH","").split(":") + \
- settings.get("INFODIR","").split(":")
-
- os.chdir("/")
-
- if retval == os.EX_OK:
- exit_msg = " *** exiting successfully."
- else:
- exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
- emergelog("notitles" not in settings.features, exit_msg)
-
- _flush_elog_mod_echo()
-
- if not vardbapi._pkgs_changed:
- # GLEP 42 says to display news *after* an emerge --pretend
- if "--pretend" in myopts:
- display_news_notification(root_config, myopts)
- # If vdb state has not changed then there's nothing else to do.
- return
-
- vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
- portage.util.ensure_dirs(vdb_path)
- vdb_lock = None
- if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
- vardbapi.lock()
- vdb_lock = True
-
- if vdb_lock:
- try:
- if "noinfo" not in settings.features:
- chk_updated_info_files(target_root,
- infodirs, info_mtimes, retval)
- mtimedb.commit()
- finally:
- if vdb_lock:
- vardbapi.unlock()
-
- display_preserved_libs(vardbapi, myopts)
- chk_updated_cfg_files(settings['EROOT'], config_protect)
-
- display_news_notification(root_config, myopts)
-
- postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
- portage.USER_CONFIG_PATH, "bin", "post_emerge")
- if os.access(postemerge, os.X_OK):
- hook_retval = portage.process.spawn(
- [postemerge], env=settings.environ())
- if hook_retval != os.EX_OK:
- writemsg_level(
- " %s spawn failed of %s\n" % (bad("*"), postemerge,),
- level=logging.ERROR, noiselevel=-1)
-
- clean_logs(settings)
-
- if "--quiet" not in myopts and \
- myaction is None and "@world" in myfiles:
- show_depclean_suggestion()
-
-def show_depclean_suggestion():
- out = portage.output.EOutput()
- msg = "After world updates, it is important to remove " + \
- "obsolete packages with emerge --depclean. Refer " + \
- "to `man emerge` for more information."
- for line in textwrap.wrap(msg, 72):
- out.ewarn(line)
-
def multiple_actions(action1, action2):
sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
@@ -455,6 +108,16 @@ def insert_optional_args(args):
return False
valid_integers = valid_integers()
+
+ class valid_floats(object):
+ def __contains__(self, s):
+ try:
+ return float(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_floats = valid_floats()
+
y_or_n = ('y', 'n',)
new_args = []
@@ -468,6 +131,7 @@ def insert_optional_args(args):
'--buildpkg' : y_or_n,
'--complete-graph' : y_or_n,
'--deep' : valid_integers,
+ '--depclean-lib-check' : y_or_n,
'--deselect' : y_or_n,
'--binpkg-respect-use' : y_or_n,
'--fail-clean' : y_or_n,
@@ -475,10 +139,12 @@ def insert_optional_args(args):
'--getbinpkgonly' : y_or_n,
'--jobs' : valid_integers,
'--keep-going' : y_or_n,
+ '--load-average' : valid_floats,
'--package-moves' : y_or_n,
'--quiet' : y_or_n,
'--quiet-build' : y_or_n,
- '--rebuild-if-new-slot-abi': y_or_n,
+ '--quiet-fail' : y_or_n,
+ '--rebuild-if-new-slot': y_or_n,
'--rebuild-if-new-rev' : y_or_n,
'--rebuild-if-new-ver' : y_or_n,
'--rebuild-if-unbuilt' : y_or_n,
@@ -489,11 +155,10 @@ def insert_optional_args(args):
"--use-ebuild-visibility": y_or_n,
'--usepkg' : y_or_n,
'--usepkgonly' : y_or_n,
+ '--verbose' : y_or_n,
+ '--verbose-slot-rebuilds': y_or_n,
}
- if _ENABLE_DYN_LINK_MAP:
- default_arg_opts['--depclean-lib-check'] = y_or_n
-
short_arg_opts = {
'D' : valid_integers,
'j' : valid_integers,
@@ -509,6 +174,8 @@ def insert_optional_args(args):
'k' : y_or_n,
'K' : y_or_n,
'q' : y_or_n,
+ 'v' : y_or_n,
+ 'w' : y_or_n,
}
arg_stack = args[:]
@@ -597,14 +264,17 @@ def _find_bad_atoms(atoms, less_strict=False):
"""
bad_atoms = []
for x in ' '.join(atoms).split():
+ atom = x
+ if "/" not in x.split(":")[0]:
+ x_cat = insert_category_into_atom(x, 'dummy-category')
+ if x_cat is not None:
+ atom = x_cat
+
bad_atom = False
try:
- atom = portage.dep.Atom(x, allow_wildcard=True, allow_repo=less_strict)
+ atom = Atom(atom, allow_wildcard=True, allow_repo=less_strict)
except portage.exception.InvalidAtom:
- try:
- atom = portage.dep.Atom("*/"+x, allow_wildcard=True, allow_repo=less_strict)
- except portage.exception.InvalidAtom:
- bad_atom = True
+ bad_atom = True
if bad_atom or (atom.operator and not less_strict) or atom.blocker or atom.use:
bad_atoms.append(x)
@@ -632,31 +302,26 @@ def parse_opts(tmpcmdline, silent=False):
"--ask": {
"shortopt" : "-a",
"help" : "prompt before performing any actions",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask": {
"help" : "automatically unmask packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-unrestricted-atoms": {
"help" : "write autounmask changes with >= atoms if possible",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-keep-masks": {
"help" : "don't add package.unmask entries",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-write": {
"help" : "write changes made by --autounmask to disk",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -665,6 +330,11 @@ def parse_opts(tmpcmdline, silent=False):
"action":"store"
},
+ "--accept-restrict": {
+ "help":"temporarily override ACCEPT_RESTRICT",
+ "action":"store"
+ },
+
"--backtrack": {
"help" : "Specifies how many times to backtrack if dependency " + \
@@ -676,7 +346,6 @@ def parse_opts(tmpcmdline, silent=False):
"--buildpkg": {
"shortopt" : "-b",
"help" : "build binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -694,25 +363,21 @@ def parse_opts(tmpcmdline, silent=False):
},
"--color": {
"help":"enable or disable color output",
- "type":"choice",
"choices":("y", "n")
},
"--complete-graph": {
"help" : "completely account for all known dependencies",
- "type" : "choice",
"choices" : true_y_or_n
},
"--complete-graph-if-new-use": {
"help" : "trigger --complete-graph behavior if USE or IUSE will change for an installed package",
- "type" : "choice",
"choices" : y_or_n
},
"--complete-graph-if-new-ver": {
"help" : "trigger --complete-graph behavior if an installed package version will change (upgrade or downgrade)",
- "type" : "choice",
"choices" : y_or_n
},
@@ -728,15 +393,18 @@ def parse_opts(tmpcmdline, silent=False):
"action" : "store"
},
+ "--depclean-lib-check": {
+ "help" : "check for consumers of libraries before removing them",
+ "choices" : true_y_or_n
+ },
+
"--deselect": {
"help" : "remove atoms/sets from the world file",
- "type" : "choice",
"choices" : true_y_or_n
},
"--dynamic-deps": {
"help": "substitute the dependencies of installed packages with the dependencies of unbuilt ebuilds",
- "type": "choice",
"choices": y_or_n
},
@@ -750,17 +418,15 @@ def parse_opts(tmpcmdline, silent=False):
"--fail-clean": {
"help" : "clean temp files after build failure",
- "type" : "choice",
"choices" : true_y_or_n
},
- "--ignore-built-slot-abi-deps": {
- "help": "Ignore the SLOT/ABI := operator parts of dependencies that have "
+ "--ignore-built-slot-operator-deps": {
+ "help": "Ignore the slot/sub-slot := operator parts of dependencies that have "
"been recorded when packages where built. This option is intended "
"only for debugging purposes, and it only affects built packages "
- "that specify SLOT/ABI := operator dependencies using the "
+ "that specify slot/sub-slot := operator dependencies using the "
"experimental \"4-slot-abi\" EAPI.",
- "type": "choice",
"choices": y_or_n
},
@@ -776,7 +442,6 @@ def parse_opts(tmpcmdline, silent=False):
"--keep-going": {
"help" : "continue as much as possible after an error",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -791,18 +456,15 @@ def parse_opts(tmpcmdline, silent=False):
"--misspell-suggestions": {
"help" : "enable package name misspell suggestions",
- "type" : "choice",
"choices" : ("y", "n")
},
"--with-bdeps": {
"help":"include unnecessary build time dependencies",
- "type":"choice",
"choices":("y", "n")
},
"--reinstall": {
"help":"specify conditions to trigger package reinstallation",
- "type":"choice",
"choices":["changed-use"]
},
@@ -817,21 +479,18 @@ def parse_opts(tmpcmdline, silent=False):
"--binpkg-respect-use": {
"help" : "discard binary packages if their use flags \
don't match the current configuration",
- "type" : "choice",
"choices" : true_y_or_n
},
"--getbinpkg": {
"shortopt" : "-g",
"help" : "fetch binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--getbinpkgonly": {
"shortopt" : "-G",
"help" : "fetch binary packages only",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -860,29 +519,40 @@ def parse_opts(tmpcmdline, silent=False):
"--package-moves": {
"help" : "perform package moves when necessary",
- "type" : "choice",
"choices" : true_y_or_n
},
+ "--prefix": {
+ "help" : "specify the installation prefix",
+ "action" : "store"
+ },
+
+ "--pkg-format": {
+ "help" : "format of result binary package",
+ "action" : "store",
+ },
+
"--quiet": {
"shortopt" : "-q",
"help" : "reduced or condensed output",
- "type" : "choice",
"choices" : true_y_or_n
},
"--quiet-build": {
"help" : "redirect build output to logs",
- "type" : "choice",
"choices" : true_y_or_n,
},
- "--rebuild-if-new-slot-abi": {
- "help" : ("Automatically rebuild or reinstall packages when SLOT/ABI := "
+ "--quiet-fail": {
+ "help" : "suppresses display of the build log on stdout",
+ "choices" : true_y_or_n,
+ },
+
+ "--rebuild-if-new-slot": {
+ "help" : ("Automatically rebuild or reinstall packages when slot/sub-slot := "
"operator dependencies can be satisfied by a newer slot, so that "
"older packages slots will become eligible for removal by the "
"--depclean action as soon as possible."),
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -891,7 +561,6 @@ def parse_opts(tmpcmdline, silent=False):
"used at both build-time and run-time are built, " + \
"if the dependency is not already installed with the " + \
"same version and revision.",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -900,24 +569,21 @@ def parse_opts(tmpcmdline, silent=False):
"used at both build-time and run-time are built, " + \
"if the dependency is not already installed with the " + \
"same version. Revision numbers are ignored.",
- "type" : "choice",
"choices" : true_y_or_n
},
"--rebuild-if-unbuilt": {
"help" : "Rebuild packages when dependencies that are " + \
"used at both build-time and run-time are built.",
- "type" : "choice",
"choices" : true_y_or_n
},
"--rebuilt-binaries": {
"help" : "replace installed packages with binary " + \
"packages that have been rebuilt",
- "type" : "choice",
"choices" : true_y_or_n
},
-
+
"--rebuilt-binaries-timestamp": {
"help" : "use only binaries that are newer than this " + \
"timestamp for --rebuilt-binaries",
@@ -931,26 +597,23 @@ def parse_opts(tmpcmdline, silent=False):
"--root-deps": {
"help" : "modify interpretation of depedencies",
- "type" : "choice",
"choices" :("True", "rdeps")
},
"--select": {
+ "shortopt" : "-w",
"help" : "add specified packages to the world set " + \
"(inverse of --oneshot)",
- "type" : "choice",
"choices" : true_y_or_n
},
"--selective": {
"help" : "identical to --noreplace",
- "type" : "choice",
"choices" : true_y_or_n
},
"--use-ebuild-visibility": {
"help" : "use unbuilt ebuild metadata for visibility checks on built packages",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -964,42 +627,39 @@ def parse_opts(tmpcmdline, silent=False):
"--usepkg": {
"shortopt" : "-k",
"help" : "use binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--usepkgonly": {
"shortopt" : "-K",
"help" : "use only binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
+ "--verbose": {
+ "shortopt" : "-v",
+ "help" : "verbose output",
+ "choices" : true_y_or_n
+ },
+ "--verbose-slot-rebuilds": {
+ "help" : "verbose slot rebuild output",
+ "choices" : true_y_or_n
+ },
}
- if _ENABLE_DYN_LINK_MAP:
- argument_options["--depclean-lib-check"] = {
- "help" : "check for consumers of libraries before removing them",
- "type" : "choice",
- "choices" : true_y_or_n
- }
-
- from optparse import OptionParser
- parser = OptionParser()
- if parser.has_option("--help"):
- parser.remove_option("--help")
+ parser = ArgumentParser(add_help=False)
for action_opt in actions:
- parser.add_option("--" + action_opt, action="store_true",
+ parser.add_argument("--" + action_opt, action="store_true",
dest=action_opt.replace("-", "_"), default=False)
for myopt in options:
- parser.add_option(myopt, action="store_true",
+ parser.add_argument(myopt, action="store_true",
dest=myopt.lstrip("--").replace("-", "_"), default=False)
for shortopt, longopt in shortmapping.items():
- parser.add_option("-" + shortopt, action="store_true",
+ parser.add_argument("-" + shortopt, action="store_true",
dest=longopt.lstrip("--").replace("-", "_"), default=False)
for myalias, myopt in longopt_aliases.items():
- parser.add_option(myalias, action="store_true",
+ parser.add_argument(myalias, action="store_true",
dest=myopt.lstrip("--").replace("-", "_"), default=False)
for myopt, kwargs in argument_options.items():
@@ -1007,12 +667,12 @@ def parse_opts(tmpcmdline, silent=False):
args = [myopt]
if shortopt is not None:
args.append(shortopt)
- parser.add_option(dest=myopt.lstrip("--").replace("-", "_"),
+ parser.add_argument(dest=myopt.lstrip("--").replace("-", "_"),
*args, **kwargs)
tmpcmdline = insert_optional_args(tmpcmdline)
- myoptions, myargs = parser.parse_args(args=tmpcmdline)
+ myoptions, myargs = parser.parse_known_args(args=tmpcmdline)
if myoptions.ask in true_y:
myoptions.ask = True
@@ -1058,9 +718,8 @@ def parse_opts(tmpcmdline, silent=False):
else:
myoptions.complete_graph = None
- if _ENABLE_DYN_LINK_MAP:
- if myoptions.depclean_lib_check in true_y:
- myoptions.depclean_lib_check = True
+ if myoptions.depclean_lib_check in true_y:
+ myoptions.depclean_lib_check = True
if myoptions.exclude:
bad_atoms = _find_bad_atoms(myoptions.exclude)
@@ -1127,8 +786,11 @@ def parse_opts(tmpcmdline, silent=False):
if myoptions.quiet_build in true_y:
myoptions.quiet_build = 'y'
- if myoptions.rebuild_if_new_slot_abi in true_y:
- myoptions.rebuild_if_new_slot_abi = 'y'
+ if myoptions.quiet_fail in true_y:
+ myoptions.quiet_fail = 'y'
+
+ if myoptions.rebuild_if_new_slot in true_y:
+ myoptions.rebuild_if_new_slot = 'y'
if myoptions.rebuild_if_new_ver in true_y:
myoptions.rebuild_if_new_ver = True
@@ -1215,6 +877,9 @@ def parse_opts(tmpcmdline, silent=False):
myoptions.jobs = jobs
+ if myoptions.load_average == "True":
+ myoptions.load_average = None
+
if myoptions.load_average:
try:
load_average = float(myoptions.load_average)
@@ -1228,7 +893,7 @@ def parse_opts(tmpcmdline, silent=False):
(myoptions.load_average,))
myoptions.load_average = load_average
-
+
if myoptions.rebuilt_binaries_timestamp:
try:
rebuilt_binaries_timestamp = int(myoptions.rebuilt_binaries_timestamp)
@@ -1259,6 +924,11 @@ def parse_opts(tmpcmdline, silent=False):
else:
myoptions.usepkgonly = None
+ if myoptions.verbose in true_y:
+ myoptions.verbose = True
+ else:
+ myoptions.verbose = None
+
for myopt in options:
v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
if v:
@@ -1283,309 +953,10 @@ def parse_opts(tmpcmdline, silent=False):
if myaction is None and myoptions.deselect is True:
myaction = 'deselect'
- if myargs and isinstance(myargs[0], bytes):
- for i in range(len(myargs)):
- myargs[i] = portage._unicode_decode(myargs[i])
-
myfiles += myargs
return myaction, myopts, myfiles
-# Warn about features that may confuse users and
-# lead them to report invalid bugs.
-_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
-
-def validate_ebuild_environment(trees):
- features_warn = set()
- for myroot in trees:
- settings = trees[myroot]["vartree"].settings
- settings.validate()
- features_warn.update(
- _emerge_features_warn.intersection(settings.features))
-
- if features_warn:
- msg = "WARNING: The FEATURES variable contains one " + \
- "or more values that should be disabled under " + \
- "normal circumstances: %s" % " ".join(features_warn)
- out = portage.output.EOutput()
- for line in textwrap.wrap(msg, 65):
- out.ewarn(line)
-
-def apply_priorities(settings):
- ionice(settings)
- nice(settings)
-
-def nice(settings):
- try:
- os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
- except (OSError, ValueError) as e:
- out = portage.output.EOutput()
- out.eerror("Failed to change nice value to '%s'" % \
- settings["PORTAGE_NICENESS"])
- out.eerror("%s\n" % str(e))
-
-def ionice(settings):
-
- ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
- if ionice_cmd:
- ionice_cmd = portage.util.shlex_split(ionice_cmd)
- if not ionice_cmd:
- return
-
- variables = {"PID" : str(os.getpid())}
- cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
-
- try:
- rval = portage.process.spawn(cmd, env=os.environ)
- except portage.exception.CommandNotFound:
- # The OS kernel probably doesn't support ionice,
- # so return silently.
- return
-
- if rval != os.EX_OK:
- out = portage.output.EOutput()
- out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
- out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
-
-def clean_logs(settings):
-
- if "clean-logs" not in settings.features:
- return
-
- logdir = settings.get("PORT_LOGDIR")
- if logdir is None or not os.path.isdir(logdir):
- return
-
- options = {
- 'eerror': portage.output.EOutput().eerror,
- # uncomment next line to output a succeeded message
- #'einfo': portage.output.EOutput().einfo
- }
- cleanlogs = CleanLogs()
- cleanlogs.clean(settings=settings, options=options)
-
-def setconfig_fallback(root_config):
- setconfig = root_config.setconfig
- setconfig._create_default_config()
- setconfig._parse(update=True)
- root_config.sets = setconfig.getSets()
-
-def get_missing_sets(root_config):
- # emerge requires existence of "world", "selected", and "system"
- missing_sets = []
-
- for s in ("selected", "system", "world",):
- if s not in root_config.sets:
- missing_sets.append(s)
-
- return missing_sets
-
-def missing_sets_warning(root_config, missing_sets):
- if len(missing_sets) > 2:
- missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
- missing_sets_str += ', and "%s"' % missing_sets[-1]
- elif len(missing_sets) == 2:
- missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
- else:
- missing_sets_str = '"%s"' % missing_sets[-1]
- msg = ["emerge: incomplete set configuration, " + \
- "missing set(s): %s" % missing_sets_str]
- if root_config.sets:
- msg.append(" sets defined: %s" % ", ".join(root_config.sets))
- global_config_path = portage.const.GLOBAL_CONFIG_PATH
- if root_config.settings['EPREFIX']:
- global_config_path = os.path.join(root_config.settings['EPREFIX'],
- portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
- msg.append(" This usually means that '%s'" % \
- (os.path.join(global_config_path, "sets/portage.conf"),))
- msg.append(" is missing or corrupt.")
- msg.append(" Falling back to default world and system set configuration!!!")
- for line in msg:
- writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
-
-def ensure_required_sets(trees):
- warning_shown = False
- for root_trees in trees.values():
- missing_sets = get_missing_sets(root_trees["root_config"])
- if missing_sets and not warning_shown:
- warning_shown = True
- missing_sets_warning(root_trees["root_config"], missing_sets)
- if missing_sets:
- setconfig_fallback(root_trees["root_config"])
-
-def expand_set_arguments(myfiles, myaction, root_config):
- retval = os.EX_OK
- setconfig = root_config.setconfig
-
- sets = setconfig.getSets()
-
- # In order to know exactly which atoms/sets should be added to the
- # world file, the depgraph performs set expansion later. It will get
- # confused about where the atoms came from if it's not allowed to
- # expand them itself.
- do_not_expand = (None, )
- newargs = []
- for a in myfiles:
- if a in ("system", "world"):
- newargs.append(SETPREFIX+a)
- else:
- newargs.append(a)
- myfiles = newargs
- del newargs
- newargs = []
-
- # separators for set arguments
- ARG_START = "{"
- ARG_END = "}"
-
- for i in range(0, len(myfiles)):
- if myfiles[i].startswith(SETPREFIX):
- start = 0
- end = 0
- x = myfiles[i][len(SETPREFIX):]
- newset = ""
- while x:
- start = x.find(ARG_START)
- end = x.find(ARG_END)
- if start > 0 and start < end:
- namepart = x[:start]
- argpart = x[start+1:end]
-
- # TODO: implement proper quoting
- args = argpart.split(",")
- options = {}
- for a in args:
- if "=" in a:
- k, v = a.split("=", 1)
- options[k] = v
- else:
- options[a] = "True"
- setconfig.update(namepart, options)
- newset += (x[:start-len(namepart)]+namepart)
- x = x[end+len(ARG_END):]
- else:
- newset += x
- x = ""
- myfiles[i] = SETPREFIX+newset
-
- sets = setconfig.getSets()
-
- # display errors that occurred while loading the SetConfig instance
- for e in setconfig.errors:
- print(colorize("BAD", "Error during set creation: %s" % e))
-
- unmerge_actions = ("unmerge", "prune", "clean", "depclean")
-
- for a in myfiles:
- if a.startswith(SETPREFIX):
- s = a[len(SETPREFIX):]
- if s not in sets:
- display_missing_pkg_set(root_config, s)
- return (None, 1)
- setconfig.active.append(s)
- try:
- set_atoms = setconfig.getSetAtoms(s)
- except portage.exception.PackageSetNotFound as e:
- writemsg_level(("emerge: the given set '%s' " + \
- "contains a non-existent set named '%s'.\n") % \
- (s, e), level=logging.ERROR, noiselevel=-1)
- if s in ('world', 'selected') and \
- SETPREFIX + e.value in sets['selected']:
- writemsg_level(("Use `emerge --deselect %s%s` to "
- "remove this set from world_sets.\n") %
- (SETPREFIX, e,), level=logging.ERROR,
- noiselevel=-1)
- return (None, 1)
- if myaction in unmerge_actions and \
- not sets[s].supportsOperation("unmerge"):
- sys.stderr.write("emerge: the given set '%s' does " % s + \
- "not support unmerge operations\n")
- retval = 1
- elif not set_atoms:
- print("emerge: '%s' is an empty set" % s)
- elif myaction not in do_not_expand:
- newargs.extend(set_atoms)
- else:
- newargs.append(SETPREFIX+s)
- for e in sets[s].errors:
- print(e)
- else:
- newargs.append(a)
- return (newargs, retval)
-
-def repo_name_check(trees):
- missing_repo_names = set()
- for root_trees in trees.values():
- porttree = root_trees.get("porttree")
- if porttree:
- portdb = porttree.dbapi
- missing_repo_names.update(portdb.getMissingRepoNames())
- if portdb.porttree_root in missing_repo_names and \
- not os.path.exists(os.path.join(
- portdb.porttree_root, "profiles")):
- # This is normal if $PORTDIR happens to be empty,
- # so don't warn about it.
- missing_repo_names.remove(portdb.porttree_root)
-
- if missing_repo_names:
- msg = []
- msg.append("WARNING: One or more repositories " + \
- "have missing repo_name entries:")
- msg.append("")
- for p in missing_repo_names:
- msg.append("\t%s/profiles/repo_name" % (p,))
- msg.append("")
- msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
- "should be a plain text file containing a unique " + \
- "name for the repository on the first line.", 70))
- msg.append("\n")
- writemsg_level("".join("%s\n" % l for l in msg),
- level=logging.WARNING, noiselevel=-1)
-
- return bool(missing_repo_names)
-
-def repo_name_duplicate_check(trees):
- ignored_repos = {}
- for root, root_trees in trees.items():
- if 'porttree' in root_trees:
- portdb = root_trees['porttree'].dbapi
- if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
- for repo_name, paths in portdb.getIgnoredRepos():
- k = (root, repo_name, portdb.getRepositoryPath(repo_name))
- ignored_repos.setdefault(k, []).extend(paths)
-
- if ignored_repos:
- msg = []
- msg.append('WARNING: One or more repositories ' + \
- 'have been ignored due to duplicate')
- msg.append(' profiles/repo_name entries:')
- msg.append('')
- for k in sorted(ignored_repos):
- msg.append(' %s overrides' % ", ".join(k))
- for path in ignored_repos[k]:
- msg.append(' %s' % (path,))
- msg.append('')
- msg.extend(' ' + x for x in textwrap.wrap(
- "All profiles/repo_name entries must be unique in order " + \
- "to avoid having duplicates ignored. " + \
- "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
- "/etc/make.conf if you would like to disable this warning."))
- msg.append("\n")
- writemsg_level(''.join('%s\n' % l for l in msg),
- level=logging.WARNING, noiselevel=-1)
-
- return bool(ignored_repos)
-
-def config_protect_check(trees):
- for root, root_trees in trees.items():
- settings = root_trees["root_config"].settings
- if not settings.get("CONFIG_PROTECT"):
- msg = "!!! CONFIG_PROTECT is empty"
- if settings["ROOT"] != "/":
- msg += " for '%s'" % root
- msg += "\n"
- writemsg_level(msg, level=logging.WARN, noiselevel=-1)
-
def profile_check(trees, myaction):
if myaction in ("help", "info", "search", "sync", "version"):
return os.EX_OK
@@ -1603,16 +974,6 @@ def profile_check(trees, myaction):
return 1
return os.EX_OK
-def check_procfs():
- procfs_path = '/proc'
- if platform.system() not in ("Linux",) or \
- os.path.ismount(procfs_path):
- return os.EX_OK
- msg = "It seems that %s is not mounted. You have been warned." % procfs_path
- writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
def emerge_main(args=None):
"""
@param args: command arguments (default: sys.argv[1:])
@@ -1621,11 +982,12 @@ def emerge_main(args=None):
if args is None:
args = sys.argv[1:]
- portage._disable_legacy_globals()
- portage.dep._internal_warnings = True
+ args = portage._decode_argv(args)
+
# Disable color until we're sure that it should be enabled (after
# EMERGE_DEFAULT_OPTS has been parsed).
portage.output.havecolor = 0
+
# This first pass is just for options that need to be known as early as
# possible, such as --config-root. They will be parsed again later,
# together with EMERGE_DEFAULT_OPTS (which may vary depending on the
@@ -1637,428 +999,45 @@ def emerge_main(args=None):
os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
if "--root" in myopts:
os.environ["ROOT"] = myopts["--root"]
+ if "--prefix" in myopts:
+ os.environ["EPREFIX"] = myopts["--prefix"]
if "--accept-properties" in myopts:
os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+ if "--accept-restrict" in myopts:
+ os.environ["ACCEPT_RESTRICT"] = myopts["--accept-restrict"]
+
+ # optimize --help (no need to load config / EMERGE_DEFAULT_OPTS)
+ if myaction == "help":
+ emerge_help()
+ return os.EX_OK
+ elif myaction == "moo":
+ print(COWSAY_MOO % platform.system())
+ return os.EX_OK
# Portage needs to ensure a sane umask for the files it creates.
os.umask(0o22)
- settings, trees, mtimedb = load_emerge_config()
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- rval = profile_check(trees, myaction)
+ if myaction == "sync":
+ portage._sync_mode = True
+ emerge_config = load_emerge_config(
+ action=myaction, args=myfiles, opts=myopts)
+ rval = profile_check(emerge_config.trees, emerge_config.action)
if rval != os.EX_OK:
return rval
tmpcmdline = []
if "--ignore-default-opts" not in myopts:
- tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
+ tmpcmdline.extend(portage.util.shlex_split(
+ emerge_config.target_config.settings.get(
+ "EMERGE_DEFAULT_OPTS", "")))
tmpcmdline.extend(args)
- myaction, myopts, myfiles = parse_opts(tmpcmdline)
-
- # skip global updates prior to sync, since it's called after sync
- if myaction not in ('help', 'info', 'sync', 'version') and \
- myopts.get('--package-moves') != 'n' and \
- _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
- mtimedb.commit()
- # Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
-
- xterm_titles = "notitles" not in settings.features
- if xterm_titles:
- xtermTitle("emerge")
-
- if "--digest" in myopts:
- os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
- # Reload the whole config from scratch so that the portdbapi internal
- # config is updated with new FEATURES.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
-
- # NOTE: adjust_configs() can map options to FEATURES, so any relevant
- # options adjustments should be made prior to calling adjust_configs().
- if "--buildpkgonly" in myopts:
- myopts["--buildpkg"] = True
-
- adjust_configs(myopts, trees)
- apply_priorities(settings)
-
- if myaction == 'version':
- writemsg_stdout(getportageversion(
- settings["PORTDIR"], None,
- settings.profile_path, settings["CHOST"],
- trees[settings['EROOT']]['vartree'].dbapi) + '\n', noiselevel=-1)
- return 0
- elif myaction == 'help':
- _emerge.help.help()
- return 0
-
- spinner = stdout_spinner()
- if "candy" in settings.features:
- spinner.update = spinner.update_scroll
-
- if "--quiet" not in myopts:
- portage.deprecated_profile_check(settings=settings)
- if portage.const._ENABLE_REPO_NAME_WARN:
- # Bug #248603 - Disable warnings about missing
- # repo_name entries for stable branch.
- repo_name_check(trees)
- repo_name_duplicate_check(trees)
- config_protect_check(trees)
- check_procfs()
-
- if "getbinpkg" in settings.features:
- myopts["--getbinpkg"] = True
-
- if "--getbinpkgonly" in myopts:
- myopts["--getbinpkg"] = True
-
- if "--getbinpkgonly" in myopts:
- myopts["--usepkgonly"] = True
-
- if "--getbinpkg" in myopts:
- myopts["--usepkg"] = True
-
- if "--usepkgonly" in myopts:
- myopts["--usepkg"] = True
-
- if "--buildpkgonly" in myopts:
- # --buildpkgonly will not merge anything, so
- # it cancels all binary package options.
- for opt in ("--getbinpkg", "--getbinpkgonly",
- "--usepkg", "--usepkgonly"):
- myopts.pop(opt, None)
-
- for mytrees in trees.values():
- mydb = mytrees["porttree"].dbapi
- # Freeze the portdbapi for performance (memoize all xmatch results).
- mydb.freeze()
-
- if myaction in ('search', None) and \
- "--usepkg" in myopts:
- # Populate the bintree with current --getbinpkg setting.
- # This needs to happen before expand_set_arguments(), in case
- # any sets use the bintree.
- mytrees["bintree"].populate(
- getbinpkgs="--getbinpkg" in myopts)
-
- del mytrees, mydb
-
- if "moo" in myfiles:
- print(COWSAY_MOO % platform.system())
- msg = ("The above `emerge moo` display is deprecated. "
- "Please use `emerge --moo` instead.")
- for line in textwrap.wrap(msg, 50):
- print(" %s %s" % (colorize("WARN", "*"), line))
-
- for x in myfiles:
- ext = os.path.splitext(x)[1]
- if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
- print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
- break
-
- root_config = trees[settings['EROOT']]['root_config']
- if myaction == "moo":
- print(COWSAY_MOO % platform.system())
- return os.EX_OK
- elif myaction == "list-sets":
- writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
- return os.EX_OK
- elif myaction == "check-news":
- news_counts = count_unread_news(
- root_config.trees["porttree"].dbapi,
- root_config.trees["vartree"].dbapi)
- if any(news_counts.values()):
- display_news_notifications(news_counts)
- elif "--quiet" not in myopts:
- print("", colorize("GOOD", "*"), "No news items were found.")
- return os.EX_OK
-
- ensure_required_sets(trees)
-
- # only expand sets for actions taking package arguments
- oldargs = myfiles[:]
- if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
- myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
- if retval != os.EX_OK:
- return retval
-
- # Need to handle empty sets specially, otherwise emerge will react
- # with the help message for empty argument lists
- if oldargs and not myfiles:
- print("emerge: no targets left after set expansion")
- return 0
-
- if ("--tree" in myopts) and ("--columns" in myopts):
- print("emerge: can't specify both of \"--tree\" and \"--columns\".")
- return 1
-
- if '--emptytree' in myopts and '--noreplace' in myopts:
- writemsg_level("emerge: can't specify both of " + \
- "\"--emptytree\" and \"--noreplace\".\n",
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- if ("--quiet" in myopts):
- spinner.update = spinner.update_quiet
- portage.util.noiselimit = -1
-
- if "--fetch-all-uri" in myopts:
- myopts["--fetchonly"] = True
-
- if "--skipfirst" in myopts and "--resume" not in myopts:
- myopts["--resume"] = True
+ emerge_config.action, emerge_config.opts, emerge_config.args = \
+ parse_opts(tmpcmdline)
- # Allow -p to remove --ask
- if "--pretend" in myopts:
- myopts.pop("--ask", None)
-
- # forbid --ask when not in a terminal
- # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
- if ("--ask" in myopts) and (not sys.stdin.isatty()):
- portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
- noiselevel=-1)
- return 1
-
- if settings.get("PORTAGE_DEBUG", "") == "1":
- spinner.update = spinner.update_quiet
- portage.util.noiselimit = 0
- if "python-trace" in settings.features:
- import portage.debug as portage_debug
- portage_debug.set_trace(True)
-
- if not ("--quiet" in myopts):
- if '--nospinner' in myopts or \
- settings.get('TERM') == 'dumb' or \
- not sys.stdout.isatty():
- spinner.update = spinner.update_basic
-
- if "--debug" in myopts:
- print("myaction", myaction)
- print("myopts", myopts)
-
- if not myaction and not myfiles and "--resume" not in myopts:
- _emerge.help.help()
- return 1
-
- pretend = "--pretend" in myopts
- fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
- buildpkgonly = "--buildpkgonly" in myopts
-
- # check if root user is the current user for the actions where emerge needs this
- if portage.secpass < 2:
- # We've already allowed "--version" and "--help" above.
- if "--pretend" not in myopts and myaction not in ("search","info"):
- need_superuser = myaction in ('clean', 'depclean', 'deselect',
- 'prune', 'unmerge') or not \
- (fetchonly or \
- (buildpkgonly and secpass >= 1) or \
- myaction in ("metadata", "regen", "sync"))
- if portage.secpass < 1 or \
- need_superuser:
- if need_superuser:
- access_desc = "superuser"
- else:
- access_desc = "portage group"
- # Always show portage_group_warning() when only portage group
- # access is required but the user is not in the portage group.
- from portage.data import portage_group_warning
- if "--ask" in myopts:
- writemsg_stdout("This action requires %s access...\n" % \
- (access_desc,), noiselevel=-1)
- if portage.secpass < 1 and not need_superuser:
- portage_group_warning()
- if userquery("Would you like to add --pretend to options?",
- "--ask-enter-invalid" in myopts) == "No":
- return 128 + signal.SIGINT
- myopts["--pretend"] = True
- del myopts["--ask"]
- else:
- sys.stderr.write(("emerge: %s access is required\n") \
- % access_desc)
- if portage.secpass < 1 and not need_superuser:
- portage_group_warning()
- return 1
-
- # Disable emergelog for everything except build or unmerge operations.
- # This helps minimize parallel emerge.log entries that can confuse log
- # parsers like genlop.
- disable_emergelog = False
- for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
- if x in myopts:
- disable_emergelog = True
- break
- if disable_emergelog:
- pass
- elif myaction in ("search", "info"):
- disable_emergelog = True
- elif portage.data.secpass < 1:
- disable_emergelog = True
-
- _emerge.emergelog._disable = disable_emergelog
-
- if not disable_emergelog:
- if 'EMERGE_LOG_DIR' in settings:
- try:
- # At least the parent needs to exist for the lock file.
- portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
- except portage.exception.PortageException as e:
- writemsg_level("!!! Error creating directory for " + \
- "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
- (settings['EMERGE_LOG_DIR'], e),
- noiselevel=-1, level=logging.ERROR)
- portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
- else:
- _emerge.emergelog._emerge_log_dir = settings["EMERGE_LOG_DIR"]
- else:
- _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
- settings["EPREFIX"].lstrip(os.sep), "var", "log")
- portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
-
- if not "--pretend" in myopts:
- emergelog(xterm_titles, "Started emerge on: "+\
- _unicode_decode(
- time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
- encoding=_encodings['content'], errors='replace'))
- myelogstr=""
- if myopts:
- opt_list = []
- for opt, arg in myopts.items():
- if arg is True:
- opt_list.append(opt)
- elif isinstance(arg, list):
- # arguments like --exclude that use 'append' action
- for x in arg:
- opt_list.append("%s=%s" % (opt, x))
- else:
- opt_list.append("%s=%s" % (opt, arg))
- myelogstr=" ".join(opt_list)
- if myaction:
- myelogstr += " --" + myaction
- if myfiles:
- myelogstr += " " + " ".join(oldargs)
- emergelog(xterm_titles, " *** emerge " + myelogstr)
- del oldargs
-
- def emergeexitsig(signum, frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
- sys.exit(128 + signum)
- signal.signal(signal.SIGINT, emergeexitsig)
- signal.signal(signal.SIGTERM, emergeexitsig)
-
- def emergeexit():
- """This gets out final log message in before we quit."""
- if "--pretend" not in myopts:
- emergelog(xterm_titles, " *** terminating.")
- if xterm_titles:
- xtermTitleReset()
- portage.atexit_register(emergeexit)
-
- if myaction in ("config", "metadata", "regen", "sync"):
- if "--pretend" in myopts:
- sys.stderr.write(("emerge: The '%s' action does " + \
- "not support '--pretend'.\n") % myaction)
- return 1
-
- if "sync" == myaction:
- return action_sync(settings, trees, mtimedb, myopts, myaction)
- elif "metadata" == myaction:
- action_metadata(settings, portdb, myopts)
- elif myaction=="regen":
- validate_ebuild_environment(trees)
- return action_regen(settings, portdb, myopts.get("--jobs"),
- myopts.get("--load-average"))
- # HELP action
- elif "config"==myaction:
- validate_ebuild_environment(trees)
- action_config(settings, trees, myopts, myfiles)
-
- # SEARCH action
- elif "search"==myaction:
- validate_ebuild_environment(trees)
- action_search(trees[settings['EROOT']]['root_config'],
- myopts, myfiles, spinner)
-
- elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
- validate_ebuild_environment(trees)
- rval = action_uninstall(settings, trees, mtimedb["ldpath"],
- myopts, myaction, myfiles, spinner)
- if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
- post_emerge(myaction, myopts, myfiles, settings['EROOT'],
- trees, mtimedb, rval)
- return rval
-
- elif myaction == 'info':
-
- # Ensure atoms are valid before calling unmerge().
- vardb = trees[settings['EROOT']]['vartree'].dbapi
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- bindb = trees[settings['EROOT']]["bintree"].dbapi
- valid_atoms = []
- for x in myfiles:
- if is_valid_package_atom(x, allow_repo=True):
- try:
- #look at the installed files first, if there is no match
- #look at the ebuilds, since EAPI 4 allows running pkg_info
- #on non-installed packages
- valid_atom = dep_expand(x, mydb=vardb, settings=settings)
- if valid_atom.cp.split("/")[0] == "null":
- valid_atom = dep_expand(x, mydb=portdb, settings=settings)
- if valid_atom.cp.split("/")[0] == "null" and "--usepkg" in myopts:
- valid_atom = dep_expand(x, mydb=bindb, settings=settings)
- valid_atoms.append(valid_atom)
- except portage.exception.AmbiguousPackageName as e:
- msg = "The short ebuild name \"" + x + \
- "\" is ambiguous. Please specify " + \
- "one of the following " + \
- "fully-qualified ebuild names instead:"
- for line in textwrap.wrap(msg, 70):
- writemsg_level("!!! %s\n" % (line,),
- level=logging.ERROR, noiselevel=-1)
- for i in e.args[0]:
- writemsg_level(" %s\n" % colorize("INFORM", i),
- level=logging.ERROR, noiselevel=-1)
- writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
- return 1
- continue
- msg = []
- msg.append("'%s' is not a valid package atom." % (x,))
- msg.append("Please check ebuild(5) for full details.")
- writemsg_level("".join("!!! %s\n" % line for line in msg),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- return action_info(settings, trees, myopts, valid_atoms)
-
- # "update", "system", or just process files:
- else:
- validate_ebuild_environment(trees)
-
- for x in myfiles:
- if x.startswith(SETPREFIX) or \
- is_valid_package_atom(x, allow_repo=True):
- continue
- if x[:1] == os.sep:
- continue
- try:
- os.lstat(x)
+ try:
+ return run_action(emerge_config)
+ finally:
+ # Call destructors for our portdbapi instances.
+ for x in emerge_config.trees.values():
+ if "porttree" in x.lazy_items:
continue
- except OSError:
- pass
- msg = []
- msg.append("'%s' is not a valid package atom." % (x,))
- msg.append("Please check ebuild(5) for full details.")
- writemsg_level("".join("!!! %s\n" % line for line in msg),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- # GLEP 42 says to display news *after* an emerge --pretend
- if "--pretend" not in myopts:
- display_news_notification(root_config, myopts)
- retval = action_build(settings, trees, mtimedb,
- myopts, myaction, myfiles, spinner)
- post_emerge(myaction, myopts, myfiles, settings['EROOT'],
- trees, mtimedb, retval)
-
- return retval
+ x["porttree"].dbapi.close_caches()
diff --git a/pym/_emerge/post_emerge.py b/pym/_emerge/post_emerge.py
new file mode 100644
index 000000000..d5f1ba5fa
--- /dev/null
+++ b/pym/_emerge/post_emerge.py
@@ -0,0 +1,165 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.modules.logs.logs import CleanLogs
+from portage.news import count_unread_news, display_news_notifications
+from portage.output import colorize
+from portage.util._dyn_libs.display_preserved_libs import \
+ display_preserved_libs
+from portage.util._info_files import chk_updated_info_files
+
+from .chk_updated_cfg_files import chk_updated_cfg_files
+from .emergelog import emergelog
+from ._flush_elog_mod_echo import _flush_elog_mod_echo
+
+def clean_logs(settings):
+
+ if "clean-logs" not in settings.features:
+ return
+
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ cleanlogs = CleanLogs()
+ errors = cleanlogs.clean(settings=settings)
+ if errors:
+ out = portage.output.EOutput()
+ for msg in errors:
+ out.eerror(msg)
+
+def display_news_notification(root_config, myopts):
+ if "news" not in root_config.settings.features:
+ return
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ news_counts = count_unread_news(portdb, vardb)
+ display_news_notifications(news_counts)
+
+def show_depclean_suggestion():
+ out = portage.output.EOutput()
+ msg = "After world updates, it is important to remove " + \
+ "obsolete packages with emerge --depclean. Refer " + \
+ "to `man emerge` for more information."
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+def post_emerge(myaction, myopts, myfiles,
+ target_root, trees, mtimedb, retval):
+ """
+ Misc. things to run at the end of a merge session.
+
+ Update Info Files
+ Update Config Files
+ Update News Items
+ Commit mtimeDB
+ Display preserved libs warnings
+
+ @param myaction: The action returned from parse_opts()
+ @type myaction: String
+ @param myopts: emerge options
+ @type myopts: dict
+ @param myfiles: emerge arguments
+ @type myfiles: list
+ @param target_root: The target EROOT for myaction
+ @type target_root: String
+	@param trees: A dictionary mapping each ROOT to its package databases
+ @type trees: dict
+ @param mtimedb: The mtimeDB to store data needed across merge invocations
+ @type mtimedb: MtimeDB class instance
+ @param retval: Emerge's return value
+ @type retval: Int
+ """
+
+ root_config = trees[target_root]["root_config"]
+ vardbapi = trees[target_root]['vartree'].dbapi
+ settings = vardbapi.settings
+ info_mtimes = mtimedb["info"]
+
+ # Load the most current variables from ${ROOT}/etc/profile.env
+ settings.unlock()
+ settings.reload()
+ settings.regenerate()
+ settings.lock()
+
+ config_protect = portage.util.shlex_split(
+ settings.get("CONFIG_PROTECT", ""))
+ infodirs = settings.get("INFOPATH","").split(":") + \
+ settings.get("INFODIR","").split(":")
+
+ os.chdir("/")
+
+ if retval == os.EX_OK:
+ exit_msg = " *** exiting successfully."
+ else:
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+ emergelog("notitles" not in settings.features, exit_msg)
+
+ _flush_elog_mod_echo()
+
+ if not vardbapi._pkgs_changed:
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" in myopts:
+ display_news_notification(root_config, myopts)
+ # If vdb state has not changed then there's nothing else to do.
+ return
+
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+ portage.util.ensure_dirs(vdb_path)
+ vdb_lock = None
+ if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
+ vardbapi.lock()
+ vdb_lock = True
+
+ if vdb_lock:
+ try:
+ if "noinfo" not in settings.features:
+ chk_updated_info_files(target_root,
+ infodirs, info_mtimes)
+ mtimedb.commit()
+ finally:
+ if vdb_lock:
+ vardbapi.unlock()
+
+ # Explicitly load and prune the PreservedLibsRegistry in order
+ # to ensure that we do not display stale data.
+ vardbapi._plib_registry.load()
+
+ if vardbapi._plib_registry.hasEntries():
+ if "--quiet" in myopts:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs found")
+ else:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs:")
+ display_preserved_libs(vardbapi)
+ print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
+ " to rebuild packages using these libraries")
+
+ chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+ display_news_notification(root_config, myopts)
+
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_emerge")
+ if os.access(postemerge, os.X_OK):
+ hook_retval = portage.process.spawn(
+ [postemerge], env=settings.environ())
+ if hook_retval != os.EX_OK:
+ portage.util.writemsg_level(
+ " %s spawn failed of %s\n" %
+ (colorize("BAD", "*"), postemerge,),
+ level=logging.ERROR, noiselevel=-1)
+
+ clean_logs(settings)
+
+ if "--quiet" not in myopts and \
+ myaction is None and "@world" in myfiles:
+ show_depclean_suggestion()
diff --git a/pym/_emerge/resolver/backtracking.py b/pym/_emerge/resolver/backtracking.py
index d8f49c679..c29b9d42a 100644
--- a/pym/_emerge/resolver/backtracking.py
+++ b/pym/_emerge/resolver/backtracking.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import copy
@@ -7,8 +7,8 @@ class BacktrackParameter(object):
__slots__ = (
"needed_unstable_keywords", "runtime_pkg_mask", "needed_use_config_changes", "needed_license_changes",
- "rebuild_list", "reinstall_list", "needed_p_mask_changes",
- "slot_abi_replace_installed"
+ "prune_rebuilds", "rebuild_list", "reinstall_list", "needed_p_mask_changes",
+ "slot_operator_mask_built", "slot_operator_replace_installed"
)
def __init__(self):
@@ -19,7 +19,9 @@ class BacktrackParameter(object):
self.needed_license_changes = {}
self.rebuild_list = set()
self.reinstall_list = set()
- self.slot_abi_replace_installed = set()
+ self.slot_operator_replace_installed = set()
+ self.slot_operator_mask_built = set()
+ self.prune_rebuilds = False
def __deepcopy__(self, memo=None):
if memo is None:
@@ -35,7 +37,9 @@ class BacktrackParameter(object):
result.needed_license_changes = copy.copy(self.needed_license_changes)
result.rebuild_list = copy.copy(self.rebuild_list)
result.reinstall_list = copy.copy(self.reinstall_list)
- result.slot_abi_replace_installed = copy.copy(self.slot_abi_replace_installed)
+ result.slot_operator_replace_installed = copy.copy(self.slot_operator_replace_installed)
+ result.slot_operator_mask_built = self.slot_operator_mask_built.copy()
+ result.prune_rebuilds = self.prune_rebuilds
# runtime_pkg_mask contains nested dicts that must also be copied
result.runtime_pkg_mask = {}
@@ -52,7 +56,9 @@ class BacktrackParameter(object):
self.needed_license_changes == other.needed_license_changes and \
self.rebuild_list == other.rebuild_list and \
self.reinstall_list == other.reinstall_list and \
- self.slot_abi_replace_installed == other.slot_abi_replace_installed
+ self.slot_operator_replace_installed == other.slot_operator_replace_installed and \
+ self.slot_operator_mask_built == other.slot_operator_mask_built and \
+ self.prune_rebuilds == other.prune_rebuilds
class _BacktrackNode(object):
@@ -125,7 +131,7 @@ class Backtracker(object):
for pkg, mask_info in runtime_pkg_mask.items():
if "missing dependency" in mask_info or \
- "slot_abi_mask_built" in mask_info:
+ "slot_operator_mask_built" in mask_info:
continue
entry_is_valid = False
@@ -192,16 +198,28 @@ class Backtracker(object):
para.needed_use_config_changes[pkg] = (new_use, new_changes)
elif change == "slot_conflict_abi":
new_node.terminal = False
- elif change == "slot_abi_mask_built":
+ elif change == "slot_operator_mask_built":
+ para.slot_operator_mask_built.update(data)
for pkg, mask_reasons in data.items():
para.runtime_pkg_mask.setdefault(pkg,
{}).update(mask_reasons)
- elif change == "slot_abi_replace_installed":
- para.slot_abi_replace_installed.update(data)
+ elif change == "slot_operator_replace_installed":
+ para.slot_operator_replace_installed.update(data)
elif change == "rebuild_list":
para.rebuild_list.update(data)
elif change == "reinstall_list":
para.reinstall_list.update(data)
+ elif change == "prune_rebuilds":
+ para.prune_rebuilds = True
+ para.slot_operator_replace_installed.clear()
+ for pkg in para.slot_operator_mask_built:
+ runtime_masks = para.runtime_pkg_mask.get(pkg)
+ if runtime_masks is None:
+ continue
+ runtime_masks.pop("slot_operator_mask_built", None)
+ if not runtime_masks:
+ para.runtime_pkg_mask.pop(pkg)
+ para.slot_operator_mask_built.clear()
self._add(new_node, explore=explore)
self._current_node = new_node
diff --git a/pym/_emerge/resolver/circular_dependency.py b/pym/_emerge/resolver/circular_dependency.py
index aca81face..b7106714a 100644
--- a/pym/_emerge/resolver/circular_dependency.py
+++ b/pym/_emerge/resolver/circular_dependency.py
@@ -1,7 +1,7 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
from itertools import chain, product
import logging
@@ -11,6 +11,7 @@ from portage.exception import InvalidDependString
from portage.output import colorize
from portage.util import writemsg_level
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.Package import Package
class circular_dependency_handler(object):
@@ -61,8 +62,7 @@ class circular_dependency_handler(object):
node = nodes[0]
display_order.append(node)
tempgraph.remove(node)
- display_order.reverse()
- return display_order
+ return tuple(display_order)
def _prepare_circular_dep_message(self):
"""
@@ -113,9 +113,10 @@ class circular_dependency_handler(object):
parent_atoms = self.all_parent_atoms.get(pkg)
if priorities[-1].buildtime:
- dep = parent.metadata["DEPEND"]
+ dep = " ".join(parent._metadata[k]
+ for k in Package._buildtime_keys)
elif priorities[-1].runtime:
- dep = parent.metadata["RDEPEND"]
+ dep = parent._metadata["RDEPEND"]
for ppkg, atom in parent_atoms:
if ppkg == parent:
@@ -125,7 +126,7 @@ class circular_dependency_handler(object):
try:
affecting_use = extract_affecting_use(dep, parent_atom,
- eapi=parent.metadata["EAPI"])
+ eapi=parent.eapi)
except InvalidDependString:
if not parent.installed:
raise
@@ -144,7 +145,8 @@ class circular_dependency_handler(object):
#If any of the flags we're going to touch is in REQUIRED_USE, add all
#other flags in REQUIRED_USE to affecting_use, to not lose any solution.
required_use_flags = get_required_use_flags(
- parent.metadata.get("REQUIRED_USE", ""))
+ parent._metadata.get("REQUIRED_USE", ""),
+ eapi=parent.eapi)
if affecting_use.intersection(required_use_flags):
# TODO: Find out exactly which REQUIRED_USE flags are
@@ -186,9 +188,11 @@ class circular_dependency_handler(object):
parent_atom not in reduced_dep:
#We found an assignment that removes the atom from 'dep'.
#Make sure it doesn't conflict with REQUIRED_USE.
- required_use = parent.metadata.get("REQUIRED_USE", "")
+ required_use = parent._metadata.get("REQUIRED_USE", "")
- if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
+ if check_required_use(required_use, current_use,
+ parent.iuse.is_valid_flag,
+ eapi=parent.eapi):
use = self.depgraph._pkg_use_enabled(parent)
solution = set()
for flag, state in zip(affecting_use, use_state):
diff --git a/pym/_emerge/resolver/output.py b/pym/_emerge/resolver/output.py
index 61cfe9e98..5f550be0d 100644
--- a/pym/_emerge/resolver/output.py
+++ b/pym/_emerge/resolver/output.py
@@ -1,26 +1,31 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Resolver output display operation.
"""
+from __future__ import unicode_literals
+
__all__ = (
- "Display",
+ "Display", "format_unmatched_atom",
)
import sys
+import portage
from portage import os
-from portage import _unicode_decode
from portage.dbapi.dep_expand import dep_expand
-from portage.dep import cpvequal, _repo_separator
+from portage.dep import cpvequal, _repo_separator, _slot_separator
+from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidDependString, SignatureException
+from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild._spawn_nofetch import spawn_nofetch
from portage.output import ( blue, colorize, create_color_func,
- darkblue, darkgreen, green, nc_len, red, teal, turquoise, yellow )
+ darkblue, darkgreen, green, nc_len, teal)
bad = create_color_func("BAD")
+from portage._sets.base import InternalPackageSet
from portage.util import writemsg_stdout
-from portage.versions import best, catpkgsplit
+from portage.versions import best, cpv_getversion
from _emerge.Blocker import Blocker
from _emerge.create_world_atom import create_world_atom
@@ -30,7 +35,9 @@ from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
if sys.hexversion >= 0x3000000:
basestring = str
-
+ _unicode = str
+else:
+ _unicode = unicode
class Display(object):
"""Formats and outputs the depgrah supplied it for merge/re-merge, etc.
@@ -54,11 +61,6 @@ class Display(object):
self.oldlp = None
self.myfetchlist = None
self.indent = ''
- self.is_new = True
- self.cur_use = None
- self.cur_iuse = None
- self.old_use = ''
- self.old_iuse = ''
self.use_expand = None
self.use_expand_hidden = None
self.pkgsettings = None
@@ -68,93 +70,54 @@ class Display(object):
self.blocker_style = None
- def _blockers(self, pkg, fetch_symbol):
- """Processes pkg for blockers and adds colorized strings to
+ def _blockers(self, blocker):
+ """Adds colorized strings to
self.print_msg and self.blockers
- @param pkg: _emerge.Package.Package instance
- @param fetch_symbol: string
+ @param blocker: _emerge.Blocker.Blocker instance
@rtype: bool
Modifies class globals: self.blocker_style, self.resolved,
self.print_msg
"""
- if pkg.satisfied:
+ if blocker.satisfied:
self.blocker_style = "PKG_BLOCKER_SATISFIED"
- addl = "%s %s " % (colorize(self.blocker_style, "b"),
- fetch_symbol)
+ addl = "%s " % (colorize(self.blocker_style, "b"),)
else:
self.blocker_style = "PKG_BLOCKER"
- addl = "%s %s " % (colorize(self.blocker_style, "B"),
- fetch_symbol)
+ addl = "%s " % (colorize(self.blocker_style, "B"),)
addl += self.empty_space_in_brackets()
self.resolved = dep_expand(
- str(pkg.atom).lstrip("!"), mydb=self.vardb,
+ _unicode(blocker.atom).lstrip("!"), mydb=self.vardb,
settings=self.pkgsettings
)
if self.conf.columns and self.conf.quiet:
- addl += " " + colorize(self.blocker_style, str(self.resolved))
+ addl += " " + colorize(self.blocker_style, _unicode(self.resolved))
else:
addl = "[%s %s] %s%s" % \
(colorize(self.blocker_style, "blocks"),
addl, self.indent,
- colorize(self.blocker_style, str(self.resolved))
+ colorize(self.blocker_style, _unicode(self.resolved))
)
- block_parents = self.conf.blocker_parents.parent_nodes(pkg)
- block_parents = set([pnode[2] for pnode in block_parents])
+ block_parents = self.conf.blocker_parents.parent_nodes(blocker)
+ block_parents = set(_unicode(pnode.cpv) for pnode in block_parents)
block_parents = ", ".join(block_parents)
- if self.resolved != pkg[2]:
+ if blocker.atom.blocker.overlap.forbid:
+ blocking_desc = "hard blocking"
+ else:
+ blocking_desc = "blocking"
+ if self.resolved != blocker.atom:
addl += colorize(self.blocker_style,
- " (\"%s\" is blocking %s)") % \
- (str(pkg.atom).lstrip("!"), block_parents)
+ " (\"%s\" is %s %s)" %
+ (_unicode(blocker.atom).lstrip("!"),
+ blocking_desc, block_parents))
else:
addl += colorize(self.blocker_style,
- " (is blocking %s)") % block_parents
- if isinstance(pkg, Blocker) and pkg.satisfied:
- if self.conf.columns:
- return True
- self.print_msg.append(addl)
+ " (is %s %s)" % (blocking_desc, block_parents))
+ if blocker.satisfied:
+ if not self.conf.columns:
+ self.print_msg.append(addl)
else:
self.blockers.append(addl)
- return False
-
-
- def _display_use(self, pkg, myoldbest, myinslotlist):
- """ USE flag display
-
- @param pkg: _emerge.Package.Package instance
- @param myoldbest: list of installed versions
- @param myinslotlist: list of installed slots
- Modifies class globals: self.forced_flags, self.cur_iuse,
- self.old_iuse, self.old_use, self.use_expand
- """
-
- self.forced_flags = set()
- self.forced_flags.update(pkg.use.force)
- self.forced_flags.update(pkg.use.mask)
-
- self.cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
- if flag in pkg.iuse.all]
- self.cur_iuse = sorted(pkg.iuse.all)
-
- if myoldbest and myinslotlist:
- previous_cpv = myoldbest[0].cpv
- else:
- previous_cpv = pkg.cpv
- if self.vardb.cpv_exists(previous_cpv):
- previous_pkg = self.vardb.match_pkgs('=' + previous_cpv)[0]
- self.old_iuse = sorted(previous_pkg.iuse.all)
- self.old_use = previous_pkg.use.enabled
- self.is_new = False
- else:
- self.old_iuse = []
- self.old_use = []
- self.is_new = True
-
- self.old_use = [flag for flag in self.old_use if flag in self.old_iuse]
-
- self.use_expand = pkg.use.expand
- self.use_expand_hidden = pkg.use.expand_hidden
- return
def include_mask_str(self):
return self.conf.verbosity > 1
@@ -219,13 +182,40 @@ class Display(object):
return ret
- def recheck_hidden(self, pkg):
- """ Prevent USE_EXPAND_HIDDEN flags from being hidden if they
- are the only thing that triggered reinstallation.
+ def _display_use(self, pkg, pkg_info):
+ """ USE flag display
@param pkg: _emerge.Package.Package instance
- Modifies self.use_expand_hidden, self.use_expand, self.verboseadd
+ @param pkg_info: PkgInfo instance
+ Modifies self.use_expand_hidden, self.use_expand, self.verboseadd,
+ self.forced_flags
"""
+
+ self.forced_flags = set()
+ self.forced_flags.update(pkg.use.force)
+ self.forced_flags.update(pkg.use.mask)
+
+ cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
+ if flag in pkg.iuse.all]
+ cur_iuse = sorted(pkg.iuse.all)
+
+ if pkg_info.previous_pkg is not None:
+ previous_pkg = pkg_info.previous_pkg
+ old_iuse = sorted(previous_pkg.iuse.all)
+ old_use = previous_pkg.use.enabled
+ is_new = False
+ else:
+ old_iuse = []
+ old_use = []
+ is_new = True
+
+ old_use = [flag for flag in old_use if flag in old_iuse]
+
+ self.use_expand = pkg.use.expand
+ self.use_expand_hidden = pkg.use.expand_hidden
+
+ # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
+ # are the only thing that triggered reinstallation.
reinst_flags_map = {}
reinstall_for_flags = self.conf.reinstall_nodes.get(pkg)
reinst_expand_map = None
@@ -246,13 +236,14 @@ class Display(object):
reinst_expand_map)
cur_iuse_map, iuse_forced = \
- self.map_to_use_expand(self.cur_iuse, forced_flags=True)
- cur_use_map = self.map_to_use_expand(self.cur_use)
- old_iuse_map = self.map_to_use_expand(self.old_iuse)
- old_use_map = self.map_to_use_expand(self.old_use)
+ self.map_to_use_expand(cur_iuse, forced_flags=True)
+ cur_use_map = self.map_to_use_expand(cur_use)
+ old_iuse_map = self.map_to_use_expand(old_iuse)
+ old_use_map = self.map_to_use_expand(old_use)
use_expand = sorted(self.use_expand)
use_expand.insert(0, "USE")
+ feature_flags = _get_feature_flags(_get_eapi_attrs(pkg.eapi))
for key in use_expand:
if key in self.use_expand_hidden:
@@ -260,7 +251,7 @@ class Display(object):
self.verboseadd += _create_use_string(self.conf, key.upper(),
cur_iuse_map[key], iuse_forced[key],
cur_use_map[key], old_iuse_map[key],
- old_use_map[key], self.is_new,
+ old_use_map[key], is_new, feature_flags,
reinst_flags_map.get(key))
return
@@ -318,13 +309,14 @@ class Display(object):
kwargs["myrepo"] = pkg.repo
myfilesdict = None
try:
- myfilesdict = db.getfetchsizes(pkg.cpv, **kwargs)
+ myfilesdict = db.getfetchsizes(pkg.cpv,
+ **portage._native_kwargs(kwargs))
except InvalidDependString as e:
# FIXME: validate SRC_URI earlier
depstr, = db.aux_get(pkg.cpv,
["SRC_URI"], myrepo=pkg.repo)
show_invalid_depstring_notice(
- pkg, depstr, str(e))
+ pkg, depstr, _unicode(e))
raise
except SignatureException:
# missing/invalid binary package SIZE signature
@@ -343,15 +335,13 @@ class Display(object):
if self.quiet_repo_display:
# overlay verbose
# assign index for a previous version in the same slot
- slot_matches = self.vardb.match(pkg.slot_atom)
- if slot_matches:
- repo_name_prev = self.vardb.aux_get(slot_matches[0],
- ["repository"])[0]
+ if pkg_info.previous_pkg is not None:
+ repo_name_prev = pkg_info.previous_pkg.repo
else:
repo_name_prev = None
# now use the data to generate output
- if pkg.installed or not slot_matches:
+ if pkg.installed or pkg_info.previous_pkg is None:
self.repoadd = self.conf.repo_display.repoStr(
pkg_info.repo_path_real)
else:
@@ -370,58 +360,86 @@ class Display(object):
repoadd_set.add(self.repoadd)
- def convert_myoldbest(self, pkg, myoldbest):
+ def convert_myoldbest(self, pkg, pkg_info):
"""converts and colorizes a version list to a string
@param pkg: _emerge.Package.Package instance
- @param myoldbest: list
+	@param pkg_info: PkgInfo instance
@rtype string.
"""
+ myoldbest = pkg_info.oldbest_list
# Convert myoldbest from a list to a string.
myoldbest_str = ""
if myoldbest:
versions = []
for pos, old_pkg in enumerate(myoldbest):
- key = catpkgsplit(old_pkg.cpv)[2] + "-" + catpkgsplit(old_pkg.cpv)[3]
+ key = old_pkg.version
if key[-3:] == "-r0":
key = key[:-3]
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
- key += _repo_separator + old_pkg.repo
+ if self.conf.verbosity == 3:
+ if pkg_info.attr_display.new_slot:
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in myoldbest + [pkg]):
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot or \
+ old_pkg.slot == pkg.slot and old_pkg.sub_slot != pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ if not self.quiet_repo_display and (self.verbose_main_repo_display or
+ self.portdb.repositories.mainRepo() is None or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
+ key += _repo_separator + old_pkg.repo
versions.append(key)
myoldbest_str = blue("["+", ".join(versions)+"]")
return myoldbest_str
+ def _append_slot(self, pkg_str, pkg, pkg_info):
+ """Potentially appends slot and subslot to package string.
- def set_interactive(self, pkg, ordered, addl):
- """Increments counters.interactive if the pkg is to
- be merged and it's metadata has interactive set True
+ @param pkg_str: string
+ @param pkg: _emerge.Package.Package instance
+	@param pkg_info: PkgInfo instance
+ @rtype string
+ """
+ if pkg_info.attr_display.new_slot:
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot:
+ pkg_str += "/" + pkg_info.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in pkg_info.oldbest_list + [pkg]):
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot or \
+ any(x.slot == pkg_info.slot and x.sub_slot != pkg_info.sub_slot for x in pkg_info.oldbest_list):
+ pkg_str += "/" + pkg_info.sub_slot
+ return pkg_str
+
+ def _append_repository(self, pkg_str, pkg, pkg_info):
+ """Potentially appends repository to package string.
+ @param pkg_str: string
@param pkg: _emerge.Package.Package instance
- @param ordered: boolean
- @param addl: already defined string to add to
+	@param pkg_info: PkgInfo instance
+ @rtype string
"""
- if 'interactive' in pkg.metadata.properties and \
- pkg.operation == 'merge':
- addl = colorize("WARN", "I") + addl[1:]
- if ordered:
- self.counters.interactive += 1
- return addl
-
- def _set_non_root_columns(self, addl, pkg_info, pkg):
+ if not self.quiet_repo_display and (self.verbose_main_repo_display or
+ self.portdb.repositories.mainRepo() is None or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+ pkg_str += _repo_separator + pkg.repo
+ return pkg_str
+
+ def _set_non_root_columns(self, pkg, pkg_info):
"""sets the indent level and formats the output
- @param addl: already defined string to add to
- @param pkg_info: dictionary
@param pkg: _emerge.Package.Package instance
+		@param pkg_info: PkgInfo instance
@rtype string
"""
ver_str = pkg_info.ver
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- ver_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
if self.conf.quiet:
- myprint = addl + " " + self.indent + \
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
self.pkgprint(pkg_info.cp, pkg_info)
myprint = myprint+darkblue(" "+ver_str)+" "
myprint = myprint+pkg_info.oldbest
@@ -434,7 +452,8 @@ class Display(object):
self.indent, self.pkgprint(pkg.cp, pkg_info))
else:
myprint = "[%s %s] %s%s" % \
- (self.pkgprint(pkg.type_name, pkg_info), addl,
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
self.indent, self.pkgprint(pkg.cp, pkg_info))
if (self.newlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
@@ -446,21 +465,20 @@ class Display(object):
return myprint
- def _set_root_columns(self, addl, pkg_info, pkg):
+ def _set_root_columns(self, pkg, pkg_info):
"""sets the indent level and formats the output
- @param addl: already defined string to add to
- @param pkg_info: dictionary
@param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
@rtype string
Modifies self.verboseadd
"""
ver_str = pkg_info.ver
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- ver_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
if self.conf.quiet:
- myprint = addl + " " + self.indent + \
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
self.pkgprint(pkg_info.cp, pkg_info)
myprint = myprint+" "+green(ver_str)+" "
myprint = myprint+pkg_info.oldbest
@@ -473,7 +491,8 @@ class Display(object):
addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
else:
myprint = "[%s %s] %s%s" % \
- (self.pkgprint(pkg.type_name, pkg_info), addl,
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
self.indent, self.pkgprint(pkg.cp, pkg_info))
if (self.newlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
@@ -484,18 +503,17 @@ class Display(object):
return myprint
- def _set_no_columns(self, pkg, pkg_info, addl):
+ def _set_no_columns(self, pkg, pkg_info):
"""prints pkg info without column indentation.
@param pkg: _emerge.Package.Package instance
@param pkg_info: dictionary
- @param addl: the current text to add for the next line to output
@rtype the updated addl
"""
pkg_str = pkg.cpv
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- pkg_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
if not pkg_info.merge:
addl = self.empty_space_in_brackets()
myprint = "[%s%s] %s%s %s" % \
@@ -506,46 +524,10 @@ class Display(object):
else:
myprint = "[%s %s] %s%s %s" % \
(self.pkgprint(pkg.type_name, pkg_info),
- addl, self.indent,
+ pkg_info.attr_display, self.indent,
self.pkgprint(pkg_str, pkg_info), pkg_info.oldbest)
return myprint
-
- def _insert_slot(self, pkg, pkg_info, myinslotlist):
- """Adds slot info to the message
-
- @return addl: formatted slot info
- @return myoldbest: installed version list
- Modifies self.counters.downgrades, self.counters.upgrades
- """
- addl = " " + pkg_info.fetch_symbol
- if not cpvequal(pkg.cpv,
- best([pkg.cpv] + [x.cpv for x in myinslotlist])):
- # Downgrade in slot
- addl += turquoise("U")+blue("D")
- if pkg_info.ordered:
- self.counters.downgrades += 1
- else:
- # Update in slot
- addl += turquoise("U") + " "
- if pkg_info.ordered:
- self.counters.upgrades += 1
- return addl
-
-
- def _new_slot(self, pkg, pkg_info):
- """New slot, mark it new.
-
- @return addl: formatted slot info
- @return myoldbest: installed version list
- Modifies self.counters.newslot
- """
- addl = " " + green("NS") + pkg_info.fetch_symbol + " "
- if pkg_info.ordered:
- self.counters.newslot += 1
- return addl
-
-
def print_messages(self, show_repos):
"""Performs the actual output printing of the pre-formatted
messages
@@ -581,9 +563,9 @@ class Display(object):
"""
writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
if show_repos:
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that RepoDisplay.__unicode__() is called in python2.
- writemsg_stdout(_unicode_decode("%s") % (self.conf.repo_display,),
+ writemsg_stdout("%s" % (self.conf.repo_display,),
noiselevel=-1)
return
@@ -635,15 +617,18 @@ class Display(object):
self.counters.restrict_fetch_satisfied
"""
pkg_info = PkgInfo()
+ pkg_info.cp = pkg.cp
+ pkg_info.ver = self.get_ver_str(pkg)
+ pkg_info.slot = pkg.slot
+ pkg_info.sub_slot = pkg.sub_slot
+ pkg_info.repo_name = pkg.repo
pkg_info.ordered = ordered
- pkg_info.fetch_symbol = " "
pkg_info.operation = pkg.operation
pkg_info.merge = ordered and pkg_info.operation == "merge"
if not pkg_info.merge and pkg_info.operation == "merge":
pkg_info.operation = "nomerge"
pkg_info.built = pkg.type_name != "ebuild"
pkg_info.ebuild_path = None
- pkg_info.repo_name = pkg.repo
if ordered:
if pkg_info.merge:
if pkg.type_name == "binary":
@@ -659,22 +644,30 @@ class Display(object):
pkg_info.repo_path_real = os.path.dirname(os.path.dirname(
os.path.dirname(pkg_info.ebuild_path)))
else:
- pkg_info.repo_path_real = \
- self.portdb.getRepositoryPath(pkg.metadata["repository"])
+ pkg_info.repo_path_real = self.portdb.getRepositoryPath(pkg.repo)
pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
if not pkg.built and pkg.operation == 'merge' and \
- 'fetch' in pkg.metadata.restrict:
+ 'fetch' in pkg.restrict:
if pkg_info.ordered:
self.counters.restrict_fetch += 1
+ pkg_info.attr_display.fetch_restrict = True
if not self.portdb.getfetchsizes(pkg.cpv,
useflags=pkg_info.use, myrepo=pkg.repo):
- pkg_info.fetch_symbol = green("f")
+ pkg_info.attr_display.fetch_restrict_satisfied = True
if pkg_info.ordered:
self.counters.restrict_fetch_satisfied += 1
else:
- pkg_info.fetch_symbol = red("F")
if pkg_info.ebuild_path is not None:
self.restrict_fetch_list[pkg] = pkg_info
+
+ if self.vardb.cpv_exists(pkg.cpv):
+ # Do a cpv match first, in case the SLOT has changed.
+ pkg_info.previous_pkg = self.vardb.match_pkgs('=' + pkg.cpv)[0]
+ else:
+ slot_matches = self.vardb.match_pkgs(pkg.slot_atom)
+ if slot_matches:
+ pkg_info.previous_pkg = slot_matches[0]
+
return pkg_info
@@ -685,15 +678,14 @@ class Display(object):
@param pkg_info: dictionay
Modifies self.changelogs
"""
- inst_matches = self.vardb.match(pkg.slot_atom)
- if inst_matches:
+ if pkg_info.previous_pkg is not None:
ebuild_path_cl = pkg_info.ebuild_path
if ebuild_path_cl is None:
# binary package
ebuild_path_cl = self.portdb.findname(pkg.cpv, myrepo=pkg.repo)
if ebuild_path_cl is not None:
self.changelogs.extend(_calc_changelog(
- ebuild_path_cl, inst_matches[0], pkg.cpv))
+ ebuild_path_cl, pkg_info.previous_pkg, pkg.cpv))
return
@@ -733,12 +725,10 @@ class Display(object):
@param pkg: _emerge.Package.Package instance
@rtype string
"""
- ver_str = list(catpkgsplit(pkg.cpv)[2:])
- if ver_str[1] == "r0":
- ver_str[1] = ""
- else:
- ver_str[1] = "-" + ver_str[1]
- return ver_str[0]+ver_str[1]
+ ver_str = pkg.cpv.version
+ if ver_str.endswith("-r0"):
+ ver_str = ver_str[:-3]
+ return ver_str
def _get_installed_best(self, pkg, pkg_info):
@@ -757,9 +747,10 @@ class Display(object):
myinslotlist = None
installed_versions = self.vardb.match_pkgs(pkg.cp)
if self.vardb.cpv_exists(pkg.cpv):
- addl = " "+yellow("R")+pkg_info.fetch_symbol+" "
- installed_version = self.vardb.match_pkgs(pkg.cpv)[0]
- if not self.quiet_repo_display and installed_version.repo != pkg.repo:
+ pkg_info.attr_display.replace = True
+ installed_version = pkg_info.previous_pkg
+ if installed_version.slot != pkg.slot or installed_version.sub_slot != pkg.sub_slot or \
+ not self.quiet_repo_display and installed_version.repo != pkg.repo:
myoldbest = [installed_version]
if pkg_info.ordered:
if pkg_info.merge:
@@ -775,17 +766,31 @@ class Display(object):
myinslotlist = None
if myinslotlist:
myoldbest = myinslotlist[:]
- addl = self._insert_slot(pkg, pkg_info, myinslotlist)
+ if not cpvequal(pkg.cpv,
+ best([pkg.cpv] + [x.cpv for x in myinslotlist])):
+ # Downgrade in slot
+ pkg_info.attr_display.new_version = True
+ pkg_info.attr_display.downgrade = True
+ if pkg_info.ordered:
+ self.counters.downgrades += 1
+ else:
+ # Update in slot
+ pkg_info.attr_display.new_version = True
+ if pkg_info.ordered:
+ self.counters.upgrades += 1
else:
myoldbest = installed_versions
- addl = self._new_slot(pkg, pkg_info)
+ pkg_info.attr_display.new = True
+ pkg_info.attr_display.new_slot = True
+ if pkg_info.ordered:
+ self.counters.newslot += 1
if self.conf.changelog:
self.do_changelog(pkg, pkg_info)
else:
- addl = " " + green("N") + " " + pkg_info.fetch_symbol + " "
+ pkg_info.attr_display.new = True
if pkg_info.ordered:
self.counters.new += 1
- return addl, myoldbest, myinslotlist
+ return myoldbest, myinslotlist
def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
@@ -806,7 +811,7 @@ class Display(object):
# files to fetch list - avoids counting a same file twice
# in size display (verbose mode)
self.myfetchlist = set()
-
+
self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
if self.quiet_repo_display:
# Use this set to detect when all the "repoadd" strings are "[0]"
@@ -824,47 +829,52 @@ class Display(object):
self.indent = " " * depth
if isinstance(pkg, Blocker):
- if self._blockers(pkg, fetch_symbol=" "):
- continue
+ self._blockers(pkg)
else:
pkg_info = self.set_pkg_info(pkg, ordered)
- addl, pkg_info.oldbest_list, myinslotlist = \
+ pkg_info.oldbest_list, myinslotlist = \
self._get_installed_best(pkg, pkg_info)
+ if ordered and pkg_info.merge and \
+ not pkg_info.attr_display.new:
+ for arg, atom in depgraph._iter_atoms_for_pkg(pkg):
+ if arg.force_reinstall:
+ pkg_info.attr_display.force_reinstall = True
+ break
+
self.verboseadd = ""
if self.quiet_repo_display:
self.repoadd = None
- self._display_use(pkg, pkg_info.oldbest_list, myinslotlist)
- self.recheck_hidden(pkg)
+ self._display_use(pkg, pkg_info)
if self.conf.verbosity == 3:
if self.quiet_repo_display:
self.verbose_size(pkg, repoadd_set, pkg_info)
else:
self.verbose_size(pkg, None, pkg_info)
- pkg_info.cp = pkg.cp
- pkg_info.ver = self.get_ver_str(pkg)
-
self.oldlp = self.conf.columnwidth - 30
self.newlp = self.oldlp - 30
- pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info.oldbest_list)
+ pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info)
pkg_info.system, pkg_info.world = \
self.check_system_world(pkg)
- addl = self.set_interactive(pkg, pkg_info.ordered, addl)
+ if 'interactive' in pkg.properties and \
+ pkg.operation == 'merge':
+ pkg_info.attr_display.interactive = True
+ if ordered:
+ self.counters.interactive += 1
if self.include_mask_str():
- addl += self.gen_mask_str(pkg)
+ pkg_info.attr_display.mask = self.gen_mask_str(pkg)
if pkg.root_config.settings["ROOT"] != "/":
if pkg_info.oldbest:
pkg_info.oldbest += " "
if self.conf.columns:
- myprint = self._set_non_root_columns(
- addl, pkg_info, pkg)
+ myprint = self._set_non_root_columns(pkg, pkg_info)
else:
pkg_str = pkg.cpv
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- pkg_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
if not pkg_info.merge:
addl = self.empty_space_in_brackets()
myprint = "[%s%s] " % (
@@ -873,17 +883,16 @@ class Display(object):
)
else:
myprint = "[%s %s] " % (
- self.pkgprint(pkg.type_name, pkg_info), addl)
+ self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display)
myprint += self.indent + \
self.pkgprint(pkg_str, pkg_info) + " " + \
pkg_info.oldbest + darkgreen("to " + pkg.root)
else:
if self.conf.columns:
- myprint = self._set_root_columns(
- addl, pkg_info, pkg)
+ myprint = self._set_root_columns(pkg, pkg_info)
else:
- myprint = self._set_no_columns(
- pkg, pkg_info, addl)
+ myprint = self._set_no_columns(pkg, pkg_info)
if self.conf.columns and pkg.operation == "uninstall":
continue
@@ -908,3 +917,105 @@ class Display(object):
self.print_changelog()
return os.EX_OK
+
+
+def format_unmatched_atom(pkg, atom, pkg_use_enabled):
+ """
+ Returns two strings. The first string contains the
+ 'atom' with parts of the atom colored, which 'pkg'
+ doesn't match. The second string has the same number
+ of characters as the first one, but consists of only
+ white space or ^. The ^ characters have the same position
+ as the colored parts of the first string.
+ """
+ # Things to check:
+ # 1. Version
+ # 2. cp
+ # 3. slot/sub_slot
+ # 4. repository
+ # 5. USE
+
+ highlight = set()
+
+ def perform_coloring():
+ atom_str = ""
+ marker_str = ""
+ for ii, x in enumerate(atom):
+ if ii in highlight:
+ atom_str += colorize("BAD", x)
+ marker_str += "^"
+ else:
+ atom_str += x
+ marker_str += " "
+ return atom_str, marker_str
+
+ if atom.cp != pkg.cp:
+ # Highlight the cp part only.
+ ii = atom.find(atom.cp)
+ highlight.update(range(ii, ii + len(atom.cp)))
+ return perform_coloring()
+
+ version_atom = atom.without_repo.without_slot.without_use
+ version_atom_set = InternalPackageSet(initial_atoms=(version_atom,))
+ highlight_version = not bool(version_atom_set.findAtomForPackage(pkg,
+ modified_use=pkg_use_enabled(pkg)))
+
+ highlight_slot = False
+ if (atom.slot and atom.slot != pkg.slot) or \
+ (atom.sub_slot and atom.sub_slot != pkg.sub_slot):
+ highlight_slot = True
+
+ if highlight_version:
+ op = atom.operator
+ ver = None
+ if atom.cp != atom.cpv:
+ ver = cpv_getversion(atom.cpv)
+
+ if op == "=*":
+ op = "="
+ ver += "*"
+
+ if op is not None:
+ highlight.update(range(len(op)))
+
+ if ver is not None:
+ start = atom.rfind(ver)
+ end = start + len(ver)
+ highlight.update(range(start, end))
+
+ if highlight_slot:
+ slot_str = ":" + atom.slot
+ if atom.sub_slot:
+ slot_str += "/" + atom.sub_slot
+ if atom.slot_operator:
+ slot_str += atom.slot_operator
+ start = atom.find(slot_str)
+ end = start + len(slot_str)
+ highlight.update(range(start, end))
+
+ highlight_use = set()
+ if atom.use:
+ use_atom = "%s[%s]" % (atom.cp, str(atom.use))
+ use_atom_set = InternalPackageSet(initial_atoms=(use_atom,))
+ if not use_atom_set.findAtomForPackage(pkg, \
+ modified_use=pkg_use_enabled(pkg)):
+ missing_iuse = pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ highlight_use = set(missing_iuse)
+ else:
+ #Use conditionals not met.
+ violated_atom = atom.violated_conditionals(
+ pkg_use_enabled(pkg), pkg.iuse.is_valid_flag)
+ if violated_atom.use is not None:
+ highlight_use = set(violated_atom.use.enabled.union(
+ violated_atom.use.disabled))
+
+ if highlight_use:
+ ii = atom.find("[") + 1
+ for token in atom.use.tokens:
+ if token.lstrip("-!").rstrip("=?") in highlight_use:
+ highlight.update(range(ii, ii + len(token)))
+ ii += len(token) + 1
+
+ return perform_coloring()
diff --git a/pym/_emerge/resolver/output_helpers.py b/pym/_emerge/resolver/output_helpers.py
index e751dd8e4..58b26945a 100644
--- a/pym/_emerge/resolver/output_helpers.py
+++ b/pym/_emerge/resolver/output_helpers.py
@@ -1,9 +1,12 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Contains private support functions for the Display class
in output.py
"""
+
+from __future__ import unicode_literals
+
__all__ = (
)
@@ -15,9 +18,10 @@ from portage import os
from portage import _encodings, _unicode_encode
from portage._sets.base import InternalPackageSet
from portage.output import (blue, bold, colorize, create_color_func,
- green, red, teal, yellow)
+ green, red, teal, turquoise, yellow)
bad = create_color_func("BAD")
from portage.util import shlex_split, writemsg
+from portage.util.SlotObject import SlotObject
from portage.versions import catpkgsplit
from _emerge.Blocker import Blocker
@@ -223,7 +227,7 @@ class _DisplayConfig(object):
self.reinstall_nodes = dynamic_config._reinstall_nodes
self.digraph = dynamic_config.digraph
self.blocker_uninstalls = dynamic_config._blocker_uninstalls
- self.slot_pkg_map = dynamic_config._slot_pkg_map
+ self.package_tracker = dynamic_config._package_tracker
self.set_nodes = dynamic_config._set_nodes
self.pkg_use_enabled = depgraph._pkg_use_enabled
@@ -245,10 +249,9 @@ def _format_size(mysize):
mystr=mystr[:mycount]+","+mystr[mycount:]
return mystr+" kB"
-
def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
old_iuse, old_use,
- is_new, reinst_flags):
+ is_new, feature_flags, reinst_flags):
if not conf.print_use_string:
return ""
@@ -266,6 +269,7 @@ def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
any_iuse = cur_iuse.union(old_iuse)
any_iuse = list(any_iuse)
any_iuse.sort()
+
for flag in any_iuse:
flag_str = None
isEnabled = False
@@ -299,7 +303,9 @@ def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
elif flag in old_use:
flag_str = green("-" + flag) + "*"
if flag_str:
- if flag in iuse_forced:
+ if flag in feature_flags:
+ flag_str = "{" + flag_str + "}"
+ elif flag in iuse_forced:
flag_str = "(" + flag_str + ")"
if isEnabled:
enabled.append(flag_str)
@@ -364,8 +370,9 @@ def _tree_display(conf, mylist):
# If the uninstall task did not need to be executed because
# of an upgrade, display Blocker -> Upgrade edges since the
# corresponding Blocker -> Uninstall edges will not be shown.
- upgrade_node = \
- conf.slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
+ upgrade_node = next(conf.package_tracker.match(
+ uninstall.root, uninstall.slot_atom), None)
+
if upgrade_node is not None and \
uninstall not in executed_uninstalls:
for blocker in uninstall_parents:
@@ -611,9 +618,10 @@ class PkgInfo(object):
information about the pkg being printed.
"""
- __slots__ = ("built", "cp", "ebuild_path", "fetch_symbol", "merge",
- "oldbest", "oldbest_list", "operation", "ordered",
- "repo_name", "repo_path_real", "system", "use", "ver", "world")
+ __slots__ = ("attr_display", "built", "cp",
+ "ebuild_path", "fetch_symbol", "merge",
+ "oldbest", "oldbest_list", "operation", "ordered", "previous_pkg",
+ "repo_name", "repo_path_real", "slot", "sub_slot", "system", "use", "ver", "world")
def __init__(self):
@@ -626,9 +634,74 @@ class PkgInfo(object):
self.oldbest_list = []
self.operation = ''
self.ordered = False
+ self.previous_pkg = None
self.repo_path_real = ''
self.repo_name = ''
+ self.slot = ''
+ self.sub_slot = ''
self.system = False
self.use = ''
self.ver = ''
self.world = False
+ self.attr_display = PkgAttrDisplay()
+
+class PkgAttrDisplay(SlotObject):
+
+ __slots__ = ("downgrade", "fetch_restrict", "fetch_restrict_satisfied",
+ "force_reinstall",
+ "interactive", "mask", "new", "new_slot", "new_version", "replace")
+
+ def __str__(self):
+ output = []
+
+ if self.interactive:
+ output.append(colorize("WARN", "I"))
+ else:
+ output.append(" ")
+
+ if self.new or self.force_reinstall:
+ if self.force_reinstall:
+ output.append(red("r"))
+ else:
+ output.append(green("N"))
+ else:
+ output.append(" ")
+
+ if self.new_slot or self.replace:
+ if self.replace:
+ output.append(yellow("R"))
+ else:
+ output.append(green("S"))
+ else:
+ output.append(" ")
+
+ if self.fetch_restrict or self.fetch_restrict_satisfied:
+ if self.fetch_restrict_satisfied:
+ output.append(green("f"))
+ else:
+ output.append(red("F"))
+ else:
+ output.append(" ")
+
+ if self.new_version:
+ output.append(turquoise("U"))
+ else:
+ output.append(" ")
+
+ if self.downgrade:
+ output.append(blue("D"))
+ else:
+ output.append(" ")
+
+ if self.mask is not None:
+ output.append(self.mask)
+
+ return "".join(output)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
diff --git a/pym/_emerge/resolver/package_tracker.py b/pym/_emerge/resolver/package_tracker.py
new file mode 100644
index 000000000..5982750a0
--- /dev/null
+++ b/pym/_emerge/resolver/package_tracker.py
@@ -0,0 +1,301 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import collections
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dep:Atom,match_from_list',
+ 'portage.util:cmp_sort_key',
+ 'portage.versions:vercmp',
+)
+
+_PackageConflict = collections.namedtuple("_PackageConflict", ["root", "pkgs", "atom", "description"])
+
+class PackageConflict(_PackageConflict):
+ """
+ Class to track the reason for a conflict and the conflicting packages.
+ """
+ def __iter__(self):
+ return iter(self.pkgs)
+
+ def __contains__(self, pkg):
+ return pkg in self.pkgs
+
+ def __len__(self):
+ return len(self.pkgs)
+
+
+class PackageTracker(object):
+ """
+ This class tracks packages which are currently
+ installed and packages which have been pulled into
+ the dependency graph.
+
+ It automatically tracks conflicts between packages.
+
+ Possible conflicts:
+ 1) Packages that share the same SLOT.
+ 2) Packages with the same cpv.
+ Not yet implemented:
+ 3) Packages that block each other.
+ """
+
+ def __init__(self):
+ # Mapping from package keys to set of packages.
+ self._cp_pkg_map = collections.defaultdict(list)
+ self._cp_vdb_pkg_map = collections.defaultdict(list)
+ # List of package keys that may contain conflicts.
+ # The insertion order must be preserved.
+ self._multi_pkgs = []
+
+ # Cache for result of conflicts().
+ self._conflicts_cache = None
+
+ # Records for each pulled package which installed package
+ # are replaced.
+ self._replacing = collections.defaultdict(list)
+ # Records which pulled packages replace this package.
+ self._replaced_by = collections.defaultdict(list)
+
+ self._match_cache = collections.defaultdict(dict)
+
+ def add_pkg(self, pkg):
+ """
+ Add a new package to the tracker. Records conflicts as necessary.
+ """
+ cp_key = pkg.root, pkg.cp
+
+ if any(other is pkg for other in self._cp_pkg_map[cp_key]):
+ return
+
+ self._cp_pkg_map[cp_key].append(pkg)
+
+ if len(self._cp_pkg_map[cp_key]) > 1:
+ self._conflicts_cache = None
+ if len(self._cp_pkg_map[cp_key]) == 2:
+ self._multi_pkgs.append(cp_key)
+
+ self._replacing[pkg] = []
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def add_installed_pkg(self, installed):
+ """
+ Add an installed package during vdb load. These packages
+ are not returned by matched_pull as long as add_pkg hasn't
+ been called with them. They are only returned by match_final.
+ """
+ cp_key = installed.root, installed.cp
+ if any(other is installed for other in self._cp_vdb_pkg_map[cp_key]):
+ return
+
+ self._cp_vdb_pkg_map[cp_key].append(installed)
+
+ for pkg in self._cp_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def remove_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+ Raises KeyError if it isn't present.
+ """
+ cp_key = pkg.root, pkg.cp
+ try:
+ self._cp_pkg_map.get(cp_key, []).remove(pkg)
+ except ValueError:
+ raise KeyError(pkg)
+
+ if self._cp_pkg_map[cp_key]:
+ self._conflicts_cache = None
+
+ if not self._cp_pkg_map[cp_key]:
+ del self._cp_pkg_map[cp_key]
+ elif len(self._cp_pkg_map[cp_key]) == 1:
+ self._multi_pkgs = [other_cp_key for other_cp_key in self._multi_pkgs \
+ if other_cp_key != cp_key]
+
+ for installed in self._replacing[pkg]:
+ self._replaced_by[installed].remove(pkg)
+ if not self._replaced_by[installed]:
+ del self._replaced_by[installed]
+ del self._replacing[pkg]
+
+ self._match_cache.pop(cp_key, None)
+
+ def discard_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+ Does not raise KeyError if it is not present.
+ """
+ try:
+ self.remove_pkg(pkg)
+ except KeyError:
+ pass
+
+ def match(self, root, atom, installed=True):
+ """
+ Iterates over the packages matching 'atom'.
+ If 'installed' is True, installed non-replaced
+ packages may also be returned.
+ """
+ cp_key = root, atom.cp
+ cache_key = root, atom, installed
+ try:
+ return iter(self._match_cache.get(cp_key, {})[cache_key])
+ except KeyError:
+ pass
+
+ candidates = self._cp_pkg_map.get(cp_key, [])[:]
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed not in self._replaced_by:
+ candidates.append(installed)
+
+ ret = match_from_list(atom, candidates)
+ ret.sort(key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ self._match_cache[cp_key][cache_key] = ret
+
+ return iter(ret)
+
+ def conflicts(self):
+ """
+ Iterates over the currently existing conflicts.
+ """
+ if self._conflicts_cache is None:
+ self._conflicts_cache = []
+
+ for cp_key in self._multi_pkgs:
+
+ # Categorize packages according to cpv and slot.
+ slot_map = collections.defaultdict(list)
+ cpv_map = collections.defaultdict(list)
+ for pkg in self._cp_pkg_map[cp_key]:
+ slot_key = pkg.root, pkg.slot_atom
+ cpv_key = pkg.root, pkg.cpv
+ slot_map[slot_key].append(pkg)
+ cpv_map[cpv_key].append(pkg)
+
+ # Slot conflicts.
+ for slot_key in slot_map:
+ slot_pkgs = slot_map[slot_key]
+ if len(slot_pkgs) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "slot conflict",
+ root = slot_key[0],
+ atom = slot_key[1],
+ pkgs = tuple(slot_pkgs),
+ ))
+
+ # CPV conflicts.
+ for cpv_key in cpv_map:
+ cpv_pkgs = cpv_map[cpv_key]
+ if len(cpv_pkgs) > 1:
+ # Make sure this cpv conflict is not a slot conflict at the same time.
+ # Ignore it if it is.
+ slots = set(pkg.slot for pkg in cpv_pkgs)
+ if len(slots) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "cpv conflict",
+ root = cpv_key[0],
+ atom = cpv_key[1],
+ pkgs = tuple(cpv_pkgs),
+ ))
+
+ return iter(self._conflicts_cache)
+
+ def slot_conflicts(self):
+ """
+ Iterates over present slot conflicts.
+ This is only intended for consumers that haven't been
+ updated to deal with other kinds of conflicts.
+ This function should be removed once all consumers are updated.
+ """
+ return (conflict for conflict in self.conflicts() \
+ if conflict.description == "slot conflict")
+
+ def all_pkgs(self, root):
+ """
+ Iterates over all packages for the given root
+ present in the tracker, including the installed
+ packages.
+ """
+ for cp_key in self._cp_pkg_map:
+ if cp_key[0] == root:
+ for pkg in self._cp_pkg_map[cp_key]:
+ yield pkg
+
+ for cp_key in self._cp_vdb_pkg_map:
+ if cp_key[0] == root:
+ for installed in self._cp_vdb_pkg_map[cp_key]:
+ if installed not in self._replaced_by:
+ yield installed
+
+ def contains(self, pkg, installed=True):
+ """
+ Checks if the package is in the tracker.
+ If 'installed' is True, returns True for
+ non-replaced installed packages.
+ """
+ cp_key = pkg.root, pkg.cp
+ for other in self._cp_pkg_map.get(cp_key, []):
+ if other is pkg:
+ return True
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed is pkg and \
+ installed not in self._replaced_by:
+ return True
+
+ return False
+
+ def __contains__(self, pkg):
+ """
+ Checks if the package is in the tracker.
+ Returns True for non-replaced installed packages.
+ """
+ return self.contains(pkg, installed=True)
+
+
+class PackageTrackerDbapiWrapper(object):
+ """
+ A wrapper class that provides parts of the legacy
+ dbapi interface. Remove it once all consumers have
+ died.
+ """
+ def __init__(self, root, package_tracker):
+ self._root = root
+ self._package_tracker = package_tracker
+
+ def cpv_inject(self, pkg):
+ self._package_tracker.add_pkg(pkg)
+
+ def match_pkgs(self, atom):
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+ ret = sorted(self._package_tracker.match(self._root, atom),
+ key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ return ret
+
+ def __iter__(self):
+ return self._package_tracker.all_pkgs(self._root)
+
+ def match(self, atom, use_cache=None):
+ return self.match_pkgs(atom)
+
+ def cp_list(self, cp):
+ return self.match_pkgs(cp)
diff --git a/pym/_emerge/resolver/slot_collision.py b/pym/_emerge/resolver/slot_collision.py
index 783a6483d..baeab080a 100644
--- a/pym/_emerge/resolver/slot_collision.py
+++ b/pym/_emerge/resolver/slot_collision.py
@@ -1,10 +1,11 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import sys
+from portage import _encodings, _unicode_encode
from _emerge.AtomArg import AtomArg
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
@@ -88,10 +89,11 @@ class slot_conflict_handler(object):
self.debug = "--debug" in self.myopts
if self.debug:
writemsg("Starting slot conflict handler\n", noiselevel=-1)
- #slot_collision_info is a dict mapping (slot atom, root) to set
- #of packages. The packages in the set all belong to the same
- #slot.
- self.slot_collision_info = depgraph._dynamic_config._slot_collision_info
+
+ # List of tuples, where each tuple represents a slot conflict.
+ self.all_conflicts = []
+ for conflict in depgraph._dynamic_config._package_tracker.slot_conflicts():
+ self.all_conflicts.append((conflict.root, conflict.atom, conflict.pkgs))
#A dict mapping packages to pairs of parent package
#and parent atom
@@ -108,8 +110,7 @@ class slot_conflict_handler(object):
all_conflict_atoms_by_slotatom = []
#fill conflict_pkgs, all_conflict_atoms_by_slotatom
- for (atom, root), pkgs \
- in self.slot_collision_info.items():
+ for root, atom, pkgs in self.all_conflicts:
conflict_pkgs.append(list(pkgs))
all_conflict_atoms_by_slotatom.append(set())
@@ -150,7 +151,7 @@ class slot_conflict_handler(object):
if self.debug:
writemsg("\nNew configuration:\n", noiselevel=-1)
for pkg in config:
- writemsg(" " + str(pkg) + "\n", noiselevel=-1)
+ writemsg(" %s\n" % (pkg,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
new_solutions = self._check_configuration(config, all_conflict_atoms_by_slotatom, conflict_nodes)
@@ -225,10 +226,14 @@ class slot_conflict_handler(object):
new_change = {}
for pkg in solution:
for flag, state in solution[pkg].items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
if state == "enabled" and flag not in _pkg_use_enabled(pkg):
- new_change.setdefault(pkg, {})[flag] = True
+ new_change.setdefault(pkg, {})[real_flag] = True
elif state == "disabled" and flag in _pkg_use_enabled(pkg):
- new_change.setdefault(pkg, {})[flag] = False
+ new_change.setdefault(pkg, {})[real_flag] = False
return new_change
def _prepare_conflict_msg_and_check_for_specificity(self):
@@ -236,6 +241,7 @@ class slot_conflict_handler(object):
Print all slot conflicts in a human readable way.
"""
_pkg_use_enabled = self.depgraph._pkg_use_enabled
+ verboseconflicts = "--verbose-conflicts" in self.myopts
msg = self.conflict_msg
indent = " "
msg.append("\n!!! Multiple package instances within a single " + \
@@ -243,16 +249,15 @@ class slot_conflict_handler(object):
msg.append("!!! into the dependency graph, resulting" + \
" in a slot conflict:\n\n")
- for (slot_atom, root), pkgs \
- in self.slot_collision_info.items():
- msg.append(str(slot_atom))
+ for root, slot_atom, pkgs in self.all_conflicts:
+ msg.append("%s" % (slot_atom,))
if root != self.depgraph._frozen_config._running_root.root:
msg.append(" for %s" % (root,))
msg.append("\n\n")
for pkg in pkgs:
msg.append(indent)
- msg.append(str(pkg))
+ msg.append("%s" % (pkg,))
parent_atoms = self.all_parents.get(pkg)
if parent_atoms:
#Create a list of collision reasons and map them to sets
@@ -268,12 +273,14 @@ class slot_conflict_handler(object):
for ppkg, atom in parent_atoms:
atom_set = InternalPackageSet(initial_atoms=(atom,))
atom_without_use_set = InternalPackageSet(initial_atoms=(atom.without_use,))
+ atom_without_use_and_slot_set = InternalPackageSet(initial_atoms=(
+ atom.without_use.without_slot,))
for other_pkg in pkgs:
if other_pkg == pkg:
continue
- if not atom_without_use_set.findAtomForPackage(other_pkg, \
+ if not atom_without_use_and_slot_set.findAtomForPackage(other_pkg, \
modified_use=_pkg_use_enabled(other_pkg)):
if atom.operator is not None:
# The version range does not match.
@@ -290,9 +297,11 @@ class slot_conflict_handler(object):
atoms.add((ppkg, atom, other_pkg))
num_all_specific_atoms += 1
collision_reasons[key] = atoms
- else:
- # The slot_abi does not match.
- key = ("sub-slot", atom.slot_abi)
+
+ elif not atom_without_use_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ # The slot and/or sub_slot does not match.
+ key = ("slot", (atom.slot, atom.sub_slot, atom.slot_operator))
atoms = collision_reasons.get(key, set())
atoms.add((ppkg, atom, other_pkg))
num_all_specific_atoms += 1
@@ -312,11 +321,36 @@ class slot_conflict_handler(object):
#Use conditionals not met.
violated_atom = atom.violated_conditionals(_pkg_use_enabled(other_pkg), \
other_pkg.iuse.is_valid_flag)
+ if violated_atom.use is None:
+ # Something like bug #453400 caused the
+ # above findAtomForPackage call to
+ # return None unexpectedly.
+ msg = ("\n\n!!! BUG: Detected "
+ "USE dep match inconsistency:\n"
+ "\tppkg: %s\n"
+ "\tviolated_atom: %s\n"
+ "\tatom: %s unevaluated: %s\n"
+ "\tother_pkg: %s IUSE: %s USE: %s\n" %
+ (ppkg,
+ violated_atom,
+ atom,
+ atom.unevaluated_atom,
+ other_pkg,
+ sorted(other_pkg.iuse.all),
+ sorted(_pkg_use_enabled(other_pkg))))
+ writemsg(msg, noiselevel=-2)
+ raise AssertionError(
+ 'BUG: USE dep match inconsistency')
for flag in violated_atom.use.enabled.union(violated_atom.use.disabled):
atoms = collision_reasons.get(("use", flag), set())
atoms.add((ppkg, atom, other_pkg))
collision_reasons[("use", flag)] = atoms
num_all_specific_atoms += 1
+ elif isinstance(ppkg, AtomArg) and other_pkg.installed:
+ parent_atoms = collision_reasons.get(("AtomArg", None), set())
+ parent_atoms.add((ppkg, atom))
+ collision_reasons[("AtomArg", None)] = parent_atoms
+ num_all_specific_atoms += 1
msg.append(" pulled in by\n")
@@ -342,10 +376,16 @@ class slot_conflict_handler(object):
best_matches[atom.cp] = (ppkg, atom)
else:
best_matches[atom.cp] = (ppkg, atom)
- selected_for_display.update(best_matches.values())
- elif type == "sub-slot":
+ if verboseconflicts:
+ selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ selected_for_display.update(
+ best_matches.values())
+ elif type == "slot":
for ppkg, atom, other_pkg in parents:
selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ break
elif type == "use":
#Prefer atoms with unconditional use deps over, because it's
#not possible to change them on the parent, which means there
@@ -387,21 +427,50 @@ class slot_conflict_handler(object):
# If the list is long, people can simply
# use a pager.
selected_for_display.add((ppkg, atom))
+ elif type == "AtomArg":
+ for ppkg, atom in parents:
+ selected_for_display.add((ppkg, atom))
- def highlight_violations(atom, version, use=[]):
+ def highlight_violations(atom, version, use, slot_violated):
"""Colorize parts of an atom"""
- atom_str = str(atom)
+ atom_str = "%s" % (atom,)
+ colored_idx = set()
if version:
op = atom.operator
ver = None
if atom.cp != atom.cpv:
ver = cpv_getversion(atom.cpv)
slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
if op == "=*":
op = "="
ver += "*"
+ slot_str = ""
+ if slot:
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ # Compute color_idx before adding the color codes
+ # as these change the indices of the letters.
+ if op is not None:
+ colored_idx.update(range(len(op)))
+
+ if ver is not None:
+ start = atom_str.rfind(ver)
+ end = start + len(ver)
+ colored_idx.update(range(start, end))
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+
+
if op is not None:
atom_str = atom_str.replace(op, colorize("BAD", op), 1)
@@ -411,25 +480,48 @@ class slot_conflict_handler(object):
atom_str = atom_str[:start] + \
colorize("BAD", ver) + \
atom_str[end:]
+
+ if slot_str:
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
+
+ elif slot_violated:
+ slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
+
+ slot_str = ""
if slot:
- atom_str = atom_str.replace(":" + slot, colorize("BAD", ":" + slot))
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
if use and atom.use.tokens:
use_part_start = atom_str.find("[")
use_part_end = atom_str.find("]")
new_tokens = []
+ # Compute start index in non-colored atom.
+ ii = str(atom).find("[") + 1
for token in atom.use.tokens:
if token.lstrip("-!").rstrip("=?") in use:
new_tokens.append(colorize("BAD", token))
+ colored_idx.update(range(ii, ii + len(token)))
else:
new_tokens.append(token)
+ ii += 1 + len(token)
atom_str = atom_str[:use_part_start] \
+ "[%s]" % (",".join(new_tokens),) + \
atom_str[use_part_end+1:]
- return atom_str
+ return atom_str, colored_idx
# Show unconditional use deps first, since those
# are more problematic than the conditional kind.
@@ -440,37 +532,49 @@ class slot_conflict_handler(object):
ordered_list.append(parent_atom)
for parent_atom in ordered_list:
parent, atom = parent_atom
- msg.append(2*indent)
- if isinstance(parent,
- (PackageArg, AtomArg)):
- # For PackageArg and AtomArg types, it's
+ if isinstance(parent, PackageArg):
+ # For PackageArg it's
# redundant to display the atom attribute.
- msg.append(str(parent))
+ msg.append("%s\n" % (parent,))
+ elif isinstance(parent, AtomArg):
+ msg.append(2*indent)
+ msg.append("%s (Argument)\n" % (atom,))
else:
# Display the specific atom from SetArg or
# Package types.
version_violated = False
- sub_slot_violated = False
+ slot_violated = False
use = []
for (type, sub_type), parents in collision_reasons.items():
for x in parents:
if parent == x[0] and atom == x[1]:
if type == "version":
version_violated = True
- elif type == "sub-slot":
- sub_slot_violated = True
+ elif type == "slot":
+ slot_violated = True
elif type == "use":
use.append(sub_type)
break
- atom_str = highlight_violations(atom.unevaluated_atom, version_violated, use)
+ atom_str, colored_idx = highlight_violations(atom.unevaluated_atom,
+ version_violated, use, slot_violated)
- if version_violated or sub_slot_violated:
+ if version_violated or slot_violated:
self.is_a_version_conflict = True
- msg.append("%s required by %s" % (atom_str, parent))
- msg.append("\n")
-
+ cur_line = "%s required by %s\n" % (atom_str, parent)
+ marker_line = ""
+ for ii in range(len(cur_line)):
+ if ii in colored_idx:
+ marker_line += "^"
+ else:
+ marker_line += " "
+ marker_line += "\n"
+ msg.append(2*indent)
+ msg.append(cur_line)
+ msg.append(2*indent)
+ msg.append(marker_line)
+
if not selected_for_display:
msg.append(2*indent)
msg.append("(no parents that aren't satisfied by other packages in this slot)\n")
@@ -490,7 +594,6 @@ class slot_conflict_handler(object):
def get_explanation(self):
msg = ""
- _pkg_use_enabled = self.depgraph._pkg_use_enabled
if self.is_a_version_conflict:
return None
@@ -506,13 +609,13 @@ class slot_conflict_handler(object):
return None
if len(solutions)==1:
- if len(self.slot_collision_info)==1:
+ if len(self.all_conflicts) == 1:
msg += "It might be possible to solve this slot collision\n"
else:
msg += "It might be possible to solve these slot collisions\n"
msg += "by applying all of the following changes:\n"
else:
- if len(self.slot_collision_info)==1:
+ if len(self.all_conflicts) == 1:
msg += "It might be possible to solve this slot collision\n"
else:
msg += "It might be possible to solve these slot collisions\n"
@@ -553,8 +656,7 @@ class slot_conflict_handler(object):
if not pkg.installed:
continue
- for (atom, root), pkgs \
- in self.slot_collision_info.items():
+ for root, atom, pkgs in self.all_conflicts:
if pkg not in pkgs:
continue
for other_pkg in pkgs:
@@ -563,7 +665,9 @@ class slot_conflict_handler(object):
if pkg.iuse.all.symmetric_difference(other_pkg.iuse.all) \
or _pkg_use_enabled(pkg).symmetric_difference(_pkg_use_enabled(other_pkg)):
if self.debug:
- writemsg(str(pkg) + " has pending USE changes. Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s has pending USE changes. "
+ "Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
#A list of dicts. Keeps one dict per slot conflict. [ { flag1: "enabled" }, { flag2: "disabled" } ]
@@ -586,16 +690,18 @@ class slot_conflict_handler(object):
if not i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
#Version range does not match.
if self.debug:
- writemsg(str(pkg) + " does not satify all version requirements." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s does not satify all version "
+ "requirements. Rejecting configuration.\n") %
+ (pkg,), noiselevel=-1)
return False
if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required):
#Missing IUSE.
#FIXME: This needs to support use dep defaults.
if self.debug:
- writemsg(str(pkg) + " misses needed flags from IUSE." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s misses needed flags from IUSE."
+ " Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
if not isinstance(ppkg, Package) or ppkg.installed:
@@ -620,8 +726,9 @@ class slot_conflict_handler(object):
#We can't change USE of an installed package (only of an ebuild, but that is already
#part of the conflict, isn't it?
if self.debug:
- writemsg(str(pkg) + ": installed package would need USE changes." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s: installed package would need USE"
+ " changes. Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
#Compute the required USE changes. A flag can be forced to "enabled" or "disabled",
@@ -675,7 +782,7 @@ class slot_conflict_handler(object):
if self.debug:
writemsg("All involved flags:\n", noiselevel=-1)
for id, involved_flags in enumerate(all_involved_flags):
- writemsg(" " + str(config[id]) + "\n", noiselevel=-1)
+ writemsg(" %s\n" % (config[id],), noiselevel=-1)
for flag, state in involved_flags.items():
writemsg(" " + flag + ": " + state + "\n", noiselevel=-1)
@@ -758,7 +865,7 @@ class slot_conflict_handler(object):
inner_first = False
else:
msg += ", "
- msg += flag + ": " + str(state)
+ msg += flag + ": %s" % (state,)
msg += "}"
msg += "]\n"
writemsg(msg, noiselevel=-1)
@@ -862,8 +969,9 @@ class slot_conflict_handler(object):
#We managed to create a new problem with our changes.
is_valid_solution = False
if self.debug:
- writemsg("new conflict introduced: " + str(pkg) + \
- " does not match " + new_atom + " from " + str(ppkg) + "\n", noiselevel=-1)
+ writemsg(("new conflict introduced: %s"
+ " does not match %s from %s\n") %
+ (pkg, new_atom, ppkg), noiselevel=-1)
break
if not is_valid_solution:
@@ -871,7 +979,7 @@ class slot_conflict_handler(object):
#Make sure the changes don't violate REQUIRED_USE
for pkg in required_changes:
- required_use = pkg.metadata.get("REQUIRED_USE")
+ required_use = pkg._metadata.get("REQUIRED_USE")
if not required_use:
continue
@@ -950,8 +1058,16 @@ class _solution_candidate_generator(object):
else:
return self.value == other.value
def __str__(self):
- return str(self.value)
-
+ return "%s" % (self.value,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
def __init__(self, all_involved_flags):
#A copy of all_involved_flags with all "cond" values
#replaced by a _value_helper object.
diff --git a/pym/_emerge/search.py b/pym/_emerge/search.py
index 5abc8a00c..bd74fb7b1 100644
--- a/pym/_emerge/search.py
+++ b/pym/_emerge/search.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -69,7 +69,7 @@ class search(object):
return db.aux_get(*args, **kwargs)
except KeyError:
pass
- raise
+ raise KeyError(args[0])
def _findname(self, *args, **kwargs):
for db in self._dbs:
diff --git a/pym/_emerge/stdout_spinner.py b/pym/_emerge/stdout_spinner.py
index 5ad31f001..670686adf 100644
--- a/pym/_emerge/stdout_spinner.py
+++ b/pym/_emerge/stdout_spinner.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import platform
@@ -53,17 +53,18 @@ class stdout_spinner(object):
def update_basic(self):
self.spinpos = (self.spinpos + 1) % 500
if self._return_early():
- return
+ return True
if (self.spinpos % 100) == 0:
if self.spinpos == 0:
sys.stdout.write(". ")
else:
sys.stdout.write(".")
sys.stdout.flush()
+ return True
def update_scroll(self):
if self._return_early():
- return
+ return True
if(self.spinpos >= len(self.scroll_sequence)):
sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
@@ -71,13 +72,15 @@ class stdout_spinner(object):
sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
sys.stdout.flush()
self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
+ return True
def update_twirl(self):
self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
if self._return_early():
- return
+ return True
sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
sys.stdout.flush()
+ return True
def update_quiet(self):
- return
+ return True
diff --git a/pym/_emerge/unmerge.py b/pym/_emerge/unmerge.py
index b46b89cb8..b04f8f376 100644
--- a/pym/_emerge/unmerge.py
+++ b/pym/_emerge/unmerge.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -457,9 +457,6 @@ def _unmerge_display(root_config, myopts, unmerge_action,
writemsg_level(colorize("WARN","!!! Unmerging it may " + \
"be damaging to your system.\n\n"),
level=logging.WARNING, noiselevel=-1)
- if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
- countdown(int(settings["EMERGE_WARNING_DELAY"]),
- colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
if not quiet:
writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
else: