aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFabian Groffen <grobian@gentoo.org>2021-04-16 15:36:20 +0200
committerFabian Groffen <grobian@gentoo.org>2021-04-16 15:36:20 +0200
commitcc3c972cfcafc20187ee631af4d766a7e4027593 (patch)
treeeb37aef86c8efbd6fb7b305ca44caf267cbca81d
parentMerge remote-tracking branch 'origin/master' into prefix (diff)
parentSimpleRepomanTestCase: update portage.const.EPREFIX after fork (diff)
downloadportage-prefix.tar.gz
portage-prefix.tar.bz2
portage-prefix.zip
Merge remote-tracking branch 'origin/master' into prefix
Signed-off-by: Fabian Groffen <grobian@gentoo.org>
-rw-r--r--.github/workflows/ci.yml2
-rw-r--r--MANIFEST.in3
-rw-r--r--NEWS12
-rw-r--r--RELEASE-NOTES33
l---------[-rwxr-xr-x]bin/chmod-lite11
l---------[-rwxr-xr-x]bin/ebuild-ipc9
-rwxr-xr-xbin/ebuild-pyhelper21
-rwxr-xr-xbin/egencache2
-rwxr-xr-xbin/glsa-check3
-rwxr-xr-xbin/shelve-utils36
-rw-r--r--cnf/make.globals1
-rw-r--r--lib/_emerge/BlockerCache.py6
-rw-r--r--lib/_emerge/EbuildPhase.py28
-rw-r--r--lib/_emerge/Package.py9
-rw-r--r--lib/_emerge/Scheduler.py2
-rw-r--r--lib/_emerge/UseFlagDisplay.py2
-rw-r--r--lib/_emerge/create_depgraph_params.py30
-rw-r--r--lib/_emerge/help.py2
-rw-r--r--lib/_emerge/main.py2
-rw-r--r--lib/_emerge/resolver/output.py2
-rw-r--r--lib/portage/__init__.py18
-rw-r--r--lib/portage/_compat_upgrade/binpkg_multi_instance.py33
-rw-r--r--lib/portage/_emirrordist/Config.py39
-rw-r--r--lib/portage/_emirrordist/ContentDB.py196
-rw-r--r--lib/portage/_emirrordist/DeletionIterator.py25
-rw-r--r--lib/portage/_emirrordist/DeletionTask.py8
-rw-r--r--lib/portage/_emirrordist/FetchIterator.py3
-rw-r--r--lib/portage/_emirrordist/FetchTask.py5
-rw-r--r--lib/portage/_emirrordist/main.py15
-rw-r--r--lib/portage/_sets/ProfilePackageSet.py9
-rw-r--r--lib/portage/_sets/profiles.py10
-rw-r--r--lib/portage/cache/flat_hash.py3
-rw-r--r--lib/portage/dbapi/_VdbMetadataDelta.py11
-rw-r--r--lib/portage/dbapi/bintree.py1
-rw-r--r--lib/portage/dbapi/vartree.py48
-rw-r--r--lib/portage/dep/__init__.py7
-rw-r--r--lib/portage/eapi.py107
-rw-r--r--lib/portage/emaint/modules/merges/__init__.py14
-rw-r--r--lib/portage/emaint/modules/merges/merges.py11
-rw-r--r--lib/portage/emaint/modules/sync/sync.py40
-rw-r--r--lib/portage/locks.py125
-rw-r--r--lib/portage/package/ebuild/_config/KeywordsManager.py7
-rw-r--r--lib/portage/package/ebuild/_config/LocationsManager.py11
-rw-r--r--lib/portage/package/ebuild/_config/MaskManager.py7
-rw-r--r--lib/portage/package/ebuild/_config/UseManager.py12
-rw-r--r--lib/portage/package/ebuild/config.py28
-rw-r--r--lib/portage/package/ebuild/fetch.py195
-rw-r--r--lib/portage/repository/config.py36
-rw-r--r--lib/portage/tests/dep/test_isvalidatom.py26
-rw-r--r--lib/portage/tests/ebuild/test_fetch.py332
-rw-r--r--lib/portage/tests/emerge/test_simple.py4
-rw-r--r--lib/portage/tests/resolver/ResolverPlayground.py10
-rw-r--r--lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py14
-rw-r--r--lib/portage/tests/resolver/test_autounmask.py25
-rw-r--r--lib/portage/tests/resolver/test_autounmask_use_slot_conflict.py51
-rw-r--r--lib/portage/tests/resolver/test_unpack_dependencies.py65
-rw-r--r--lib/portage/tests/resolver/test_use_aliases.py131
-rw-r--r--lib/portage/tests/resolver/test_useflags.py28
-rw-r--r--lib/portage/tests/sync/test_sync_local.py15
-rw-r--r--lib/portage/tests/unicode/test_string_format.py9
-rw-r--r--lib/portage/tests/util/test_shelve.py60
-rw-r--r--lib/portage/util/_async/BuildLogger.py12
-rw-r--r--lib/portage/util/_async/PipeLogger.py12
-rw-r--r--lib/portage/util/_async/PopenProcess.py4
-rw-r--r--lib/portage/util/_async/SchedulerInterface.py10
-rw-r--r--lib/portage/util/_eventloop/EventLoop.py1153
-rw-r--r--lib/portage/util/_eventloop/PollConstants.py17
-rw-r--r--lib/portage/util/_eventloop/PollSelectAdapter.py74
-rw-r--r--lib/portage/util/_eventloop/asyncio_event_loop.py7
-rw-r--r--lib/portage/util/bin_entry_point.py35
-rw-r--r--lib/portage/util/digraph.py3
-rw-r--r--lib/portage/util/futures/_asyncio/__init__.py50
-rw-r--r--lib/portage/util/futures/_asyncio/process.py116
-rw-r--r--lib/portage/util/futures/_asyncio/streams.py13
-rw-r--r--lib/portage/util/futures/_asyncio/tasks.py96
-rw-r--r--lib/portage/util/futures/events.py186
-rw-r--r--lib/portage/util/futures/futures.py156
-rw-r--r--lib/portage/util/futures/transports.py87
-rw-r--r--lib/portage/util/futures/unix_events.py626
-rw-r--r--lib/portage/util/path.py4
-rw-r--r--lib/portage/util/shelve.py58
-rw-r--r--lib/portage/versions.py10
-rw-r--r--man/emaint.16
-rw-r--r--man/emerge.120
-rw-r--r--man/emirrordist.16
-rw-r--r--man/make.conf.517
-rw-r--r--man/portage.56
-rw-r--r--pyproject.toml6
-rw-r--r--repoman/RELEASE-NOTES6
-rwxr-xr-xrepoman/bin/repoman4
-rw-r--r--repoman/cnf/linechecks/linechecks.yaml2
-rw-r--r--repoman/cnf/qa_data/qa_data.yaml1
-rw-r--r--repoman/cnf/repository/qa_data.yaml1
-rw-r--r--repoman/cnf/repository/repository.yaml1
-rw-r--r--repoman/lib/repoman/actions.py1
-rw-r--r--repoman/lib/repoman/argparser.py5
-rwxr-xr-xrepoman/lib/repoman/main.py43
-rw-r--r--repoman/lib/repoman/modules/linechecks/deprecated/deprecated.py2
-rw-r--r--repoman/lib/repoman/modules/linechecks/phases/__init__.py6
-rw-r--r--repoman/lib/repoman/modules/linechecks/phases/phase.py132
-rw-r--r--repoman/lib/repoman/modules/scan/module.py4
-rw-r--r--repoman/lib/repoman/repos.py8
-rw-r--r--repoman/lib/repoman/tests/simple/test_simple.py207
-rw-r--r--repoman/man/repoman.15
-rwxr-xr-xrepoman/setup.py2
-rwxr-xr-xsetup.py107
-rw-r--r--tox.ini2
107 files changed, 2145 insertions, 3194 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d4b960dc3..9188c9c28 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -18,7 +18,7 @@ jobs:
- '3.8'
- '3.9'
- '3.10.0-alpha.3'
- - 'pypy-3.6'
+ - 'pypy-3.7-v7.3.3'
steps:
- uses: actions/checkout@v2
diff --git a/MANIFEST.in b/MANIFEST.in
index c862b044f..4a5248084 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -20,3 +20,6 @@ include .portage_not_installed
# extra scripts
include misc/*
+
+# extensions
+include src/*
diff --git a/NEWS b/NEWS
index 685e42b2c..2812f8a18 100644
--- a/NEWS
+++ b/NEWS
@@ -1,5 +1,17 @@
News (mainly features/major bug fixes)
+portage-3.0.18
+--------------
+* profile-repo-deps is supported in layout.conf profile-formats.
+
+portage-3.0.16
+--------------
+* emirrordist supports the content-hash distfiles mirror layout.
+
+portage-3.0.15
+--------------
+* FEATURES=binpkg-multi-instance is now enabled by default.
+
portage-3.0.13
--------------
* FETCHCOMMAND now supports a \${DIGESTS} placeholder which expands
diff --git a/RELEASE-NOTES b/RELEASE-NOTES
index 430b24a64..8c74f266c 100644
--- a/RELEASE-NOTES
+++ b/RELEASE-NOTES
@@ -1,6 +1,39 @@
Release Notes; upgrade information mainly.
Features/major bugfixes are listed in NEWS
+portage-3.0.18
+==================================
+* Bug Fixes:
+ - Bug 651208 profile-repo-deps profile-formats support
+ - Bug 719260 remove glsa --quiet blank line
+ - Bug 722748 suppress emerge --oneshot portage suggestion
+
+portage-3.0.17
+==================================
+* Bug Fixes:
+ - Bug 773469 emerge --binpkg-respect-use=y implies --autounmask-use=n
+
+portage-3.0.16
+==================================
+* Bug Fixes:
+ - Bug 721680 Add emirrordist shelve dump/restore
+ - Bug 749333 PORTAGE_REPOSITORIES: don't override repos with aliases
+ - Bug 756778 emirrordist --content-db for content-hash layout support
+ - Bug 772785 don't enable binpkg-multi-instance for existing installs
+ - Bug 772806 prevent infinite loop after empty os.path.dirname result
+
+portage-3.0.15
+==================================
+* Bug Fixes:
+ - Bug 715112 default enable FEATURES=binpkg-multi-instance
+ - Bug 756778 content-hash distfiles layout
+ - Bug 766459 emirrordist: prevent distfiles_db _pkg_str pickle problems
+ - Bug 766767 emaint --fix merges: add -y, --yes option
+ - Bug 766773 emerge: disable --autounmask-license by default
+ - Bug 767913 portage.getpid: call os.getpid() lazily
+ - Bug 770712 PopenProcess: use call_soon for _async_waitpid in _start
+ - Bug 771549 prevent USE="${USE} ..." misbehavior
+
portage-3.0.14
==================================
* Bug Fixes:
diff --git a/bin/chmod-lite b/bin/chmod-lite
index 8c62e2de7..d64cd615b 100755..120000
--- a/bin/chmod-lite
+++ b/bin/chmod-lite
@@ -1,10 +1 @@
-#!@PORTAGE_BASH@
-# Copyright 2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-export __PORTAGE_HELPER_CWD=${PWD}
-
-# Use safe cwd, avoiding unsafe import for bug #469338.
-cd "${PORTAGE_PYM_PATH}" || exit 1
-PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
- exec "${PORTAGE_PYTHON:-@PREFIX_PORTAGE_PYTHON@}" "$PORTAGE_BIN_PATH/chmod-lite.py" "$@"
+ebuild-pyhelper \ No newline at end of file
diff --git a/bin/ebuild-ipc b/bin/ebuild-ipc
index 739564d15..d64cd615b 100755..120000
--- a/bin/ebuild-ipc
+++ b/bin/ebuild-ipc
@@ -1,8 +1 @@
-#!@PORTAGE_BASH@
-# Copyright 2010-2013 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-# Use safe cwd, avoiding unsafe import for bug #469338.
-cd "${PORTAGE_PYM_PATH}" || exit 1
-PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
- exec "${PORTAGE_PYTHON:-@PREFIX_PORTAGE_PYTHON@}" "$PORTAGE_BIN_PATH/ebuild-ipc.py" "$@"
+ebuild-pyhelper \ No newline at end of file
diff --git a/bin/ebuild-pyhelper b/bin/ebuild-pyhelper
new file mode 100755
index 000000000..d32bc6f6f
--- /dev/null
+++ b/bin/ebuild-pyhelper
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright 2010-2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+export __PORTAGE_HELPER_CWD=${PWD}
+
+if [[ ${0##*/} == "ebuild-pyhelper" ]]; then
+ echo "ebuild-pyhelper: must be called via symlink" &>2
+ exit 1
+fi
+
+# Use safe cwd, avoiding unsafe import for bug #469338.
+cd "${PORTAGE_PYM_PATH}" || exit 1
+for path in "${PORTAGE_BIN_PATH}/${0##*/}"{.py,}; do
+ if [[ -x "${path}" ]]; then
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ exec "${PORTAGE_PYTHON:-/usr/bin/python}" "${path}" "$@"
+ fi
+done
+echo "File not found: ${path}" >&2
+exit 1
diff --git a/bin/egencache b/bin/egencache
index e083b78d7..99028203b 100755
--- a/bin/egencache
+++ b/bin/egencache
@@ -2,8 +2,6 @@
# Copyright 2009-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
-# unicode_literals for compat with TextIOWrapper in Python 2
-
import argparse
import platform
import signal
diff --git a/bin/glsa-check b/bin/glsa-check
index 2aada5bee..04312a236 100755
--- a/bin/glsa-check
+++ b/bin/glsa-check
@@ -290,7 +290,8 @@ if mode in ["dump", "fix", "inject", "pretend"]:
elif mode == "inject":
sys.stdout.write("injecting " + myid + "\n")
myglsa.inject()
- sys.stdout.write("\n")
+ if not quiet:
+ sys.stdout.write("\n")
sys.exit(0)
# test is a bit different as Glsa.test() produces no output
diff --git a/bin/shelve-utils b/bin/shelve-utils
new file mode 100755
index 000000000..83daeccbc
--- /dev/null
+++ b/bin/shelve-utils
@@ -0,0 +1,36 @@
+#!/usr/bin/python -b
+# Copyright 2020-2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import argparse
+import sys
+
+from os import path as osp
+if osp.isfile(osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")):
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "lib"))
+import portage
+portage._internal_caller = True
+from portage.util.shelve import dump, restore
+
+
+def main(argv=None):
+ parser = argparse.ArgumentParser(prog="shelve-utils")
+ subparsers = parser.add_subparsers(help="sub-command help")
+
+ dump_command = subparsers.add_parser("dump", help="dump shelve database")
+ dump_command.add_argument("src", help="input shelve file")
+ dump_command.add_argument("dest", help="output pickle file")
+ dump_command.set_defaults(func=dump)
+
+ restore_command = subparsers.add_parser("restore", help="restore shelve database")
+ restore_command.add_argument("src", help="input pickle file")
+ restore_command.add_argument("dest", help="output shelve file")
+ restore_command.set_defaults(func=restore)
+
+ args = parser.parse_args(args=portage._decode_argv(argv or sys.argv)[1:])
+ args.func(args)
+
+
+if __name__ == "__main__":
+ portage.util.initialize_logger()
+ main(argv=sys.argv)
diff --git a/cnf/make.globals b/cnf/make.globals
index d3ba98513..d2ffa1847 100644
--- a/cnf/make.globals
+++ b/cnf/make.globals
@@ -54,6 +54,7 @@ FETCHCOMMAND_SFTP="bash -c \"x=\\\${2#sftp://} ; host=\\\${x%%/*} ; port=\\\${ho
# Default user options
FEATURES="assume-digests binpkg-docompress binpkg-dostrip binpkg-logs
+ binpkg-multi-instance
config-protect-if-modified distlocks ebuild-locks
fixlafiles ipc-sandbox merge-sync multilib-strict
network-sandbox news parallel-fetch pid-sandbox
diff --git a/lib/_emerge/BlockerCache.py b/lib/_emerge/BlockerCache.py
index 8154d9ade..035f2212d 100644
--- a/lib/_emerge/BlockerCache.py
+++ b/lib/_emerge/BlockerCache.py
@@ -133,9 +133,9 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
if len(self._modified) >= self._cache_threshold and \
secpass >= 2:
try:
- f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
- pickle.dump(self._cache_data, f, protocol=2)
- f.close()
+ with portage.util.atomic_ofstream(self._cache_filename, mode='wb') as f:
+ pickle.dump(self._cache_data, f, protocol=2)
+
portage.util.apply_secpass_permissions(
self._cache_filename, gid=portage.portage_gid, mode=0o644)
except (IOError, OSError):
diff --git a/lib/_emerge/EbuildPhase.py b/lib/_emerge/EbuildPhase.py
index 496db33ec..bf6c2f372 100644
--- a/lib/_emerge/EbuildPhase.py
+++ b/lib/_emerge/EbuildPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -21,7 +21,6 @@ from portage.util._dyn_libs.soname_deps_qa import (
)
from portage.package.ebuild.prepare_build_dirs import (_prepare_workdir,
_prepare_fake_distdir, _prepare_fake_filesdir)
-from portage.util.futures.compat_coroutine import coroutine
from portage.util import writemsg
from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
from portage.util._async.BuildLogger import BuildLogger
@@ -72,11 +71,10 @@ class EbuildPhase(CompositeTask):
_locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
def _start(self):
- future = asyncio.ensure_future(self._async_start(loop=self.scheduler), loop=self.scheduler)
+ future = asyncio.ensure_future(self._async_start(), loop=self.scheduler)
self._start_task(AsyncTaskFuture(future=future), self._async_start_exit)
- @coroutine
- def _async_start(self, loop=None):
+ async def _async_start(self):
need_builddir = self.phase not in EbuildProcess._phases_without_builddir
@@ -134,7 +132,7 @@ class EbuildPhase(CompositeTask):
# Force background=True for this header since it's intended
# for the log and it doesn't necessarily need to be visible
# elsewhere.
- yield self._elog('einfo', msg, background=True, loop=self.scheduler)
+ await self._elog('einfo', msg, background=True)
if self.phase == 'package':
if 'PORTAGE_BINPKG_TMPFILE' not in self.settings:
@@ -404,8 +402,7 @@ class EbuildPhase(CompositeTask):
self.returncode = 1
self.wait()
- @coroutine
- def _elog(self, elog_funcname, lines, background=None, loop=None):
+ async def _elog(self, elog_funcname, lines, background=None):
if background is None:
background = self.background
out = io.StringIO()
@@ -436,12 +433,12 @@ class EbuildPhase(CompositeTask):
_set_nonblocking(build_logger.stdin.fileno())
log_file = build_logger.stdin
- yield self.scheduler.async_output(msg, log_file=log_file,
- background=background, loop=self.scheduler)
+ await self.scheduler.async_output(msg, log_file=log_file,
+ background=background)
if build_logger is not None:
build_logger.stdin.close()
- yield build_logger.async_wait()
+ await build_logger.async_wait()
except asyncio.CancelledError:
if build_logger is not None:
build_logger.cancel()
@@ -489,7 +486,7 @@ class _PostPhaseCommands(CompositeTask):
if 'qa-unresolved-soname-deps' in self.settings.features:
# This operates on REQUIRES metadata generated by the above function call.
- future = asyncio.ensure_future(self._soname_deps_qa(loop=self.scheduler), loop=self.scheduler)
+ future = asyncio.ensure_future(self._soname_deps_qa(), loop=self.scheduler)
# If an unexpected exception occurs, then this will raise it.
future.add_done_callback(lambda future: future.cancelled() or future.result())
self._start_task(AsyncTaskFuture(future=future), self._default_final_exit)
@@ -498,12 +495,11 @@ class _PostPhaseCommands(CompositeTask):
else:
self._default_final_exit(task)
- @coroutine
- def _soname_deps_qa(self, loop=None):
+ async def _soname_deps_qa(self):
vardb = QueryCommand.get_db()[self.settings['EROOT']]['vartree'].dbapi
- all_provides = (yield self.scheduler.run_in_executor(ForkExecutor(loop=self.scheduler), _get_all_provides, vardb))
+ all_provides = (await self.scheduler.run_in_executor(ForkExecutor(loop=self.scheduler), _get_all_provides, vardb))
unresolved = _get_unresolved_soname_deps(os.path.join(self.settings['PORTAGE_BUILDDIR'], 'build-info'), all_provides)
@@ -531,4 +527,4 @@ class _PostPhaseCommands(CompositeTask):
qa_msg.extend("\t%s: %s" % (filename, " ".join(sorted(soname_deps)))
for filename, soname_deps in unresolved)
qa_msg.append("")
- yield self.elog("eqawarn", qa_msg, loop=self.scheduler)
+ await self.elog("eqawarn", qa_msg)
diff --git a/lib/_emerge/Package.py b/lib/_emerge/Package.py
index 7ce338a4f..f970d87f5 100644
--- a/lib/_emerge/Package.py
+++ b/lib/_emerge/Package.py
@@ -453,15 +453,6 @@ class Package(Task):
else:
qacat = k + ".syntax"
- # For unicode safety with python-2.x we need to avoid
- # using the string format operator with a non-unicode
- # format string, since that will result in the
- # PortageException.__str__() method being invoked,
- # followed by unsafe decoding that may result in a
- # UnicodeDecodeError. Therefore, use unicode_literals
- # to ensure that format strings are unicode, so that
- # PortageException.__unicode__() is used when necessary
- # in python-2.x.
if not self.installed:
categorized_error = False
if e.errors:
diff --git a/lib/_emerge/Scheduler.py b/lib/_emerge/Scheduler.py
index 465f928a0..0ed2ee530 100644
--- a/lib/_emerge/Scheduler.py
+++ b/lib/_emerge/Scheduler.py
@@ -1188,8 +1188,6 @@ class Scheduler(PollScheduler):
printer.eerror(line)
printer.eerror("")
for failed_pkg in self._failed_pkgs_all:
- # Use unicode_literals to force unicode format string so
- # that Package.__unicode__() is called in python2.
msg = " %s" % (failed_pkg.pkg,)
if failed_pkg.postinst_failure:
msg += " (postinst failed)"
diff --git a/lib/_emerge/UseFlagDisplay.py b/lib/_emerge/UseFlagDisplay.py
index 5e3ba400d..fffc8144a 100644
--- a/lib/_emerge/UseFlagDisplay.py
+++ b/lib/_emerge/UseFlagDisplay.py
@@ -111,8 +111,6 @@ def pkg_use_display(pkg, opts, modified_use=None):
flags.sort(key=UseFlagDisplay.sort_combined)
else:
flags.sort(key=UseFlagDisplay.sort_separated)
- # Use unicode_literals to force unicode format string so
- # that UseFlagDisplay.__unicode__() is called in python2.
flag_displays.append('%s="%s"' % (varname,
' '.join("%s" % (f,) for f in flags)))
diff --git a/lib/_emerge/create_depgraph_params.py b/lib/_emerge/create_depgraph_params.py
index 0d0e07b9c..267600fb6 100644
--- a/lib/_emerge/create_depgraph_params.py
+++ b/lib/_emerge/create_depgraph_params.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2018 Gentoo Foundation
+# Copyright 1999-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -41,19 +41,29 @@ def create_depgraph_params(myopts, myaction):
# binpkg_changed_deps: reject binary packages with outdated deps
myparams = {"recurse" : True}
+ binpkg_respect_use = myopts.get("--binpkg-respect-use")
+ if binpkg_respect_use is not None:
+ myparams["binpkg_respect_use"] = binpkg_respect_use
+ elif "--usepkgonly" not in myopts:
+ # If --binpkg-respect-use is not explicitly specified, we enable
+ # the behavior automatically (like requested in bug #297549), as
+ # long as it doesn't strongly conflict with other options that
+ # have been specified.
+ myparams["binpkg_respect_use"] = "auto"
+
autounmask_keep_keywords = myopts.get("--autounmask-keep-keywords")
autounmask_keep_masks = myopts.get("--autounmask-keep-masks")
autounmask = myopts.get("--autounmask")
- autounmask_license = myopts.get('--autounmask-license')
- autounmask_use = myopts.get('--autounmask-use')
+ autounmask_license = myopts.get('--autounmask-license', 'y' if autounmask is True else 'n')
+ autounmask_use = 'n' if myparams.get('binpkg_respect_use') == 'y' else myopts.get('--autounmask-use')
if autounmask == 'n':
autounmask = False
else:
if autounmask is None:
if autounmask_use in (None, 'y'):
autounmask = True
- elif autounmask_license in (None, 'y'):
+ if autounmask_license in ('y',):
autounmask = True
# Do not enable package.accept_keywords or package.mask
@@ -67,7 +77,7 @@ def create_depgraph_params(myopts, myaction):
myparams['autounmask'] = autounmask
myparams['autounmask_keep_use'] = True if autounmask_use == 'n' else False
- myparams['autounmask_keep_license'] = True if autounmask_license == 'n' else False
+ myparams['autounmask_keep_license'] = False if autounmask_license == 'y' else True
myparams['autounmask_keep_keywords'] = False if autounmask_keep_keywords in (None, 'n') else True
myparams['autounmask_keep_masks'] = False if autounmask_keep_masks in (None, 'n') else True
@@ -153,16 +163,6 @@ def create_depgraph_params(myopts, myaction):
'--update' in myopts:
myparams['rebuilt_binaries'] = True
- binpkg_respect_use = myopts.get('--binpkg-respect-use')
- if binpkg_respect_use is not None:
- myparams['binpkg_respect_use'] = binpkg_respect_use
- elif '--usepkgonly' not in myopts:
- # If --binpkg-respect-use is not explicitly specified, we enable
- # the behavior automatically (like requested in bug #297549), as
- # long as it doesn't strongly conflict with other options that
- # have been specified.
- myparams['binpkg_respect_use'] = 'auto'
-
binpkg_changed_deps = myopts.get('--binpkg-changed-deps')
if binpkg_changed_deps is not None:
myparams['binpkg_changed_deps'] = binpkg_changed_deps
diff --git a/lib/_emerge/help.py b/lib/_emerge/help.py
index 15e1941ca..d9faf0c4b 100644
--- a/lib/_emerge/help.py
+++ b/lib/_emerge/help.py
@@ -11,7 +11,7 @@ def emerge_help():
print(" "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >")
print(" "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]")
print(" "+turquoise("emerge")+" "+turquoise("--help"))
- print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuUvVw")+"]")
+ print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuUvVwW")+"]")
print(" [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]")
print(" [ "+green("--complete-graph")+" ] [ "+green("--deep")+" ]")
print(" [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + " ]")
diff --git a/lib/_emerge/main.py b/lib/_emerge/main.py
index 0ac25ea36..31e690584 100644
--- a/lib/_emerge/main.py
+++ b/lib/_emerge/main.py
@@ -187,6 +187,7 @@ def insert_optional_args(args):
'q' : y_or_n,
'v' : y_or_n,
'w' : y_or_n,
+ 'W' : y_or_n,
}
arg_stack = args[:]
@@ -470,6 +471,7 @@ def parse_opts(tmpcmdline, silent=False):
},
"--deselect": {
+ "shortopt" : "-W",
"help" : "remove atoms/sets from the world file",
"choices" : true_y_or_n
},
diff --git a/lib/_emerge/resolver/output.py b/lib/_emerge/resolver/output.py
index 0c90abefb..dea8a4be8 100644
--- a/lib/_emerge/resolver/output.py
+++ b/lib/_emerge/resolver/output.py
@@ -554,8 +554,6 @@ class Display:
"""
writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
if show_repos:
- # Use unicode_literals to force unicode format string so
- # that RepoDisplay.__unicode__() is called in python2.
writemsg_stdout("%s" % (self.conf.repo_display,),
noiselevel=-1)
diff --git a/lib/portage/__init__.py b/lib/portage/__init__.py
index 0b369a89b..296e93471 100644
--- a/lib/portage/__init__.py
+++ b/lib/portage/__init__.py
@@ -388,7 +388,7 @@ _sync_mode = False
class _ForkWatcher:
@staticmethod
def hook(_ForkWatcher):
- _ForkWatcher.current_pid = _os.getpid()
+ _ForkWatcher.current_pid = None
# Force instantiation of a new event loop policy as a workaround
# for https://bugs.python.org/issue22087.
asyncio.set_event_loop_policy(None)
@@ -401,6 +401,8 @@ def getpid():
"""
Cached version of os.getpid(). ForkProcess updates the cache.
"""
+ if _ForkWatcher.current_pid is None:
+ _ForkWatcher.current_pid = _os.getpid()
return _ForkWatcher.current_pid
def _get_stdin():
@@ -476,16 +478,16 @@ def abssymlink(symlink, target=None):
_doebuild_manifest_exempt_depend = 0
_testing_eapis = frozenset([
- "4-python",
- "5-progress",
])
_deprecated_eapis = frozenset([
+ "3_pre1",
+ "3_pre2",
"4_pre1",
+ "4-python",
"4-slot-abi",
- "3_pre2",
- "3_pre1",
"5_pre1",
"5_pre2",
+ "5-progress",
"6_pre1",
"7_pre1",
])
@@ -495,11 +497,7 @@ def _eapi_is_deprecated(eapi):
return eapi in _deprecated_eapis
def eapi_is_supported(eapi):
- if not isinstance(eapi, str):
- # Only call str() when necessary since with python2 it
- # can trigger UnicodeEncodeError if EAPI is corrupt.
- eapi = str(eapi)
- eapi = eapi.strip()
+ eapi = str(eapi).strip()
return eapi in _supported_eapis
diff --git a/lib/portage/_compat_upgrade/binpkg_multi_instance.py b/lib/portage/_compat_upgrade/binpkg_multi_instance.py
new file mode 100644
index 000000000..b4aabe8b2
--- /dev/null
+++ b/lib/portage/_compat_upgrade/binpkg_multi_instance.py
@@ -0,0 +1,33 @@
+# Copyright 2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import GLOBAL_CONFIG_PATH
+
+COMPAT_FEATURES = 'FEATURES="${FEATURES} -binpkg-multi-instance"'
+
+
+def main():
+ """
+ If the current installation still has binpkg-multi-instance
+ disabled, then patch make.globals inside ${ED} to maintain backward
+ compatibility. This is intended to be called from the ebuild as
+ follows:
+
+ pkg_preinst() {
+ python_setup
+ env -u FEATURES -u PORTAGE_REPOSITORIES \
+ PYTHONPATH="${D}$(python_get_sitedir)${PYTHONPATH:+:${PYTHONPATH}}" \
+ "${PYTHON}" -m portage._compat_upgrade.binpkg_multi_instance || die
+ }
+ """
+ if 'binpkg-multi-instance' not in portage.settings.features:
+ portage.output.EOutput().einfo('Setting make.globals default {} for backward compatibility'.format(COMPAT_FEATURES))
+ config_path = os.path.join(os.environ['ED'], GLOBAL_CONFIG_PATH.lstrip(os.sep), 'make.globals')
+ with open(config_path, 'at') as f:
+ f.write("{}\n".format(COMPAT_FEATURES))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/portage/_emirrordist/Config.py b/lib/portage/_emirrordist/Config.py
index 4bee4f45e..a4b75809f 100644
--- a/lib/portage/_emirrordist/Config.py
+++ b/lib/portage/_emirrordist/Config.py
@@ -1,4 +1,4 @@
-# Copyright 2013-2020 Gentoo Authors
+# Copyright 2013-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import copy
@@ -10,6 +10,7 @@ import time
from portage import os
from portage.package.ebuild.fetch import MirrorLayoutConfig
from portage.util import grabdict, grablines
+from .ContentDB import ContentDB
class Config:
def __init__(self, options, portdb, event_loop):
@@ -25,24 +26,24 @@ class Config:
self.start_time = time.time()
self._open_files = []
- self.log_success = self._open_log('success', options.success_log, 'a')
- self.log_failure = self._open_log('failure', options.failure_log, 'a')
+ self.log_success = self._open_log('success', getattr(options, 'success_log', None), 'a')
+ self.log_failure = self._open_log('failure', getattr(options, 'failure_log', None), 'a')
self.distfiles = None
- if options.distfiles is not None:
+ if getattr(options, 'distfiles', None) is not None:
self.distfiles = options.distfiles
self.mirrors = copy.copy(portdb.settings.thirdpartymirrors())
- if options.mirror_overrides is not None:
+ if getattr(options, 'mirror_overrides', None) is not None:
self.mirrors.update(grabdict(options.mirror_overrides))
- if options.mirror_skip is not None:
+ if getattr(options, 'mirror_skip', None) is not None:
for x in options.mirror_skip.split(","):
self.mirrors[x] = []
self.whitelist = None
- if options.whitelist_from is not None:
+ if getattr(options, 'whitelist_from', None) is not None:
self.whitelist = set()
for filename in options.whitelist_from:
for line in grablines(filename):
@@ -51,27 +52,32 @@ class Config:
self.whitelist.add(line)
self.restrict_mirror_exemptions = None
- if options.restrict_mirror_exemptions is not None:
+ if getattr(options, 'restrict_mirror_exemptions', None) is not None:
self.restrict_mirror_exemptions = frozenset(
options.restrict_mirror_exemptions.split(","))
self.recycle_db = None
- if options.recycle_db is not None:
+ if getattr(options, 'recycle_db', None) is not None:
self.recycle_db = self._open_shelve(
options.recycle_db, 'recycle')
self.distfiles_db = None
- if options.distfiles_db is not None:
+ if getattr(options, 'distfiles_db', None) is not None:
self.distfiles_db = self._open_shelve(
options.distfiles_db, 'distfiles')
+ self.content_db = None
+ if getattr(options, 'content_db', None) is not None:
+ self.content_db = ContentDB(self._open_shelve(
+ options.content_db, 'content'))
+
self.deletion_db = None
- if options.deletion_db is not None:
+ if getattr(options, 'deletion_db', None) is not None:
self.deletion_db = self._open_shelve(
options.deletion_db, 'deletion')
self.layout_conf = MirrorLayoutConfig()
- if options.layout_conf is None:
+ if getattr(options, 'layout_conf', None) is None:
options.layout_conf = os.path.join(self.distfiles,
'layout.conf')
self.layout_conf.read_from_file(options.layout_conf)
@@ -79,7 +85,7 @@ class Config:
def _open_log(self, log_desc, log_path, mode):
- if log_path is None or self.options.dry_run:
+ if log_path is None or getattr(self.options, 'dry_run', False):
log_func = logging.info
line_format = "%s: %%s" % log_desc
add_newline = False
@@ -106,12 +112,13 @@ class Config:
self._log_func(self._line_format % (msg,))
def _open_shelve(self, db_file, db_desc):
- if self.options.dry_run:
+ dry_run = getattr(self.options, 'dry_run', False)
+ if dry_run:
open_flag = "r"
else:
open_flag = "c"
- if self.options.dry_run and not os.path.exists(db_file):
+ if dry_run and not os.path.exists(db_file):
db = {}
else:
try:
@@ -123,7 +130,7 @@ class Config:
from bsddb3 import dbshelve
db = dbshelve.open(db_file, flags=open_flag)
- if self.options.dry_run:
+ if dry_run:
logging.warning("dry-run: %s db opened in readonly mode" % db_desc)
if not isinstance(db, dict):
volatile_db = dict((k, db[k]) for k in db)
diff --git a/lib/portage/_emirrordist/ContentDB.py b/lib/portage/_emirrordist/ContentDB.py
new file mode 100644
index 000000000..d9ce3cc45
--- /dev/null
+++ b/lib/portage/_emirrordist/ContentDB.py
@@ -0,0 +1,196 @@
+# Copyright 2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import operator
+import shelve
+import typing
+
+from portage.package.ebuild.fetch import DistfileName
+
+
+class ContentDB:
+ """
+ The content db serves to translate content digests to distfiles
+ names, and distfiles names to content digests. All keys have one or
+ more prefixes separated by colons. For a digest key, the first
+ prefix is "digest" and the second prefix is the hash algorithm name.
+ For a filename key, the prefix is "filename".
+
+ The value associated with a digest key is a set of file names. The
+ value associated with a distfile key is a set of content revisions.
+ Each content revision is expressed as a dictionary of digests which
+ is suitable for construction of a DistfileName instance.
+ """
+
+ def __init__(self, shelve_instance: shelve.Shelf):
+ self._shelve = shelve_instance
+
+ def add(self, filename: DistfileName):
+ """
+ Add file name and digests, creating a new content revision, or
+ incrementing the reference count to an identical content revision
+ if one exists. If the file name had previous content revisions,
+ then they continue to exist independently of the new one.
+
+ @param filename: file name with digests attribute
+ """
+ distfile_str = str(filename)
+ distfile_key = "filename:{}".format(distfile_str)
+ for k, v in filename.digests.items():
+ if k != "size":
+ digest_key = "digest:{}:{}".format(k.upper(), v.lower())
+ try:
+ digest_files = self._shelve[digest_key]
+ except KeyError:
+ digest_files = set()
+ digest_files.add(distfile_str)
+ self._shelve[digest_key] = digest_files
+ try:
+ content_revisions = self._shelve[distfile_key]
+ except KeyError:
+ content_revisions = set()
+
+ revision_key = tuple(
+ sorted(
+ (
+ (algo.upper(), filename.digests[algo.upper()].lower())
+ for algo in filename.digests
+ if algo != "size"
+ ),
+ key=operator.itemgetter(0),
+ )
+ )
+ content_revisions.add(revision_key)
+ self._shelve[distfile_key] = content_revisions
+
+ def remove(self, filename: DistfileName):
+ """
+ Remove a file name and digests from the database. If identical
+ content is still referenced by one or more other file names,
+ then those references are preserved (like removing one of many
+ hardlinks). Also, this file name may reference other content
+ revisions with different digests, and those content revisions
+ will remain as well.
+
+ @param filename: file name with digests attribute
+ """
+ distfile_key = "filename:{}".format(filename)
+ try:
+ content_revisions = self._shelve[distfile_key]
+ except KeyError:
+ pass
+ else:
+ remaining = set()
+ for revision_key in content_revisions:
+ if not any(digest_item in revision_key for digest_item in filename.digests.items()):
+ remaining.add(revision_key)
+ continue
+ for k, v in revision_key:
+ digest_key = "digest:{}:{}".format(k, v)
+ try:
+ digest_files = self._shelve[digest_key]
+ except KeyError:
+ digest_files = set()
+
+ try:
+ digest_files.remove(filename)
+ except KeyError:
+ pass
+
+ if digest_files:
+ self._shelve[digest_key] = digest_files
+ else:
+ try:
+ del self._shelve[digest_key]
+ except KeyError:
+ pass
+
+ if remaining:
+ logging.debug(("drop '%s' revision(s) from content db") % filename)
+ self._shelve[distfile_key] = remaining
+ else:
+ logging.debug(("drop '%s' from content db") % filename)
+ try:
+ del self._shelve[distfile_key]
+ except KeyError:
+ pass
+
+ def get_filenames_translate(
+ self, filename: typing.Union[str, DistfileName]
+ ) -> typing.Generator[DistfileName, None, None]:
+ """
+ Translate distfiles content digests to zero or more distfile names.
+ If filename is already a distfile name, then it will pass
+ through unchanged.
+
+ A given content digest will translate to multiple distfile names if
+ multiple associations have been created via the add method. The
+ relationship between a content digest and a distfile name is similar
+ to the relationship between an inode and a hardlink.
+
+ @param filename: A filename listed by layout get_filenames
+ """
+ if not isinstance(filename, DistfileName):
+ filename = DistfileName(filename)
+
+ # Match content digests with zero or more content revisions.
+ matched_revisions = {}
+
+ for k, v in filename.digests.items():
+ digest_item = (k.upper(), v.lower())
+ digest_key = "digest:{}:{}".format(*digest_item)
+ try:
+ digest_files = self._shelve[digest_key]
+ except KeyError:
+ continue
+
+ for distfile_str in digest_files:
+ matched_revisions.setdefault(distfile_str, set())
+ try:
+ content_revisions = self._shelve["filename:{}".format(distfile_str)]
+ except KeyError:
+ pass
+ else:
+ for revision_key in content_revisions:
+ if (
+ digest_item in revision_key
+ and revision_key not in matched_revisions[distfile_str]
+ ):
+ matched_revisions[distfile_str].add(revision_key)
+ yield DistfileName(distfile_str, digests=dict(revision_key))
+
+ if not any(matched_revisions.values()):
+ # Since filename matched zero content revisions, allow
+ # it to pass through unchanged (on the path toward deletion).
+ yield filename
+
+ def __len__(self):
+ return len(self._shelve)
+
+ def __contains__(self, k):
+ return k in self._shelve
+
+ def __iter__(self):
+ return self._shelve.__iter__()
+
+ def items(self):
+ return self._shelve.items()
+
+ def __setitem__(self, k, v):
+ self._shelve[k] = v
+
+ def __getitem__(self, k):
+ return self._shelve[k]
+
+ def __delitem__(self, k):
+ del self._shelve[k]
+
+ def get(self, k, *args):
+ return self._shelve.get(k, *args)
+
+ def close(self):
+ self._shelve.close()
+
+ def clear(self):
+ self._shelve.clear()
diff --git a/lib/portage/_emirrordist/DeletionIterator.py b/lib/portage/_emirrordist/DeletionIterator.py
index 08985ed6c..ab4309f9a 100644
--- a/lib/portage/_emirrordist/DeletionIterator.py
+++ b/lib/portage/_emirrordist/DeletionIterator.py
@@ -1,10 +1,12 @@
-# Copyright 2013-2019 Gentoo Authors
+# Copyright 2013-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import itertools
import logging
import stat
from portage import os
+from portage.package.ebuild.fetch import DistfileName
from .DeletionTask import DeletionTask
class DeletionIterator:
@@ -21,8 +23,25 @@ class DeletionIterator:
deletion_delay = self._config.options.deletion_delay
start_time = self._config.start_time
distfiles_set = set()
- for layout in self._config.layouts:
- distfiles_set.update(layout.get_filenames(distdir))
+ distfiles_set.update(
+ (
+ filename
+ if isinstance(filename, DistfileName)
+ else DistfileName(filename)
+ for filename in itertools.chain.from_iterable(
+ layout.get_filenames(distdir) for layout in self._config.layouts
+ )
+ )
+ if self._config.content_db is None
+ else itertools.chain.from_iterable(
+ (
+ self._config.content_db.get_filenames_translate(filename)
+ for filename in itertools.chain.from_iterable(
+ layout.get_filenames(distdir) for layout in self._config.layouts
+ )
+ )
+ )
+ )
for filename in distfiles_set:
# require at least one successful stat()
exceptions = []
diff --git a/lib/portage/_emirrordist/DeletionTask.py b/lib/portage/_emirrordist/DeletionTask.py
index 5eb01d840..73493c5a1 100644
--- a/lib/portage/_emirrordist/DeletionTask.py
+++ b/lib/portage/_emirrordist/DeletionTask.py
@@ -5,6 +5,7 @@ import errno
import logging
from portage import os
+from portage.package.ebuild.fetch import ContentHashLayout
from portage.util._async.FileCopier import FileCopier
from _emerge.CompositeTask import CompositeTask
@@ -99,6 +100,10 @@ class DeletionTask(CompositeTask):
def _delete_links(self):
success = True
for layout in self.config.layouts:
+ if isinstance(layout, ContentHashLayout) and not self.distfile.digests:
+ logging.debug(("_delete_links: '%s' has "
+ "no digests") % self.distfile)
+ continue
distfile_path = os.path.join(
self.config.options.distfiles,
layout.get_path(self.distfile))
@@ -134,6 +139,9 @@ class DeletionTask(CompositeTask):
logging.debug(("drop '%s' from "
"distfiles db") % self.distfile)
+ if self.config.content_db is not None:
+ self.config.content_db.remove(self.distfile)
+
if self.config.deletion_db is not None:
try:
del self.config.deletion_db[self.distfile]
diff --git a/lib/portage/_emirrordist/FetchIterator.py b/lib/portage/_emirrordist/FetchIterator.py
index fe521c346..8ca6a2eac 100644
--- a/lib/portage/_emirrordist/FetchIterator.py
+++ b/lib/portage/_emirrordist/FetchIterator.py
@@ -8,6 +8,7 @@ from portage.checksum import (_apply_hash_filter,
_filter_unaccelarated_hashes, _hash_filter)
from portage.dep import use_reduce
from portage.exception import PortageException, PortageKeyError
+from portage.package.ebuild.fetch import DistfileName
from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
from portage.util._async.TaskScheduler import TaskScheduler
from portage.util.futures.iter_completed import iter_gather
@@ -259,7 +260,7 @@ def _async_fetch_tasks(config, hash_filter, repo_config, digests_future, cpv,
cpv=cpv,
background=True,
digests=file_digests,
- distfile=filename,
+ distfile=DistfileName(filename, digests=file_digests),
restrict=restrict,
uri_tuple=uri_tuple,
config=config))
diff --git a/lib/portage/_emirrordist/FetchTask.py b/lib/portage/_emirrordist/FetchTask.py
index 997762082..5a48f91cd 100644
--- a/lib/portage/_emirrordist/FetchTask.py
+++ b/lib/portage/_emirrordist/FetchTask.py
@@ -1,4 +1,4 @@
-# Copyright 2013-2020 Gentoo Authors
+# Copyright 2013-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import collections
@@ -47,6 +47,9 @@ class FetchTask(CompositeTask):
# Convert _pkg_str to str in order to prevent pickle problems.
self.config.distfiles_db[self.distfile] = str(self.cpv)
+ if self.config.content_db is not None:
+ self.config.content_db.add(self.distfile)
+
if not self._have_needed_digests():
msg = "incomplete digests: %s" % " ".join(self.digests)
self.scheduler.output(msg, background=self.background,
diff --git a/lib/portage/_emirrordist/main.py b/lib/portage/_emirrordist/main.py
index 8d00a05f5..2200ec715 100644
--- a/lib/portage/_emirrordist/main.py
+++ b/lib/portage/_emirrordist/main.py
@@ -1,4 +1,4 @@
-# Copyright 2013-2020 Gentoo Authors
+# Copyright 2013-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import argparse
@@ -7,6 +7,7 @@ import sys
import portage
from portage import os
+from portage.package.ebuild.fetch import ContentHashLayout
from portage.util import normalize_path, _recursive_file_list
from portage.util._async.run_main_scheduler import run_main_scheduler
from portage.util._async.SchedulerInterface import SchedulerInterface
@@ -152,6 +153,12 @@ common_options = (
"metavar" : "FILE"
},
{
+ "longopt" : "--content-db",
"help" : "database file used to map content digests to "
+ "distfiles names (required for content-hash layout)",
+ "metavar" : "FILE"
+ },
+ {
"longopt" : "--recycle-dir",
"help" : "directory for extended retention of files that "
"are removed from distdir with the --delete option",
@@ -441,6 +448,12 @@ def emirrordist_main(args):
if not options.mirror:
parser.error('No action specified')
+ if options.delete and config.content_db is None:
+ for layout in config.layouts:
+ if isinstance(layout, ContentHashLayout):
+ parser.error("content-hash layout requires "
+ "--content-db to be specified")
+
returncode = os.EX_OK
if options.mirror:
diff --git a/lib/portage/_sets/ProfilePackageSet.py b/lib/portage/_sets/ProfilePackageSet.py
index fec937391..7a304c578 100644
--- a/lib/portage/_sets/ProfilePackageSet.py
+++ b/lib/portage/_sets/ProfilePackageSet.py
@@ -1,7 +1,8 @@
-# Copyright 2014 Gentoo Foundation
+# Copyright 2014-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage import os
+from portage.repository.config import allow_profile_repo_deps
from portage.util import grabfile_package, stack_lists
from portage._sets.base import PackageSet
@@ -9,7 +10,9 @@ class ProfilePackageSet(PackageSet):
_operations = ["merge"]
def __init__(self, profiles, debug=False):
- super(ProfilePackageSet, self).__init__()
+ super(ProfilePackageSet, self).__init__(
+ allow_repo=any(allow_profile_repo_deps(y) for y in profiles)
+ )
self._profiles = profiles
if profiles:
desc_profile = profiles[-1]
@@ -24,7 +27,7 @@ class ProfilePackageSet(PackageSet):
self._setAtoms(x for x in stack_lists(
[grabfile_package(os.path.join(y.location, "packages"),
verify_eapi=True, eapi=y.eapi, eapi_default=None,
- allow_build_id=y.allow_build_id)
+ allow_build_id=y.allow_build_id, allow_repo=allow_profile_repo_deps(y))
for y in self._profiles
if "profile-set" in y.profile_formats],
incremental=1) if x[:1] != "*")
diff --git a/lib/portage/_sets/profiles.py b/lib/portage/_sets/profiles.py
index bccc02e7c..95831f705 100644
--- a/lib/portage/_sets/profiles.py
+++ b/lib/portage/_sets/profiles.py
@@ -1,9 +1,10 @@
-# Copyright 2007-2014 Gentoo Foundation
+# Copyright 2007-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import logging
from portage import os
+from portage.repository.config import allow_profile_repo_deps
from portage.util import grabfile_package, stack_lists
from portage._sets.base import PackageSet
from portage._sets import get_boolean
@@ -15,7 +16,9 @@ class PackagesSystemSet(PackageSet):
_operations = ["merge"]
def __init__(self, profiles, debug=False):
- super(PackagesSystemSet, self).__init__()
+ super(PackagesSystemSet, self).__init__(
+ allow_repo=any(allow_profile_repo_deps(x) for x in profiles)
+ )
self._profiles = profiles
self._debug = debug
if profiles:
@@ -35,7 +38,8 @@ class PackagesSystemSet(PackageSet):
mylist = [grabfile_package(os.path.join(x.location, "packages"),
verify_eapi=True, eapi=x.eapi, eapi_default=None,
- allow_build_id=x.allow_build_id)
+ allow_build_id=x.allow_build_id,
+ allow_repo=allow_profile_repo_deps(x))
for x in self._profiles]
if debug:
diff --git a/lib/portage/cache/flat_hash.py b/lib/portage/cache/flat_hash.py
index 7d48bae81..25930f0a4 100644
--- a/lib/portage/cache/flat_hash.py
+++ b/lib/portage/cache/flat_hash.py
@@ -73,9 +73,6 @@ class database(fs_template.FsBased):
v = values.get(k)
if not v:
continue
- # NOTE: This format string requires unicode_literals, so that
- # k and v are coerced to unicode, in order to prevent TypeError
- # when writing raw bytes to TextIOWrapper with Python 2.
myf.write("%s=%s\n" % (k, v))
self._ensure_access(fp)
diff --git a/lib/portage/dbapi/_VdbMetadataDelta.py b/lib/portage/dbapi/_VdbMetadataDelta.py
index ffdc0b361..568e1964a 100644
--- a/lib/portage/dbapi/_VdbMetadataDelta.py
+++ b/lib/portage/dbapi/_VdbMetadataDelta.py
@@ -18,13 +18,12 @@ class VdbMetadataDelta:
self._vardb = vardb
def initialize(self, timestamp):
- f = atomic_ofstream(self._vardb._cache_delta_filename, 'w',
- encoding=_encodings['repo.content'], errors='strict')
- json.dump({
- "version": self._format_version,
- "timestamp": timestamp
+ with atomic_ofstream(self._vardb._cache_delta_filename, 'w',
+ encoding=_encodings['repo.content'], errors='strict') as f:
+ json.dump({
+ "version": self._format_version,
+ "timestamp": timestamp
}, f, ensure_ascii=False)
- f.close()
def load(self):
diff --git a/lib/portage/dbapi/bintree.py b/lib/portage/dbapi/bintree.py
index 592f46f0a..6839a7dd9 100644
--- a/lib/portage/dbapi/bintree.py
+++ b/lib/portage/dbapi/bintree.py
@@ -1256,6 +1256,7 @@ class binarytree:
# process) and then updated it, all while holding a lock.
pkgindex_lock = None
try:
+ os.makedirs(self.pkgdir, exist_ok=True)
pkgindex_lock = lockfile(self._pkgindex_file,
wantnewlockfile=1)
if filename is not None:
diff --git a/lib/portage/dbapi/vartree.py b/lib/portage/dbapi/vartree.py
index 12e6b0d53..b1b9ce0fc 100644
--- a/lib/portage/dbapi/vartree.py
+++ b/lib/portage/dbapi/vartree.py
@@ -628,9 +628,9 @@ class vardbapi(dbapi):
timestamp = time.time()
self._aux_cache["timestamp"] = timestamp
- f = atomic_ofstream(self._aux_cache_filename, 'wb')
- pickle.dump(self._aux_cache, f, protocol=2)
- f.close()
+ with atomic_ofstream(self._aux_cache_filename, 'wb') as f:
+ pickle.dump(self._aux_cache, f, protocol=2)
+
apply_secpass_permissions(
self._aux_cache_filename, mode=0o644)
@@ -5067,7 +5067,14 @@ class dblink:
% (relative_path, myabsto)])
showMessage("%s %s -> %s\n" % (zing, mydest, myto))
- outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime // 1000000000)+"\n")
+ outfile.write(
+ self._format_contents_line(
+ node_type="sym",
+ abs_path=myrealdest,
+ symlink_target=myto,
+ mtime_ns=mymtime,
+ )
+ )
else:
showMessage(_("!!! Failed to move file.\n"),
level=logging.ERROR, noiselevel=-1)
@@ -5168,7 +5175,9 @@ class dblink:
except OSError:
pass
- outfile.write("dir "+myrealdest+"\n")
+ outfile.write(
+ self._format_contents_line(node_type="dir", abs_path=myrealdest)
+ )
# recurse and merge this directory
mergelist.extend(join(relative_path, child) for child in
os.listdir(join(srcroot, relative_path)))
@@ -5216,7 +5225,14 @@ class dblink:
pass
if mymtime != None:
- outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime // 1000000000)+"\n")
+ outfile.write(
+ self._format_contents_line(
+ node_type="obj",
+ abs_path=myrealdest,
+ md5_digest=mymd5,
+ mtime_ns=mymtime,
+ )
+ )
showMessage("%s %s\n" % (zing,mydest))
else:
# we are merging a fifo or device node
@@ -5236,9 +5252,13 @@ class dblink:
else:
return 1
if stat.S_ISFIFO(mymode):
- outfile.write("fif %s\n" % myrealdest)
+ outfile.write(
+ self._format_contents_line(node_type="fif", abs_path=myrealdest)
+ )
else:
- outfile.write("dev %s\n" % myrealdest)
+ outfile.write(
+ self._format_contents_line(node_type="dev", abs_path=myrealdest)
+ )
showMessage(zing + " " + mydest + "\n")
def _protect(self, cfgfiledict, protect_if_modified, src_md5,
@@ -5300,6 +5320,18 @@ class dblink:
return dest, protected, move_me
+ def _format_contents_line(
+ self, node_type, abs_path, md5_digest=None, symlink_target=None, mtime_ns=None
+ ):
+ fields = [node_type, abs_path]
+ if md5_digest is not None:
+ fields.append(md5_digest)
+ elif symlink_target is not None:
+ fields.append("-> {}".format(symlink_target))
+ if mtime_ns is not None:
+ fields.append(str(mtime_ns // 1000000000))
+ return "{}\n".format(" ".join(fields))
+
def _merged_path(self, path, lstatobj, exists=True):
previous_path = self._device_path_map.get(lstatobj.st_dev)
if previous_path is None or previous_path is False or \
diff --git a/lib/portage/dep/__init__.py b/lib/portage/dep/__init__.py
index 3f0ef7ece..e32f01fc0 100644
--- a/lib/portage/dep/__init__.py
+++ b/lib/portage/dep/__init__.py
@@ -1,5 +1,5 @@
# deps.py -- Portage dependency resolution functions
-# Copyright 2003-2020 Gentoo Authors
+# Copyright 2003-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = [
@@ -1259,8 +1259,9 @@ class Atom(str):
self.__dict__['eapi'] = eapi
if eapi is not None:
- # Ignore allow_repo when eapi is specified.
- allow_repo = eapi_attrs.repo_deps
+ # If allow_repo is not set, use default from eapi
+ if allow_repo is None:
+ allow_repo = eapi_attrs.repo_deps
else:
# These parameters have "smart" defaults that are only
# applied when the caller does not explicitly pass in a
diff --git a/lib/portage/eapi.py b/lib/portage/eapi.py
index aca571ebd..796184644 100644
--- a/lib/portage/eapi.py
+++ b/lib/portage/eapi.py
@@ -1,7 +1,9 @@
-# Copyright 2010-2018 Gentoo Foundation
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import collections
+import operator
+import types
from portage import eapi_is_supported
@@ -127,15 +129,63 @@ def eapi_has_sysroot(eapi):
_eapi_attrs = collections.namedtuple('_eapi_attrs',
'allows_package_provided '
- 'bdepend broot dots_in_PN dots_in_use_flags exports_EBUILD_PHASE_FUNC '
- 'exports_PORTDIR exports_ECLASSDIR '
+ 'bdepend '
+ 'broot '
+ 'dots_in_PN dots_in_use_flags '
+ 'exports_AA '
+ 'exports_EBUILD_PHASE_FUNC '
+ 'exports_ECLASSDIR '
+ 'exports_KV '
+ 'exports_merge_type '
+ 'exports_PORTDIR '
+ 'exports_replace_vars '
'feature_flag_test '
'iuse_defaults iuse_effective posixish_locale '
'path_variables_end_with_trailing_slash '
+ 'prefix '
'repo_deps required_use required_use_at_most_one_of slot_operator slot_deps '
'src_uri_arrows strong_blocks use_deps use_dep_defaults '
'empty_groups_always_true sysroot')
+
+_eapi_attr_func_prefixes = (
+ 'eapi_allows_',
+ 'eapi_has_',
+ 'eapi_requires_',
+ 'eapi_supports_',
+ 'eapi_',
+)
+
+
+def _eapi_func_decorator(func, attr_getter):
+ def wrapper(eapi):
+ return attr_getter(_get_eapi_attrs(eapi))
+ wrapper.func = func
+ wrapper.__doc__ = func.__doc__
+ return wrapper
+
+
+def _decorate_eapi_funcs():
+ """
+ Decorate eapi_* functions so that they use _get_eapi_attrs(eapi)
+ to cache results.
+ """
+ decorated = {}
+ for k, v in globals().items():
+ if not (isinstance(v, types.FunctionType) and k.startswith(_eapi_attr_func_prefixes)):
+ continue
+ for prefix in _eapi_attr_func_prefixes:
+ if k.startswith(prefix):
+ attr_name = k[len(prefix):]
+ if hasattr(_eapi_attrs, attr_name):
+ decorated[k] = _eapi_func_decorator(v, operator.attrgetter(attr_name))
+ break
+ globals().update(decorated)
+
+
+_decorate_eapi_funcs()
+
+
_eapi_attrs_cache = {}
def _get_eapi_attrs(eapi):
@@ -155,31 +205,36 @@ def _get_eapi_attrs(eapi):
eapi = None
eapi_attrs = _eapi_attrs(
- allows_package_provided=(eapi is None or eapi_allows_package_provided(eapi)),
- bdepend = (eapi is not None and eapi_has_bdepend(eapi)),
- broot = (eapi is None or eapi_has_broot(eapi)),
- dots_in_PN = (eapi is None or eapi_allows_dots_in_PN(eapi)),
- dots_in_use_flags = (eapi is None or eapi_allows_dots_in_use_flags(eapi)),
- empty_groups_always_true = (eapi is not None and eapi_empty_groups_always_true(eapi)),
- exports_EBUILD_PHASE_FUNC = (eapi is None or eapi_exports_EBUILD_PHASE_FUNC(eapi)),
- exports_PORTDIR = (eapi is None or eapi_exports_PORTDIR(eapi)),
- exports_ECLASSDIR = (eapi is not None and eapi_exports_ECLASSDIR(eapi)),
+ allows_package_provided=(eapi is None or eapi_allows_package_provided.func(eapi)),
+ bdepend = (eapi is not None and eapi_has_bdepend.func(eapi)),
+ broot = (eapi is None or eapi_has_broot.func(eapi)),
+ dots_in_PN = (eapi is None or eapi_allows_dots_in_PN.func(eapi)),
+ dots_in_use_flags = (eapi is None or eapi_allows_dots_in_use_flags.func(eapi)),
+ empty_groups_always_true = (eapi is not None and eapi_empty_groups_always_true.func(eapi)),
+ exports_AA = (eapi is not None and eapi_exports_AA.func(eapi)),
+ exports_EBUILD_PHASE_FUNC = (eapi is None or eapi_exports_EBUILD_PHASE_FUNC.func(eapi)),
+ exports_ECLASSDIR = (eapi is not None and eapi_exports_ECLASSDIR.func(eapi)),
+ exports_KV = (eapi is not None and eapi_exports_KV.func(eapi)),
+ exports_merge_type = (eapi is None or eapi_exports_merge_type.func(eapi)),
+ exports_PORTDIR = (eapi is None or eapi_exports_PORTDIR.func(eapi)),
+ exports_replace_vars = (eapi is None or eapi_exports_replace_vars.func(eapi)),
feature_flag_test = False,
- iuse_defaults = (eapi is None or eapi_has_iuse_defaults(eapi)),
- iuse_effective = (eapi is not None and eapi_has_iuse_effective(eapi)),
+ iuse_defaults = (eapi is None or eapi_has_iuse_defaults.func(eapi)),
+ iuse_effective = (eapi is not None and eapi_has_iuse_effective.func(eapi)),
path_variables_end_with_trailing_slash = (eapi is not None and
- eapi_path_variables_end_with_trailing_slash(eapi)),
- posixish_locale = (eapi is not None and eapi_requires_posixish_locale(eapi)),
- repo_deps = (eapi is None or eapi_has_repo_deps(eapi)),
- required_use = (eapi is None or eapi_has_required_use(eapi)),
- required_use_at_most_one_of = (eapi is None or eapi_has_required_use_at_most_one_of(eapi)),
- slot_deps = (eapi is None or eapi_has_slot_deps(eapi)),
- slot_operator = (eapi is None or eapi_has_slot_operator(eapi)),
- src_uri_arrows = (eapi is None or eapi_has_src_uri_arrows(eapi)),
- strong_blocks = (eapi is None or eapi_has_strong_blocks(eapi)),
- sysroot = (eapi is None or eapi_has_sysroot(eapi)),
- use_deps = (eapi is None or eapi_has_use_deps(eapi)),
- use_dep_defaults = (eapi is None or eapi_has_use_dep_defaults(eapi))
+ eapi_path_variables_end_with_trailing_slash.func(eapi)),
+ posixish_locale = (eapi is not None and eapi_requires_posixish_locale.func(eapi)),
+ prefix = (eapi is None or eapi_supports_prefix.func(eapi)),
+ repo_deps = (eapi is None or eapi_has_repo_deps.func(eapi)),
+ required_use = (eapi is None or eapi_has_required_use.func(eapi)),
+ required_use_at_most_one_of = (eapi is None or eapi_has_required_use_at_most_one_of.func(eapi)),
+ slot_deps = (eapi is None or eapi_has_slot_deps.func(eapi)),
+ slot_operator = (eapi is None or eapi_has_slot_operator.func(eapi)),
+ src_uri_arrows = (eapi is None or eapi_has_src_uri_arrows.func(eapi)),
+ strong_blocks = (eapi is None or eapi_has_strong_blocks.func(eapi)),
+ sysroot = (eapi is None or eapi_has_sysroot.func(eapi)),
+ use_deps = (eapi is None or eapi_has_use_deps.func(eapi)),
+ use_dep_defaults = (eapi is None or eapi_has_use_dep_defaults.func(eapi))
)
_eapi_attrs_cache[orig_eapi] = eapi_attrs
diff --git a/lib/portage/emaint/modules/merges/__init__.py b/lib/portage/emaint/modules/merges/__init__.py
index 89aa758a0..449f39dce 100644
--- a/lib/portage/emaint/modules/merges/__init__.py
+++ b/lib/portage/emaint/modules/merges/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2014 Gentoo Foundation
+# Copyright 2005-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
doc = """Scan for failed merges and fix them."""
@@ -26,7 +26,17 @@ module_spec = {
'action': 'store_true',
'func': 'purge'
}
- }
+ },
+ 'opt_desc': {
+ 'yes': {
+ "short": "-y",
+ "long": "--yes",
+ "help": ("(merges submodule only): Do not prompt for "
+ "emerge invocations"),
+ "action": "store_true",
+ "dest": "yes",
+ }
+ },
}
}
}
diff --git a/lib/portage/emaint/modules/merges/merges.py b/lib/portage/emaint/modules/merges/merges.py
index 775dc59d2..d60916f1e 100644
--- a/lib/portage/emaint/modules/merges/merges.py
+++ b/lib/portage/emaint/modules/merges/merges.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2020 Gentoo Authors
+# Copyright 2005-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import portage
@@ -186,7 +186,7 @@ class MergesHandler:
pkg_atoms.add(pkg_atom)
- def _emerge_pkg_atoms(self, module_output, pkg_atoms):
+ def _emerge_pkg_atoms(self, module_output, pkg_atoms, yes=False):
"""
Emerge the specified packages atoms.
@@ -194,6 +194,8 @@ class MergesHandler:
@type module_output: Class
@param pkg_atoms: packages atoms to emerge
@type pkg_atoms: set
+ @param yes: do not prompt for emerge invocations
+ @type yes: bool
@rtype: list
@return: List of results
"""
@@ -206,7 +208,7 @@ class MergesHandler:
portage._python_interpreter,
'-b',
os.path.join(EPREFIX or '/', 'usr', 'bin', 'emerge'),
- '--ask',
+ '--ask=n' if yes else '--ask',
'--quiet',
'--oneshot',
'--complete-graph=y'
@@ -265,7 +267,8 @@ class MergesHandler:
errors.append(', '.join(sorted(failed_pkgs)))
return (False, errors)
self._remove_failed_dirs(failed_pkgs)
- results = self._emerge_pkg_atoms(module_output, pkg_atoms)
+ results = self._emerge_pkg_atoms(module_output, pkg_atoms,
+ yes=kwargs.get('options', {}).get("yes", False))
# list any new failed merges
for pkg in sorted(self._scan()):
results.append("'%s' still found as a failed merge." % pkg)
diff --git a/lib/portage/emaint/modules/sync/sync.py b/lib/portage/emaint/modules/sync/sync.py
index ce9c0da39..442973142 100644
--- a/lib/portage/emaint/modules/sync/sync.py
+++ b/lib/portage/emaint/modules/sync/sync.py
@@ -277,13 +277,45 @@ class SyncRepos:
mypvs = portage.best(
self.emerge_config.target_config.trees['vartree'].dbapi.match(
portage.const.PORTAGE_PACKAGE_ATOM))
-
- chk_updated_cfg_files(self.emerge_config.target_config.root,
+ try:
+ old_use = (
+ self.emerge_config.target_config.trees["vartree"]
+ .dbapi.aux_get(mypvs, ["USE"])[0]
+ .split()
+ )
+ except KeyError:
+ old_use = ()
+
+ chk_updated_cfg_files(
+ self.emerge_config.target_config.root,
portage.util.shlex_split(
- self.emerge_config.target_config.settings.get("CONFIG_PROTECT", "")))
+ self.emerge_config.target_config.settings.get("CONFIG_PROTECT", "")
+ ),
+ )
msgs = []
- if mybestpv != mypvs and "--quiet" not in self.emerge_config.opts:
+ if not (mybestpv and mypvs) or mybestpv == mypvs or "--quiet" in self.emerge_config.opts:
+ return msgs
+
+ # Suggest to update to the latest available version of portage.
+ # Since changes to PYTHON_TARGETS cause complications, this message
+ # is suppressed if the new version has different PYTHON_TARGETS enabled
+ # than previous version.
+ portdb = self.emerge_config.target_config.trees["porttree"].dbapi
+ portdb.doebuild_settings.setcpv(mybestpv, mydb=portdb)
+ usemask = portdb.doebuild_settings.usemask
+ useforce = portdb.doebuild_settings.useforce
+ new_use = (
+ frozenset(portdb.doebuild_settings["PORTAGE_USE"].split()) | useforce
+ ) - usemask
+ new_python_targets = frozenset(
+ x for x in new_use if x.startswith("python_targets_")
+ )
+ old_python_targets = frozenset(
+ x for x in old_use if x.startswith("python_targets_")
+ )
+
+ if new_python_targets == old_python_targets:
msgs.append('')
msgs.append(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
msgs.append(warn(" * ")+"that you update portage now, before any other packages are updated.")
diff --git a/lib/portage/locks.py b/lib/portage/locks.py
index 193045c03..d0218d7bc 100644
--- a/lib/portage/locks.py
+++ b/lib/portage/locks.py
@@ -1,5 +1,5 @@
# portage: Lock management code
-# Copyright 2004-2020 Gentoo Authors
+# Copyright 2004-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
@@ -8,10 +8,12 @@ __all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
import errno
import fcntl
+import functools
import multiprocessing
import sys
import tempfile
import time
+import typing
import warnings
import portage
@@ -33,64 +35,6 @@ _quiet = False
_lock_fn = None
-
-
-def _get_lock_fn():
- """
- Returns fcntl.lockf if proven to work, and otherwise returns fcntl.flock.
- On some platforms fcntl.lockf is known to be broken.
- """
- global _lock_fn
- if _lock_fn is not None:
- return _lock_fn
-
-
-
- fd, lock_path = tempfile.mkstemp()
- try:
- try:
- fcntl.lockf(fd, fcntl.LOCK_EX)
- except EnvironmentError:
- pass
- else:
- proc = multiprocessing.Process(
- target=_subprocess_test_lock,
- args=(
- # Since file descriptors are not inherited unless the fork start
- # method is used, the subprocess should only try to close an
- # inherited file descriptor for the fork start method.
- fd if multiprocessing.get_start_method() == "fork" else None,
- lock_path,
- ),
- )
- proc.start()
- proc.join()
- if proc.exitcode == os.EX_OK:
- # Use fcntl.lockf because the test passed.
- _lock_fn = fcntl.lockf
- return _lock_fn
- finally:
- os.close(fd)
- os.unlink(lock_path)
-
- # Fall back to fcntl.flock.
- _lock_fn = fcntl.flock
- return _lock_fn
-
-def _subprocess_test_lock(fd, lock_path):
- if fd is not None:
- os.close(fd)
- try:
- with open(lock_path, 'a') as f:
- fcntl.lockf(f.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
- except EnvironmentError as e:
- if e.errno == errno.EAGAIN:
- # Parent process holds lock, as expected.
- sys.exit(0)
-
- # Something went wrong.
- sys.exit(1)
-
_open_fds = {}
_open_inodes = {}
@@ -115,6 +59,69 @@ class _lock_manager:
del _open_inodes[self.inode_key]
+def _get_lock_fn():
+ """
+ Returns fcntl.lockf if proven to work, and otherwise returns fcntl.flock.
+ On some platforms fcntl.lockf is known to be broken.
+ """
+ global _lock_fn
+ if _lock_fn is not None:
+ return _lock_fn
+
+ if _test_lock_fn(
+ lambda path, fd, flags: fcntl.lockf(fd, flags) and functools.partial(
+ unlockfile, (path, fd, flags, fcntl.lockf)
+ )
+ ):
+ _lock_fn = fcntl.lockf
+ return _lock_fn
+
+ # Fall back to fcntl.flock.
+ _lock_fn = fcntl.flock
+ return _lock_fn
+
+
+def _test_lock_fn(lock_fn: typing.Callable[[str, int, int], typing.Callable[[], None]]) -> bool:
+ def _test_lock(fd, lock_path):
+ os.close(fd)
+ try:
+ with open(lock_path, 'a') as f:
+ lock_fn(lock_path, f.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except (TryAgain, EnvironmentError) as e:
+ if isinstance(e, TryAgain) or e.errno == errno.EAGAIN:
+ # Parent process holds lock, as expected.
+ sys.exit(0)
+
+
+ # Something went wrong.
+ sys.exit(1)
+
+ fd, lock_path = tempfile.mkstemp()
+ unlock_fn = None
+ try:
+ try:
+ unlock_fn = lock_fn(lock_path, fd, fcntl.LOCK_EX)
+ except (TryAgain, EnvironmentError):
+ pass
+ else:
+ _lock_manager(fd, os.fstat(fd), lock_path)
+ proc = multiprocessing.Process(target=_test_lock,
+ args=(fd, lock_path))
+ proc.start()
+ proc.join()
+ if proc.exitcode == os.EX_OK:
+ # the test passed
+ return True
+ finally:
+ try:
+ os.unlink(lock_path)
+ except OSError:
+ pass
+ if unlock_fn is not None:
+ unlock_fn()
+ return False
+
+
def _close_fds():
"""
This is intended to be called after a fork, in order to close file
diff --git a/lib/portage/package/ebuild/_config/KeywordsManager.py b/lib/portage/package/ebuild/_config/KeywordsManager.py
index bf68a88ac..8dcaee0d9 100644
--- a/lib/portage/package/ebuild/_config/KeywordsManager.py
+++ b/lib/portage/package/ebuild/_config/KeywordsManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -12,6 +12,7 @@ from portage import os
from portage.dep import ExtendedAtomDict
from portage.localization import _
from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+from portage.repository.config import allow_profile_repo_deps
from portage.util import grabdict_package, stack_lists
from portage.versions import _pkg_str
@@ -25,6 +26,7 @@ class KeywordsManager:
os.path.join(x.location, "package.keywords"),
recursive=x.portage1_directories,
verify_eapi=True, eapi=x.eapi, eapi_default=None,
+ allow_repo=allow_profile_repo_deps(x),
allow_build_id=x.allow_build_id)
for x in profiles]
for pkeyworddict in rawpkeywords:
@@ -41,7 +43,8 @@ class KeywordsManager:
raw_p_accept_keywords = [grabdict_package(
os.path.join(x.location, "package.accept_keywords"),
recursive=x.portage1_directories,
- verify_eapi=True, eapi=x.eapi, eapi_default=None)
+ verify_eapi=True, eapi=x.eapi, eapi_default=None,
+ allow_repo=allow_profile_repo_deps(x))
for x in profiles]
for d in raw_p_accept_keywords:
if not d:
diff --git a/lib/portage/package/ebuild/_config/LocationsManager.py b/lib/portage/package/ebuild/_config/LocationsManager.py
index 327400ad6..433232a7a 100644
--- a/lib/portage/package/ebuild/_config/LocationsManager.py
+++ b/lib/portage/package/ebuild/_config/LocationsManager.py
@@ -1,11 +1,10 @@
-# Copyright 2010-2018 Gentoo Foundation
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = (
'LocationsManager',
)
-import collections
import io
import warnings
@@ -20,7 +19,7 @@ from portage.util import ensure_dirs, grabfile, \
normalize_path, read_corresponding_eapi_file, shlex_split, writemsg
from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
from portage.repository.config import parse_layout_conf, \
- _portage1_profiles_allow_directories
+ _portage1_profiles_allow_directories, _profile_node
_PORTAGE1_DIRECTORIES = frozenset([
@@ -28,12 +27,6 @@ _PORTAGE1_DIRECTORIES = frozenset([
'package.use', 'package.use.mask', 'package.use.force',
'use.mask', 'use.force'])
-_profile_node = collections.namedtuple('_profile_node',
- ('location', 'portage1_directories', 'user_config',
- 'profile_formats', 'eapi', 'allow_build_id',
- 'show_deprecated_warning',
-))
-
_allow_parent_colon = frozenset(
["portage-2"])
diff --git a/lib/portage/package/ebuild/_config/MaskManager.py b/lib/portage/package/ebuild/_config/MaskManager.py
index 7714456e1..b0c2b55da 100644
--- a/lib/portage/package/ebuild/_config/MaskManager.py
+++ b/lib/portage/package/ebuild/_config/MaskManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2018 Gentoo Foundation
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -10,6 +10,7 @@ import warnings
from portage import os
from portage.dep import ExtendedAtomDict, match_from_list
from portage.localization import _
+from portage.repository.config import allow_profile_repo_deps
from portage.util import append_repo, grabfile_package, stack_lists, writemsg
from portage.versions import _pkg_str
@@ -41,6 +42,7 @@ class MaskManager:
recursive=repo_config.portage1_profiles,
remember_source_file=True, verify_eapi=True,
eapi_default=repo_config.eapi,
+ allow_repo=allow_profile_repo_deps(repo_config),
allow_build_id=("build-id"
in repo_config.profile_formats))
if repo_config.portage1_profiles_compat and os.path.isdir(path):
@@ -110,6 +112,7 @@ class MaskManager:
repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \
recursive=1, remember_source_file=True,
verify_eapi=True, eapi_default=repo.eapi,
+ allow_repo=allow_profile_repo_deps(repo),
allow_build_id=("build-id" in repo.profile_formats))
lines = stack_lists([repo_lines], incremental=1, \
remember_source_file=True, warn_for_unmatched_removal=True,
@@ -126,6 +129,7 @@ class MaskManager:
recursive=x.portage1_directories,
remember_source_file=True, verify_eapi=True,
eapi=x.eapi, eapi_default=None,
+ allow_repo=allow_profile_repo_deps(x),
allow_build_id=x.allow_build_id))
if x.portage1_directories:
profile_pkgunmasklines.append(grabfile_package(
@@ -133,6 +137,7 @@ class MaskManager:
recursive=x.portage1_directories,
remember_source_file=True, verify_eapi=True,
eapi=x.eapi, eapi_default=None,
+ allow_repo=allow_profile_repo_deps(x),
allow_build_id=x.allow_build_id))
profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \
remember_source_file=True, warn_for_unmatched_removal=True,
diff --git a/lib/portage/package/ebuild/_config/UseManager.py b/lib/portage/package/ebuild/_config/UseManager.py
index 882b0efa9..656c6199b 100644
--- a/lib/portage/package/ebuild/_config/UseManager.py
+++ b/lib/portage/package/ebuild/_config/UseManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2014 Gentoo Foundation
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -11,6 +11,7 @@ from portage.dep import Atom, dep_getrepo, dep_getslot, ExtendedAtomDict, remove
from portage.eapi import eapi_has_use_aliases, eapi_supports_stable_use_forcing_and_masking
from portage.exception import InvalidAtom
from portage.localization import _
+from portage.repository.config import allow_profile_repo_deps
from portage.util import grabfile, grabdict, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
from portage.versions import _pkg_str
@@ -154,7 +155,7 @@ class UseManager:
def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True,
eapi_filter=None, user_config=False, eapi=None, eapi_default="0",
- allow_build_id=False):
+ allow_repo=False, allow_build_id=False):
"""
@param file_name: input file name
@type file_name: str
@@ -193,8 +194,9 @@ class UseManager:
ret = ExtendedAtomDict(dict)
else:
ret = {}
+ allow_repo = allow_repo or extended_syntax
file_dict = grabdict_package(file_name, recursive=recursive,
- allow_wildcard=extended_syntax, allow_repo=extended_syntax,
+ allow_wildcard=extended_syntax, allow_repo=allow_repo,
verify_eapi=(not extended_syntax), eapi=eapi,
eapi_default=eapi_default, allow_build_id=allow_build_id,
allow_use=False)
@@ -277,6 +279,7 @@ class UseManager:
ret[repo.name] = self._parse_file_to_dict(
os.path.join(repo.location, "profiles", file_name),
eapi_filter=eapi_filter, eapi_default=repo.eapi,
+ allow_repo=allow_profile_repo_deps(repo),
allow_build_id=("build-id" in repo.profile_formats))
return ret
@@ -294,7 +297,8 @@ class UseManager:
os.path.join(profile.location, file_name), juststrings,
recursive=profile.portage1_directories, eapi_filter=eapi_filter,
user_config=profile.user_config, eapi=profile.eapi,
- eapi_default=None, allow_build_id=profile.allow_build_id)
+ eapi_default=None, allow_build_id=profile.allow_build_id,
+ allow_repo=allow_profile_repo_deps(profile))
for profile in locations)
def _parse_repository_usealiases(self, repositories):
diff --git a/lib/portage/package/ebuild/config.py b/lib/portage/package/ebuild/config.py
index 4a43eaf7b..f56e39c47 100644
--- a/lib/portage/package/ebuild/config.py
+++ b/lib/portage/package/ebuild/config.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = [
@@ -42,7 +42,10 @@ from portage.exception import InvalidDependString, PortageException
from portage.localization import _
from portage.output import colorize
from portage.process import fakeroot_capable, sandbox_capable, macossandbox_capable
-from portage.repository.config import load_repository_config
+from portage.repository.config import (
+ allow_profile_repo_deps,
+ load_repository_config,
+)
from portage.util import ensure_dirs, getconfig, grabdict, \
grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
@@ -593,6 +596,7 @@ class config:
packages_list = [grabfile_package(
os.path.join(x.location, "packages"),
verify_eapi=True, eapi=x.eapi, eapi_default=None,
+ allow_repo=allow_profile_repo_deps(x),
allow_build_id=x.allow_build_id)
for x in profiles_complex]
except EnvironmentError as e:
@@ -612,9 +616,20 @@ class config:
mygcfg = {}
if profiles_complex:
- mygcfg_dlists = [getconfig(os.path.join(x.location, "make.defaults"),
- tolerant=tolerant, expand=expand_map, recursive=x.portage1_directories)
- for x in profiles_complex]
+ mygcfg_dlists = []
+ for x in profiles_complex:
+ # Prevent accidents triggered by USE="${USE} ..." settings
+ # at the top of make.defaults which caused parent profile
+ # USE to override parent profile package.use settings.
+ # It would be nice to guard USE_EXPAND variables like
+ # this too, but unfortunately USE_EXPAND is not known
+ # until after make.defaults has been evaluated, so that
+ # will require some form of make.defaults preprocessing.
+ expand_map.pop("USE", None)
+ mygcfg_dlists.append(
+ getconfig(os.path.join(x.location, "make.defaults"),
+ tolerant=tolerant, expand=expand_map,
+ recursive=x.portage1_directories))
self._make_defaults = mygcfg_dlists
mygcfg = stack_dicts(mygcfg_dlists,
incrementals=self.incrementals)
@@ -801,7 +816,8 @@ class config:
portage.dep.ExtendedAtomDict(dict)
bashrc = grabdict_package(os.path.join(profile.location,
"package.bashrc"), recursive=1, allow_wildcard=True,
- allow_repo=True, verify_eapi=True,
+ allow_repo=allow_profile_repo_deps(profile),
+ verify_eapi=True,
eapi=profile.eapi, eapi_default=None,
allow_build_id=profile.allow_build_id)
if not bashrc:
diff --git a/lib/portage/package/ebuild/fetch.py b/lib/portage/package/ebuild/fetch.py
index c47817bd1..eebd03dcb 100644
--- a/lib/portage/package/ebuild/fetch.py
+++ b/lib/portage/package/ebuild/fetch.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ['fetch']
@@ -345,6 +345,57 @@ _size_suffix_map = {
}
+class DistfileName(str):
+ """
+ The DistfileName type represents a distfile name and associated
+ content digests, used by MirrorLayoutConfig and related layout
+ implementations.
+
+ The path of a distfile within a layout must be dependent on
+ nothing more than the distfile name and its associated content
+ digests. For filename-hash layout, path is dependent on distfile
+ name alone, and the get_filenames implementation yields strings
+ corresponding to distfile names. For content-hash layout, path is
+ dependent on content digest alone, and the get_filenames
+ implementation yields DistfileName instances whose names are equal
+ to content digest values. The content-hash layout simply lacks
+ the filename-hash layout's innate ability to translate a distfile
+ path to a distfile name, and instead carries an innate ability
+ to translate a distfile path to a content digest.
+
+ In order to prepare for a migration from filename-hash to
+ content-hash layout, all consumers of the layout get_filenames
+ method need to be updated to work with content digests as a
+ substitute for distfile names. For example, emirrordist requires
+ the --content-db option when working with a content-hash layout,
+ which serves as a means to associate distfile names
+ with content digest values yielded by the content-hash get_filenames
+ implementation.
+ """
+ def __new__(cls, s, digests=None):
+ return str.__new__(cls, s)
+
+ def __init__(self, s, digests=None):
+ super().__init__()
+ self.digests = {} if digests is None else digests
+
+ def digests_equal(self, other):
+ """
+ Test if digests compare equal to those of another instance.
+ """
+ if not isinstance(other, DistfileName):
+ return False
+ matches = []
+ for algo, digest in self.digests.items():
+ other_digest = other.digests.get(algo)
+ if other_digest is not None:
+ if other_digest == digest:
+ matches.append(algo)
+ else:
+ return False
+ return bool(matches)
+
+
class FlatLayout:
def get_path(self, filename):
return filename
@@ -414,6 +465,97 @@ class FilenameHashLayout:
return False
+class ContentHashLayout(FilenameHashLayout):
+ """
+ The content-hash layout is identical to the filename-hash layout,
+ except for these three differences:
+
+ 1) A content digest is used instead of a filename digest.
+
+ 2) The final element of the path returned from the get_path method
+ corresponds to the complete content digest. The path is a function
+ of the content digest alone.
+
+ 3) Because the path is a function of content digest alone, the
+ get_filenames implementation cannot derive distfile names from
+ paths, so it instead yields DistfileName instances whose names are
+ equal to content digest values. The DistfileName documentation
+ discusses resulting implications.
+
+ Motivations to use the content-hash layout instead of the
+ filename-hash layout may include:
+
+ 1) Since the file path is independent of the file name, file
+ name collisions cannot occur. This makes the content-hash
+ layout suitable for storage of multiple types of files (not
+ only gentoo distfiles). For example, it can be used to store
+ distfiles for multiple linux distros within the same tree,
+ with automatic deduplication based on content digest. This
+ layout can be used to store and distribute practically anything
+ (including binary packages for example).
+
+ 2) Allows multiple revisions for the same distfile name. An
+ existing distfile can be updated, and if a user still has an
+ older copy of an ebuild repository (or an overlay), then a user
+ can successfully fetch a desired revision of the distfile as
+ long as it has not been purged from the mirror.
+
+ 3) File integrity data is integrated into the layout itself,
+ making it very simple to verify the integrity of any file that
+ it contains. The only tool required is an implementation of
+ the chosen hash algorithm.
+ """
+
+ def get_path(self, filename):
+ """
+ For content-hash, the path is a function of the content digest alone.
+ The final element of the path returned from the get_path method
+ corresponds to the complete content digest.
+ """
+ fnhash = remaining = filename.digests[self.algo]
+ ret = ""
+ for c in self.cutoffs:
+ assert c % 4 == 0
+ c = c // 4
+ ret += remaining[:c] + "/"
+ remaining = remaining[c:]
+ return ret + fnhash
+
+ def get_filenames(self, distdir):
+ """
+ Yields DistfileName instances each with filename corresponding
+ to a digest value for self.algo, and which can be compared to
+ other DistfileName instances with their digests_equal method.
+ """
+ for filename in super(ContentHashLayout, self).get_filenames(distdir):
+ yield DistfileName(
+ filename, digests=dict([(self.algo, filename)])
+ )
+
+ @staticmethod
+ def verify_args(args, filename=None):
+ """
+ If the filename argument is given, then supported hash
+ algorithms are constrained by digests available in the filename
+ digests attribute.
+
+ @param args: layout.conf entry args
+ @param filename: filename with digests attribute
+ @return: True if args are valid for available digest algorithms,
+ and False otherwise
+ """
+ if len(args) != 3:
+ return False
+ if filename is None:
+ supported_algos = get_valid_checksum_keys()
+ else:
+ supported_algos = filename.digests
+ algo = args[1].upper()
+ if algo not in supported_algos:
+ return False
+ return FilenameHashLayout.verify_args(args)
+
+
class MirrorLayoutConfig:
"""
Class to read layout.conf from a mirror.
@@ -440,20 +582,41 @@ class MirrorLayoutConfig:
self.structure = data
@staticmethod
- def validate_structure(val):
+ def validate_structure(val, filename=None):
+ """
+ If the filename argument is given, then supported hash
+ algorithms are constrained by digests available in the filename
+ digests attribute.
+
+ @param val: layout.conf entry args
+ @param filename: filename with digests attribute
+ @return: True if args are valid for available digest algorithms,
+ and False otherwise
+ """
if val[0] == 'flat':
return FlatLayout.verify_args(val)
- if val[0] == 'filename-hash':
+ elif val[0] == 'filename-hash':
return FilenameHashLayout.verify_args(val)
+ elif val[0] == 'content-hash':
+ return ContentHashLayout.verify_args(val, filename=filename)
return False
- def get_best_supported_layout(self):
+ def get_best_supported_layout(self, filename=None):
+ """
+ If the filename argument is given, then acceptable hash
+ algorithms are constrained by digests available in the filename
+ digests attribute.
+
+ @param filename: filename with digests attribute
+ """
for val in self.structure:
- if self.validate_structure(val):
+ if self.validate_structure(val, filename=filename):
if val[0] == 'flat':
return FlatLayout(*val[1:])
- if val[0] == 'filename-hash':
+ elif val[0] == 'filename-hash':
return FilenameHashLayout(*val[1:])
+ elif val[0] == 'content-hash':
+ return ContentHashLayout(*val[1:])
# fallback
return FlatLayout()
@@ -466,6 +629,8 @@ class MirrorLayoutConfig:
ret.append(FlatLayout(*val[1:]))
elif val[0] == 'filename-hash':
ret.append(FilenameHashLayout(*val[1:]))
+ elif val[0] == 'content-hash':
+ ret.append(ContentHashLayout(*val[1:]))
if not ret:
ret.append(FlatLayout())
return ret
@@ -516,7 +681,7 @@ def get_mirror_url(mirror_url, filename, mysettings, cache_path=None):
# For some protocols, urlquote is required for correct behavior,
# and it must not be used for other protocols like rsync and sftp.
- path = mirror_conf.get_best_supported_layout().get_path(filename)
+ path = mirror_conf.get_best_supported_layout(filename=filename).get_path(filename)
if urlparse(mirror_url).scheme in ('ftp', 'http', 'https'):
path = urlquote(path)
return mirror_url + "/distfiles/" + path
@@ -723,15 +888,23 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
if hasattr(myuris, 'items'):
for myfile, uri_set in myuris.items():
for myuri in uri_set:
- file_uri_tuples.append((myfile, myuri))
+ file_uri_tuples.append(
+ (DistfileName(myfile, digests=mydigests.get(myfile)), myuri)
+ )
if not uri_set:
- file_uri_tuples.append((myfile, None))
+ file_uri_tuples.append(
+ (DistfileName(myfile, digests=mydigests.get(myfile)), None)
+ )
else:
for myuri in myuris:
if urlparse(myuri).scheme:
- file_uri_tuples.append((os.path.basename(myuri), myuri))
+ file_uri_tuples.append(
+ (DistfileName(os.path.basename(myuri), digests=mydigests.get(os.path.basename(myuri))), myuri)
+ )
else:
- file_uri_tuples.append((os.path.basename(myuri), None))
+ file_uri_tuples.append(
+ (DistfileName(os.path.basename(myuri), digests=mydigests.get(os.path.basename(myuri))), None)
+ )
filedict = OrderedDict()
primaryuri_dict = {}
diff --git a/lib/portage/repository/config.py b/lib/portage/repository/config.py
index f7c956dd8..d81559e3a 100644
--- a/lib/portage/repository/config.py
+++ b/lib/portage/repository/config.py
@@ -1,16 +1,21 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import collections
import io
import logging
import warnings
import re
+import typing
import portage
from portage import eclass_cache, os
from portage.checksum import get_valid_checksum_keys
from portage.const import (PORTAGE_BASE_PATH, REPO_NAME_LOC, USER_CONFIG_PATH)
-from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
+from portage.eapi import (
+ eapi_allows_directories_on_profile_level_and_repository_level,
+ eapi_has_repo_deps,
+)
from portage.env.loaders import KeyValuePairFileLoader
from portage.util import (normalize_path, read_corresponding_eapi_file, shlex_split,
stack_lists, writemsg, writemsg_level, _recursive_file_list)
@@ -25,13 +30,25 @@ from portage import _encodings
from portage import manifest
import portage.sync
+_profile_node = collections.namedtuple(
+ "_profile_node",
+ (
+ "location",
+ "portage1_directories",
+ "user_config",
+ "profile_formats",
+ "eapi",
+ "allow_build_id",
+ "show_deprecated_warning",
+ ),
+)
# Characters prohibited by repoman's file.name check.
_invalid_path_char_re = re.compile(r'[^a-zA-Z0-9._\-+/]')
_valid_profile_formats = frozenset(
['pms', 'portage-1', 'portage-2', 'profile-bashrcs', 'profile-set',
- 'profile-default-eapi', 'build-id'])
+ 'profile-default-eapi', 'build-id', 'profile-repo-deps'])
_portage1_profiles_allow_directories = frozenset(
["portage-1-compat", "portage-1", 'portage-2'])
@@ -1076,6 +1093,8 @@ class RepoConfigLoader:
keys = bool_keys + str_or_int_keys + str_tuple_keys + repo_config_tuple_keys
config_string = ""
for repo_name, repo in sorted(self.prepos.items(), key=lambda x: (x[0] != "DEFAULT", x[0])):
+ if repo_name != repo.name:
+ continue
config_string += "\n[%s]\n" % repo_name
for key in sorted(keys):
if key == "main_repo" and repo_name != "DEFAULT":
@@ -1094,6 +1113,17 @@ class RepoConfigLoader:
config_string += "%s = %s\n" % (o, v)
return config_string.lstrip("\n")
+def allow_profile_repo_deps(
+ repo: typing.Union[RepoConfig, _profile_node],
+) -> bool:
+ if eapi_has_repo_deps(repo.eapi):
+ return True
+
+ if 'profile-repo-deps' in repo.profile_formats:
+ return True
+
+ return False
+
def load_repository_config(settings, extra_files=None):
repoconfigpaths = []
if "PORTAGE_REPOSITORIES" in settings:
diff --git a/lib/portage/tests/dep/test_isvalidatom.py b/lib/portage/tests/dep/test_isvalidatom.py
index 58d999646..4203be07b 100644
--- a/lib/portage/tests/dep/test_isvalidatom.py
+++ b/lib/portage/tests/dep/test_isvalidatom.py
@@ -1,4 +1,4 @@
-# Copyright 2006-2013 Gentoo Foundation
+# Copyright 2006-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -6,12 +6,13 @@ from portage.dep import isvalidatom
class IsValidAtomTestCase:
def __init__(self, atom, expected, allow_wildcard=False,
- allow_repo=False, allow_build_id=False):
+ allow_repo=False, allow_build_id=False, eapi=None):
self.atom = atom
self.expected = expected
self.allow_wildcard = allow_wildcard
self.allow_repo = allow_repo
self.allow_build_id = allow_build_id
+ self.eapi = eapi
class IsValidAtom(TestCase):
@@ -137,6 +138,24 @@ class IsValidAtom(TestCase):
IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", False, allow_repo=False),
IsValidAtomTestCase("null/portage::repo", False, allow_repo=False),
+ # Testing repo atoms with eapi
+
+ # If allow_repo is None, it should be overwritten by eapi
+ IsValidAtomTestCase("sys-apps/portage::repo", True, allow_repo=None),
+ IsValidAtomTestCase("sys-apps/portage::repo", False, allow_repo=None, eapi="5"),
+ IsValidAtomTestCase("sys-apps/portage::repo", True, allow_repo=None, eapi="5-progress"),
+ IsValidAtomTestCase("sys-apps/portage::repo", False, allow_repo=None, eapi="7"),
+
+ # If allow_repo is not None, it should not be overwritten by eapi
+ IsValidAtomTestCase("sys-apps/portage::repo", False, allow_repo=False),
+ IsValidAtomTestCase("sys-apps/portage::repo", False, allow_repo=False, eapi="5"),
+ IsValidAtomTestCase("sys-apps/portage::repo", False, allow_repo=False, eapi="5-progress"),
+ IsValidAtomTestCase("sys-apps/portage::repo", False, allow_repo=False, eapi="7"),
+ IsValidAtomTestCase("sys-apps/portage::repo", True, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage::repo", True, allow_repo=True, eapi="5"),
+ IsValidAtomTestCase("sys-apps/portage::repo", True, allow_repo=True, eapi="5-progress"),
+ IsValidAtomTestCase("sys-apps/portage::repo", True, allow_repo=True, eapi="7"),
+
IsValidAtomTestCase("virtual/ffmpeg:0/53", True),
IsValidAtomTestCase("virtual/ffmpeg:0/53=", True),
IsValidAtomTestCase("virtual/ffmpeg:0/53*", False),
@@ -157,6 +176,7 @@ class IsValidAtom(TestCase):
atom_type = "invalid"
self.assertEqual(bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard,
allow_repo=test_case.allow_repo,
- allow_build_id=test_case.allow_build_id)),
+ allow_build_id=test_case.allow_build_id,
+ eapi=test_case.eapi)),
test_case.expected,
msg="isvalidatom(%s) != %s" % (test_case.atom, test_case.expected))
diff --git a/lib/portage/tests/ebuild/test_fetch.py b/lib/portage/tests/ebuild/test_fetch.py
index 5b67dc519..24990e4db 100644
--- a/lib/portage/tests/ebuild/test_fetch.py
+++ b/lib/portage/tests/ebuild/test_fetch.py
@@ -1,13 +1,15 @@
-# Copyright 2019-2020 Gentoo Authors
+# Copyright 2019-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
import io
import tempfile
+import types
import portage
from portage import shutil, os
-from portage.const import BASH_BINARY, PORTAGE_PYM_PATH
+from portage.checksum import checksum_str
+from portage.const import BASH_BINARY, MANIFEST2_HASH_DEFAULTS, PORTAGE_PYM_PATH
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
from portage.tests.util.test_socks5 import AsyncHTTPServer
@@ -18,8 +20,16 @@ from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.global_event_loop import global_event_loop
from portage.package.ebuild.config import config
from portage.package.ebuild.digestgen import digestgen
-from portage.package.ebuild.fetch import (_download_suffix, fetch, FlatLayout,
- FilenameHashLayout, MirrorLayoutConfig)
+from portage.package.ebuild.fetch import (
+ ContentHashLayout,
+ DistfileName,
+ _download_suffix,
+ fetch,
+ FilenameHashLayout,
+ FlatLayout,
+ MirrorLayoutConfig,
+)
+from portage._emirrordist.Config import Config as EmirrordistConfig
from _emerge.EbuildFetcher import EbuildFetcher
from _emerge.Package import Package
@@ -50,23 +60,10 @@ class EbuildFetchTestCase(TestCase):
loop = SchedulerInterface(global_event_loop())
- def run_async(func, *args, **kwargs):
- with ForkExecutor(loop=loop) as executor:
- return loop.run_until_complete(loop.run_in_executor(executor,
- functools.partial(func, *args, **kwargs)))
-
scheme = 'http'
host = '127.0.0.1'
content = {}
- content['/distfiles/layout.conf'] = b'[structure]\n0=flat\n'
-
- for k, v in distfiles.items():
- # mirror path
- content['/distfiles/{}'.format(k)] = v
- # upstream path
- content['/distfiles/{}.txt'.format(k)] = v
-
with AsyncHTTPServer(host, content, loop) as server:
ebuilds_subst = {}
for cpv, metadata in ebuilds.items():
@@ -86,22 +83,106 @@ class EbuildFetchTestCase(TestCase):
playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles, user_config=user_config_subst)
ro_distdir = tempfile.mkdtemp()
- eubin = os.path.join(playground.eprefix, "usr", "bin")
try:
- fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
- fetch_bin = portage.process.find_binary(fetchcommand[0])
- if fetch_bin is None:
- self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND']))
- os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
- resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
- resume_bin = portage.process.find_binary(resumecommand[0])
- if resume_bin is None:
- self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND']))
- if resume_bin != fetch_bin:
- os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin)))
- root_config = playground.trees[playground.eroot]['root_config']
- portdb = root_config.trees["porttree"].dbapi
+ self._testEbuildFetch(loop, scheme, host, distfiles, ebuilds, content, server, playground, ro_distdir)
+ finally:
+ shutil.rmtree(ro_distdir)
+ playground.cleanup()
+
+ def _testEbuildFetch(
+ self,
+ loop,
+ scheme,
+ host,
+ orig_distfiles,
+ ebuilds,
+ content,
+ server,
+ playground,
+ ro_distdir,
+ ):
+ mirror_layouts = (
+ (
+ "[structure]",
+ "0=filename-hash BLAKE2B 8",
+ "1=flat",
+ ),
+ (
+ "[structure]",
+ "1=filename-hash BLAKE2B 8",
+ "0=flat",
+ ),
+ (
+ "[structure]",
+ "0=content-hash SHA512 8:8:8",
+ "1=flat",
+ ),
+ )
+
+ fetchcommand = portage.util.shlex_split(playground.settings["FETCHCOMMAND"])
+ fetch_bin = portage.process.find_binary(fetchcommand[0])
+ if fetch_bin is None:
+ self.skipTest(
+ "FETCHCOMMAND not found: {}".format(playground.settings["FETCHCOMMAND"])
+ )
+ eubin = os.path.join(playground.eprefix, "usr", "bin")
+ os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
+ resumecommand = portage.util.shlex_split(playground.settings["RESUMECOMMAND"])
+ resume_bin = portage.process.find_binary(resumecommand[0])
+ if resume_bin is None:
+ self.skipTest(
+ "RESUMECOMMAND not found: {}".format(
+ playground.settings["RESUMECOMMAND"]
+ )
+ )
+ if resume_bin != fetch_bin:
+ os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin)))
+ root_config = playground.trees[playground.eroot]["root_config"]
+ portdb = root_config.trees["porttree"].dbapi
+
+ def run_async(func, *args, **kwargs):
+ with ForkExecutor(loop=loop) as executor:
+ return loop.run_until_complete(
+ loop.run_in_executor(
+ executor, functools.partial(func, *args, **kwargs)
+ )
+ )
+
+ for layout_lines in mirror_layouts:
settings = config(clone=playground.settings)
+ layout_data = "".join("{}\n".format(line) for line in layout_lines)
+ mirror_conf = MirrorLayoutConfig()
+ mirror_conf.read_from_file(io.StringIO(layout_data))
+ layouts = mirror_conf.get_all_layouts()
+ content["/distfiles/layout.conf"] = layout_data.encode("utf8")
+ distfiles = {}
+ for k, v in orig_distfiles.items():
+ filename = DistfileName(
+ k,
+ digests=dict((algo, checksum_str(v, hashname=algo)) for algo in MANIFEST2_HASH_DEFAULTS),
+ )
+ distfiles[filename] = v
+
+ # mirror path
+ for layout in layouts:
+ content["/distfiles/" + layout.get_path(filename)] = v
+ # upstream path
+ content["/distfiles/{}.txt".format(k)] = v
+
+ shutil.rmtree(settings["DISTDIR"])
+ os.makedirs(settings["DISTDIR"])
+ with open(os.path.join(settings['DISTDIR'], 'layout.conf'), 'wt') as f:
+ f.write(layout_data)
+
+ if any(isinstance(layout, ContentHashLayout) for layout in layouts):
+ content_db = os.path.join(playground.eprefix, 'var/db/emirrordist/content.db')
+ os.makedirs(os.path.dirname(content_db), exist_ok=True)
+ try:
+ os.unlink(content_db)
+ except OSError:
+ pass
+ else:
+ content_db = None
# Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
foo_uri = {'foo': ('{scheme}://{host}:{port}/distfiles/foo'.format(scheme=scheme, host=host, port=server.server_port),)}
@@ -164,23 +245,43 @@ class EbuildFetchTestCase(TestCase):
os.path.join(self.bindir, 'emirrordist'),
'--distfiles', settings['DISTDIR'],
'--config-root', settings['EPREFIX'],
+ '--delete',
'--repositories-configuration', settings.repositories.config_string(),
'--repo', 'test_repo', '--mirror')
+ if content_db is not None:
+ emirrordist_cmd = emirrordist_cmd + ('--content-db', content_db,)
+
env = settings.environ()
env['PYTHONPATH'] = ':'.join(
filter(None, [PORTAGE_PYM_PATH] + os.environ.get('PYTHONPATH', '').split(':')))
for k in distfiles:
- os.unlink(os.path.join(settings['DISTDIR'], k))
+ try:
+ os.unlink(os.path.join(settings['DISTDIR'], k))
+ except OSError:
+ pass
proc = loop.run_until_complete(asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
self.assertEqual(loop.run_until_complete(proc.wait()), 0)
for k in distfiles:
- with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
+ with open(os.path.join(settings['DISTDIR'], layouts[0].get_path(k)), 'rb') as f:
self.assertEqual(f.read(), distfiles[k])
+ if content_db is not None:
+ loop.run_until_complete(
+ self._test_content_db(
+ emirrordist_cmd,
+ env,
+ layouts,
+ content_db,
+ distfiles,
+ settings,
+ portdb,
+ )
+ )
+
# Tests only work with one ebuild at a time, so the config
# pool only needs a single config instance.
class config_pool:
@@ -354,9 +455,125 @@ class EbuildFetchTestCase(TestCase):
os.chmod(settings['DISTDIR'], orig_distdir_mode)
settings.features.remove('skiprocheck')
settings.features.add('distlocks')
- finally:
- shutil.rmtree(ro_distdir)
- playground.cleanup()
+
+ async def _test_content_db(
+ self, emirrordist_cmd, env, layouts, content_db, distfiles, settings, portdb
+ ):
+ # Simulate distfile digest change for ContentDB.
+ emdisopts = types.SimpleNamespace(
+ content_db=content_db, distfiles=settings["DISTDIR"]
+ )
+ with EmirrordistConfig(
+ emdisopts, portdb, asyncio.get_event_loop()
+ ) as emdisconf:
+ # Copy revisions from bar to foo.
+ for revision_key in emdisconf.content_db["filename:{}".format("bar")]:
+ emdisconf.content_db.add(
+ DistfileName("foo", digests=dict(revision_key))
+ )
+
+ # Copy revisions from foo to bar.
+ for revision_key in emdisconf.content_db["filename:{}".format("foo")]:
+ emdisconf.content_db.add(
+ DistfileName("bar", digests=dict(revision_key))
+ )
+
+ content_db_state = dict(emdisconf.content_db.items())
+ self.assertEqual(content_db_state, dict(emdisconf.content_db.items()))
+ self.assertEqual(
+ [
+ k[len("filename:") :]
+ for k in content_db_state
+ if k.startswith("filename:")
+ ],
+ ["bar", "foo"],
+ )
+ self.assertEqual(
+ content_db_state["filename:foo"], content_db_state["filename:bar"]
+ )
+ self.assertEqual(len(content_db_state["filename:foo"]), 2)
+
+ for k in distfiles:
+ try:
+ os.unlink(os.path.join(settings["DISTDIR"], k))
+ except OSError:
+ pass
+
+ proc = await asyncio.create_subprocess_exec(*emirrordist_cmd, env=env)
+ self.assertEqual(await proc.wait(), 0)
+
+ for k in distfiles:
+ with open(
+ os.path.join(settings["DISTDIR"], layouts[0].get_path(k)), "rb"
+ ) as f:
+ self.assertEqual(f.read(), distfiles[k])
+
+ with EmirrordistConfig(
+ emdisopts, portdb, asyncio.get_event_loop()
+ ) as emdisconf:
+ self.assertEqual(content_db_state, dict(emdisconf.content_db.items()))
+
+ # Verify that remove works as expected
+ filename = [filename for filename in distfiles if filename == "foo"][0]
+ self.assertTrue(bool(filename.digests))
+ emdisconf.content_db.remove(filename)
+ # foo should still have a content revision corresponding to bar's content.
+ self.assertEqual(
+ [
+ k[len("filename:") :]
+ for k in emdisconf.content_db
+ if k.startswith("filename:")
+ ],
+ ["bar", "foo"],
+ )
+ self.assertEqual(len(emdisconf.content_db["filename:foo"]), 1)
+ self.assertEqual(
+ len(
+ [
+ revision_key
+ for revision_key in emdisconf.content_db["filename:foo"]
+ if not filename.digests_equal(
+ DistfileName(
+ "foo",
+ digests=dict(revision_key),
+ )
+ )
+ ]
+ ),
+ 1,
+ )
+ # bar should still have a content revision corresponding to foo's content.
+ self.assertEqual(len(emdisconf.content_db["filename:bar"]), 2)
+ self.assertEqual(
+ len(
+ [
+ revision_key
+ for revision_key in emdisconf.content_db["filename:bar"]
+ if filename.digests_equal(
+ DistfileName(
+ "bar",
+ digests=dict(revision_key),
+ )
+ )
+ ]
+ ),
+ 1,
+ )
+ # remove the foo which refers to bar's content
+ bar = [filename for filename in distfiles if filename == "bar"][0]
+ foo_remaining = DistfileName("foo", digests=bar.digests)
+ emdisconf.content_db.remove(foo_remaining)
+ self.assertEqual(
+ [
+ k[len("filename:") :]
+ for k in emdisconf.content_db
+ if k.startswith("filename:")
+ ],
+ ["bar"],
+ )
+ self.assertRaises(KeyError, emdisconf.content_db.__getitem__, "filename:foo")
+ # bar should still have a content revision corresponding to foo's content.
+ self.assertEqual(len(emdisconf.content_db["filename:bar"]), 2)
def test_flat_layout(self):
self.assertTrue(FlatLayout.verify_args(('flat',)))
@@ -381,6 +598,35 @@ class EbuildFetchTestCase(TestCase):
self.assertEqual(FilenameHashLayout('SHA1', '8:16:24').get_path('foo-1.tar.gz'),
'19/c3b6/37a94b/foo-1.tar.gz')
+ def test_content_hash_layout(self):
+ self.assertFalse(ContentHashLayout.verify_args(('content-hash',)))
+ self.assertTrue(ContentHashLayout.verify_args(('content-hash', 'SHA1', '8')))
+ self.assertFalse(ContentHashLayout.verify_args(('content-hash', 'INVALID-HASH', '8')))
+ self.assertTrue(ContentHashLayout.verify_args(('content-hash', 'SHA1', '4:8:12')))
+ self.assertFalse(ContentHashLayout.verify_args(('content-hash', 'SHA1', '3')))
+ self.assertFalse(ContentHashLayout.verify_args(('content-hash', 'SHA1', 'junk')))
+ self.assertFalse(ContentHashLayout.verify_args(('content-hash', 'SHA1', '4:8:junk')))
+
+ filename = DistfileName(
+ 'foo-1.tar.gz',
+ digests=dict((algo, checksum_str(b'', hashname=algo)) for algo in MANIFEST2_HASH_DEFAULTS),
+ )
+
+ # Raise KeyError for a hash algorithm SHA1 which is not in MANIFEST2_HASH_DEFAULTS.
+ self.assertRaises(KeyError, ContentHashLayout('SHA1', '4').get_path, filename)
+
+ # Raise AttributeError for a plain string argument.
+ self.assertRaises(AttributeError, ContentHashLayout('SHA512', '4').get_path, str(filename))
+
+ self.assertEqual(ContentHashLayout('SHA512', '4').get_path(filename),
+ 'c/cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e')
+ self.assertEqual(ContentHashLayout('SHA512', '8').get_path(filename),
+ 'cf/cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e')
+ self.assertEqual(ContentHashLayout('SHA512', '8:16').get_path(filename),
+ 'cf/83e1/cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e')
+ self.assertEqual(ContentHashLayout('SHA512', '8:16:24').get_path(filename),
+ 'cf/83e1/357eef/cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e')
+
def test_mirror_layout_config(self):
mlc = MirrorLayoutConfig()
self.assertEqual(mlc.serialize(), ())
@@ -448,14 +694,18 @@ class EbuildFetchTestCase(TestCase):
io.StringIO(conf))
def test_filename_hash_layout_get_filenames(self):
+ filename = DistfileName(
+ 'foo-1.tar.gz',
+ digests=dict((algo, checksum_str(b'', hashname=algo)) for algo in MANIFEST2_HASH_DEFAULTS),
+ )
layouts = (
FlatLayout(),
FilenameHashLayout('SHA1', '4'),
FilenameHashLayout('SHA1', '8'),
FilenameHashLayout('SHA1', '8:16'),
FilenameHashLayout('SHA1', '8:16:24'),
+ ContentHashLayout('SHA512', '8:8:8'),
)
- filename = 'foo-1.tar.gz'
for layout in layouts:
distdir = tempfile.mkdtemp()
@@ -469,6 +719,12 @@ class EbuildFetchTestCase(TestCase):
with open(path, 'wb') as f:
pass
- self.assertEqual([filename], list(layout.get_filenames(distdir)))
+ file_list = list(layout.get_filenames(distdir))
+ self.assertTrue(len(file_list) > 0)
+ for filename_result in file_list:
+ if isinstance(filename_result, DistfileName):
+ self.assertTrue(filename_result.digests_equal(filename))
+ else:
+ self.assertEqual(filename_result, str(filename))
finally:
shutil.rmtree(distdir)
diff --git a/lib/portage/tests/emerge/test_simple.py b/lib/portage/tests/emerge/test_simple.py
index d26146aa9..6e282337f 100644
--- a/lib/portage/tests/emerge/test_simple.py
+++ b/lib/portage/tests/emerge/test_simple.py
@@ -322,11 +322,11 @@ call_has_and_best_version() {
rm_cmd + ("-rf", cachedir),
emerge_cmd + ("--oneshot", "virtual/foo"),
lambda: self.assertFalse(os.path.exists(
- os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
+ os.path.join(pkgdir, "virtual", "foo", "foo-0-1.xpak"))),
({"FEATURES" : "unmerge-backup"},) + \
emerge_cmd + ("--unmerge", "virtual/foo"),
lambda: self.assertTrue(os.path.exists(
- os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
+ os.path.join(pkgdir, "virtual", "foo", "foo-0-1.xpak"))),
emerge_cmd + ("--pretend", "dev-libs/A"),
ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
diff --git a/lib/portage/tests/resolver/ResolverPlayground.py b/lib/portage/tests/resolver/ResolverPlayground.py
index fb32f0a53..6cd3b432a 100644
--- a/lib/portage/tests/resolver/ResolverPlayground.py
+++ b/lib/portage/tests/resolver/ResolverPlayground.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import bz2
@@ -44,7 +44,13 @@ class ResolverPlayground:
config_files = frozenset(("eapi", "layout.conf", "make.conf", "modules", "package.accept_keywords",
"package.keywords", "package.license", "package.mask", "package.properties",
"package.provided", "packages",
- "package.unmask", "package.use", "package.use.aliases", "package.use.stable.mask",
+ "package.unmask",
+ "package.use",
+ "package.use.aliases",
+ "package.use.force",
+ "package.use.mask",
+ "package.use.stable.force",
+ "package.use.stable.mask",
"soname.provided",
"unpack_dependencies", "use.aliases", "use.force", "use.mask", "layout.conf"))
diff --git a/lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py b/lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py
index 0397509f8..f9b1abb35 100644
--- a/lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py
+++ b/lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py
@@ -1,4 +1,4 @@
-# Copyright 2015 Gentoo Foundation
+# Copyright 2015-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -10,14 +10,22 @@ class BuildIdProfileFormatTestCase(TestCase):
def testBuildIdProfileFormat(self):
profile = {
- "packages": ("=app-misc/A-1-2",),
+ "packages": ("=app-misc/A-1-2::test_repo",),
+ "package.mask": ("<app-misc/A-1::test_repo",),
+ "package.keywords": ("app-misc/A-1::test_repo x86",),
+ "package.unmask": (">=app-misc/A-1::test_repo",),
+ "package.use": ("app-misc/A-1::test_repo foo",),
+ "package.use.mask": ("app-misc/A-1::test_repo -foo",),
+ "package.use.stable.mask": ("app-misc/A-1::test_repo -foo",),
+ "package.use.force": ("app-misc/A-1::test_repo foo",),
+ "package.use.stable.force": ("app-misc/A-1::test_repo foo",),
"package.provided": ("sys-libs/zlib-1.2.8-r1",),
}
repo_configs = {
"test_repo": {
"layout.conf": (
- "profile-formats = build-id profile-set",
+ "profile-formats = build-id profile-repo-deps profile-set",
),
}
}
diff --git a/lib/portage/tests/resolver/test_autounmask.py b/lib/portage/tests/resolver/test_autounmask.py
index a3bf0ff94..86ae4bbf6 100644
--- a/lib/portage/tests/resolver/test_autounmask.py
+++ b/lib/portage/tests/resolver/test_autounmask.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2019 Gentoo Authors
+# Copyright 2010-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -440,13 +440,34 @@ class AutounmaskTestCase(TestCase):
mergelist=["dev-libs/A-1"],
license_changes={ "dev-libs/A-1": set(["TEST"]) }),
- # Test default --autounmask-license
+ # Test that --autounmask enables --autounmask-license
ResolverPlaygroundTestCase(
["=dev-libs/A-1"],
+ options={"--autounmask": True},
success=False,
mergelist=["dev-libs/A-1"],
license_changes={ "dev-libs/A-1": set(["TEST"]) }),
+ # Test that --autounmask-license is not enabled by default
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ success=False,
+ ),
+
+ # Test that --autounmask does not override --autounmask-license=n
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options={"--autounmask": True, "--autounmask-license": "n"},
+ success=False,
+ ),
+
+ # Test that --autounmask=n overrides --autounmask-license=y
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options={"--autounmask": "n", "--autounmask-license": "y"},
+ success=False,
+ ),
+
ResolverPlaygroundTestCase(
["=dev-libs/A-1"],
options={"--autounmask-license": "n"},
diff --git a/lib/portage/tests/resolver/test_autounmask_use_slot_conflict.py b/lib/portage/tests/resolver/test_autounmask_use_slot_conflict.py
new file mode 100644
index 000000000..2e090d45e
--- /dev/null
+++ b/lib/portage/tests/resolver/test_autounmask_use_slot_conflict.py
@@ -0,0 +1,51 @@
+# Copyright 2017-2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+
+class AutounmaskUseSlotConflictTestCase(TestCase):
+ def testAutounmaskUseSlotConflict(self):
+ self.todo = True
+
+ ebuilds = {
+ "sci-libs/K-1": {"IUSE": "+foo", "EAPI": 1},
+ "sci-libs/L-1": {"DEPEND": "sci-libs/K[-foo]", "EAPI": 2},
+ "sci-libs/M-1": {"DEPEND": "sci-libs/K[foo=]", "IUSE": "+foo", "EAPI": 2},
+ }
+
+ installed = {}
+
+ test_cases = (
+ # Test bug 615824, where an autounmask USE change results in
+ # a conflict which is not reported. In order to install L,
+ # foo must be disabled for both K and M, but autounmask
+ # disables foo for K and leaves it enabled for M.
+ ResolverPlaygroundTestCase(
+ ["sci-libs/L", "sci-libs/M"],
+ options={"--backtrack": 0},
+ success=False,
+ mergelist=[
+ "sci-libs/L-1",
+ "sci-libs/M-1",
+ "sci-libs/K-1",
+ ],
+ ignore_mergelist_order=True,
+ slot_collision_solutions=[
+ {"sci-libs/K-1": {"foo": False}, "sci-libs/M-1": {"foo": False}}
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_unpack_dependencies.py b/lib/portage/tests/resolver/test_unpack_dependencies.py
deleted file mode 100644
index cfceff4b1..000000000
--- a/lib/portage/tests/resolver/test_unpack_dependencies.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class UnpackDependenciesTestCase(TestCase):
- def testUnpackDependencies(self):
- distfiles = {
- "A-1.tar.gz": b"binary\0content",
- "B-1.TAR.XZ": b"binary\0content",
- "B-docs-1.tar.bz2": b"binary\0content",
- "C-1.TAR.XZ": b"binary\0content",
- "C-docs-1.tar.bz2": b"binary\0content",
- }
-
- ebuilds = {
- "dev-libs/A-1": {"SRC_URI": "A-1.tar.gz", "EAPI": "5-progress"},
- "dev-libs/B-1": {"IUSE": "doc", "SRC_URI": "B-1.TAR.XZ doc? ( B-docs-1.tar.bz2 )", "EAPI": "5-progress"},
- "dev-libs/C-1": {"IUSE": "doc", "SRC_URI": "C-1.TAR.XZ doc? ( C-docs-1.tar.bz2 )", "EAPI": "5-progress"},
- "app-arch/bzip2-1": {},
- "app-arch/gzip-1": {},
- "app-arch/tar-1": {},
- "app-arch/xz-utils-1": {},
- }
-
- repo_configs = {
- "test_repo": {
- "unpack_dependencies/5-progress": (
- "tar.bz2 app-arch/tar app-arch/bzip2",
- "tar.gz app-arch/tar app-arch/gzip",
- "tar.xz app-arch/tar app-arch/xz-utils",
- ),
- },
- }
-
- test_cases = (
- ResolverPlaygroundTestCase(
- ["dev-libs/A"],
- success = True,
- ignore_mergelist_order = True,
- mergelist = ["app-arch/tar-1", "app-arch/gzip-1", "dev-libs/A-1"]),
- ResolverPlaygroundTestCase(
- ["dev-libs/B"],
- success = True,
- ignore_mergelist_order = True,
- mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "dev-libs/B-1"]),
- ResolverPlaygroundTestCase(
- ["dev-libs/C"],
- success = True,
- ignore_mergelist_order = True,
- mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "app-arch/bzip2-1", "dev-libs/C-1"]),
- )
-
- user_config = {
- "package.use": ("dev-libs/C doc",)
- }
-
- playground = ResolverPlayground(distfiles=distfiles, ebuilds=ebuilds, repo_configs=repo_configs, user_config=user_config)
- try:
- for test_case in test_cases:
- playground.run_TestCase(test_case)
- self.assertEqual(test_case.test_success, True, test_case.fail_msg)
- finally:
- playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_use_aliases.py b/lib/portage/tests/resolver/test_use_aliases.py
deleted file mode 100644
index 7c2debbb1..000000000
--- a/lib/portage/tests/resolver/test_use_aliases.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright 2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class UseAliasesTestCase(TestCase):
- def testUseAliases(self):
- ebuilds = {
- "dev-libs/A-1": {"DEPEND": "dev-libs/K[x]", "RDEPEND": "dev-libs/K[x]", "EAPI": "5"},
- "dev-libs/B-1": {"DEPEND": "dev-libs/L[x]", "RDEPEND": "dev-libs/L[x]", "EAPI": "5"},
- "dev-libs/C-1": {"DEPEND": "dev-libs/M[xx]", "RDEPEND": "dev-libs/M[xx]", "EAPI": "5"},
- "dev-libs/D-1": {"DEPEND": "dev-libs/N[-x]", "RDEPEND": "dev-libs/N[-x]", "EAPI": "5"},
- "dev-libs/E-1": {"DEPEND": "dev-libs/O[-xx]", "RDEPEND": "dev-libs/O[-xx]", "EAPI": "5"},
- "dev-libs/F-1": {"DEPEND": "dev-libs/P[-xx]", "RDEPEND": "dev-libs/P[-xx]", "EAPI": "5"},
- "dev-libs/G-1": {"DEPEND": "dev-libs/Q[x-y]", "RDEPEND": "dev-libs/Q[x-y]", "EAPI": "5"},
- "dev-libs/H-1": {"DEPEND": "=dev-libs/R-1*[yy]", "RDEPEND": "=dev-libs/R-1*[yy]", "EAPI": "5"},
- "dev-libs/H-2": {"DEPEND": "=dev-libs/R-2*[yy]", "RDEPEND": "=dev-libs/R-2*[yy]", "EAPI": "5"},
- "dev-libs/I-1": {"DEPEND": "dev-libs/S[y-z]", "RDEPEND": "dev-libs/S[y-z]", "EAPI": "5"},
- "dev-libs/I-2": {"DEPEND": "dev-libs/S[y_z]", "RDEPEND": "dev-libs/S[y_z]", "EAPI": "5"},
- "dev-libs/J-1": {"DEPEND": "dev-libs/T[x]", "RDEPEND": "dev-libs/T[x]", "EAPI": "5"},
- "dev-libs/K-1": {"IUSE": "+x", "EAPI": "5"},
- "dev-libs/K-2::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
- "dev-libs/L-1": {"IUSE": "+x", "EAPI": "5"},
- "dev-libs/M-1::repo1": {"IUSE": "X", "EAPI": "5-progress"},
- "dev-libs/N-1": {"IUSE": "x", "EAPI": "5"},
- "dev-libs/N-2::repo1": {"IUSE": "X", "EAPI": "5-progress"},
- "dev-libs/O-1": {"IUSE": "x", "EAPI": "5"},
- "dev-libs/P-1::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
- "dev-libs/Q-1::repo2": {"IUSE": "X.Y", "EAPI": "5-progress"},
- "dev-libs/R-1::repo1": {"IUSE": "Y", "EAPI": "5-progress"},
- "dev-libs/R-2::repo1": {"IUSE": "y", "EAPI": "5-progress"},
- "dev-libs/S-1::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
- "dev-libs/S-2::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
- "dev-libs/T-1::repo1": {"IUSE": "+X", "EAPI": "5"},
- }
-
- installed = {
- "dev-libs/L-2::repo1": {"IUSE": "+X", "USE": "X", "EAPI": "5-progress"},
- "dev-libs/O-2::repo1": {"IUSE": "X", "USE": "", "EAPI": "5-progress"},
- }
-
- repo_configs = {
- "repo1": {
- "use.aliases": ("X x xx",),
- "package.use.aliases": (
- "=dev-libs/R-1* Y yy",
- "=dev-libs/R-2* y yy",
- )
- },
- "repo2": {
- "eapi": ("5-progress",),
- "use.aliases": ("X.Y x-y",),
- "package.use.aliases": (
- "=dev-libs/S-1* Y.Z y-z",
- "=dev-libs/S-2* Y.Z y_z",
- ),
- },
- }
-
- test_cases = (
- ResolverPlaygroundTestCase(
- ["dev-libs/A"],
- success = True,
- mergelist = ["dev-libs/K-2", "dev-libs/A-1"]),
- ResolverPlaygroundTestCase(
- ["dev-libs/B"],
- success = True,
- mergelist = ["dev-libs/B-1"]),
- ResolverPlaygroundTestCase(
- ["dev-libs/C"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/M-1", "dev-libs/C-1"],
- use_changes = {"dev-libs/M-1": {"X": True}}),
- ResolverPlaygroundTestCase(
- ["dev-libs/D"],
- success = True,
- mergelist = ["dev-libs/N-2", "dev-libs/D-1"]),
- ResolverPlaygroundTestCase(
- ["dev-libs/E"],
- success = True,
- mergelist = ["dev-libs/E-1"]),
- ResolverPlaygroundTestCase(
- ["dev-libs/F"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/P-1", "dev-libs/F-1"],
- use_changes = {"dev-libs/P-1": {"X": False}}),
- ResolverPlaygroundTestCase(
- ["dev-libs/G"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/Q-1", "dev-libs/G-1"],
- use_changes = {"dev-libs/Q-1": {"X.Y": True}}),
- ResolverPlaygroundTestCase(
- ["=dev-libs/H-1*"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/R-1", "dev-libs/H-1"],
- use_changes = {"dev-libs/R-1": {"Y": True}}),
- ResolverPlaygroundTestCase(
- ["=dev-libs/H-2*"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/R-2", "dev-libs/H-2"],
- use_changes = {"dev-libs/R-2": {"y": True}}),
- ResolverPlaygroundTestCase(
- ["=dev-libs/I-1*"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/S-1", "dev-libs/I-1"],
- use_changes = {"dev-libs/S-1": {"Y.Z": True}}),
- ResolverPlaygroundTestCase(
- ["=dev-libs/I-2*"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/S-2", "dev-libs/I-2"],
- use_changes = {"dev-libs/S-2": {"Y.Z": True}}),
- ResolverPlaygroundTestCase(
- ["dev-libs/J"],
- success = False),
- )
-
- playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, repo_configs=repo_configs)
- try:
- for test_case in test_cases:
- playground.run_TestCase(test_case)
- self.assertEqual(test_case.test_success, True, test_case.fail_msg)
- finally:
- playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_useflags.py b/lib/portage/tests/resolver/test_useflags.py
index 0a5f3b3ff..b799e62ff 100644
--- a/lib/portage/tests/resolver/test_useflags.py
+++ b/lib/portage/tests/resolver/test_useflags.py
@@ -10,17 +10,24 @@ class UseFlagsTestCase(TestCase):
ebuilds = {
"dev-libs/A-1": { "IUSE": "X", },
"dev-libs/B-1": { "IUSE": "X Y", },
+ "dev-libs/C-1": { "IUSE": "abi_x86_32", "EAPI": "7" },
+ "dev-libs/D-1": { "IUSE": "abi_x86_32", "EAPI": "7", "RDEPEND": "dev-libs/C[abi_x86_32?]" },
}
installed = {
"dev-libs/A-1": { "IUSE": "X", },
"dev-libs/B-1": { "IUSE": "X", },
+ "dev-libs/C-1": { "IUSE": "abi_x86_32", "USE": "abi_x86_32", "EAPI": "7" },
+ "dev-libs/D-1": { "IUSE": "abi_x86_32", "USE": "abi_x86_32", "EAPI": "7", "RDEPEND": "dev-libs/C[abi_x86_32]" },
}
binpkgs = installed
user_config = {
- "package.use": ( "dev-libs/A X", ),
+ "package.use": (
+ "dev-libs/A X",
+ "dev-libs/D abi_x86_32",
+ ),
"use.force": ( "Y", ),
}
@@ -39,6 +46,25 @@ class UseFlagsTestCase(TestCase):
success = True,
mergelist = ["dev-libs/A-1"]),
+ # For bug 773469, we wanted --binpkg-respect-use=y to trigger a
+ # slot collision. Instead, a combination of default --autounmask-use
+ # combined with --autounmask-backtrack=y from EMERGE_DEFAULT_OPTS
+ # triggered this behavior which appeared confusingly similar to
# --binpkg-respect-use=n behavior.
+ #ResolverPlaygroundTestCase(
+ # ["dev-libs/C", "dev-libs/D"],
+ # options={"--usepkg": True, "--binpkg-respect-use": "y", "--autounmask-backtrack": "y"},
+ # success=True,
+ # use_changes={"dev-libs/C-1": {"abi_x86_32": True}},
+ # mergelist=["[binary]dev-libs/C-1", "[binary]dev-libs/D-1"],
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C", "dev-libs/D"],
+ options={"--usepkg": True, "--binpkg-respect-use": "y", "--autounmask-backtrack": "y"},
+ success=False,
+ slot_collision_solutions=[{"dev-libs/C-1": {"abi_x86_32": True}}],
+ mergelist=["dev-libs/C-1", "[binary]dev-libs/D-1"],
+ ),
+
#--binpkg-respect-use=n: use binpkgs with different use flags
ResolverPlaygroundTestCase(
["dev-libs/A"],
diff --git a/lib/portage/tests/sync/test_sync_local.py b/lib/portage/tests/sync/test_sync_local.py
index 21c03a98b..02a8b2958 100644
--- a/lib/portage/tests/sync/test_sync_local.py
+++ b/lib/portage/tests/sync/test_sync_local.py
@@ -1,4 +1,4 @@
-# Copyright 2014-2020 Gentoo Authors
+# Copyright 2014-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import datetime
@@ -55,14 +55,23 @@ class SyncLocalTestCase(TestCase):
}
ebuilds = {
- "dev-libs/A-0": {}
+ "dev-libs/A-0": {},
+ "sys-apps/portage-3.0": {"IUSE": "+python_targets_python3_8"},
+ }
+
+ installed = {
+ "sys-apps/portage-2.3.99": {
+ "EAPI": "7",
+ "IUSE": "+python_targets_python3_8",
+ "USE": "python_targets_python3_8",
+ },
}
user_config = {
'make.conf': ('FEATURES="metadata-transfer"',)
}
- playground = ResolverPlayground(ebuilds=ebuilds,
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
profile=profile, user_config=user_config, debug=debug)
settings = playground.settings
eprefix = settings["EPREFIX"]
diff --git a/lib/portage/tests/unicode/test_string_format.py b/lib/portage/tests/unicode/test_string_format.py
index 3b994d622..54ac038a6 100644
--- a/lib/portage/tests/unicode/test_string_format.py
+++ b/lib/portage/tests/unicode/test_string_format.py
@@ -15,9 +15,6 @@ class StringFormatTestCase(TestCase):
which may be either python2 or python3.
"""
- # We need unicode_literals in order to get some unicode test strings
- # in a way that works in both python2 and python3.
-
unicode_strings = (
'\u2018',
'\u2019',
@@ -31,8 +28,6 @@ class StringFormatTestCase(TestCase):
arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
dependency_arg = DependencyArg(arg=arg_unicode)
- # Use unicode_literals for unicode format string so that
- # __unicode__() is called in Python 2.
formatted_str = "%s" % (dependency_arg,)
self.assertEqual(formatted_str, arg_unicode)
@@ -48,8 +43,6 @@ class StringFormatTestCase(TestCase):
arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
e = PortageException(arg_unicode)
- # Use unicode_literals for unicode format string so that
- # __unicode__() is called in Python 2.
formatted_str = "%s" % (e,)
self.assertEqual(formatted_str, arg_unicode)
@@ -66,8 +59,6 @@ class StringFormatTestCase(TestCase):
for arg_unicode in self.unicode_strings:
e = UseFlagDisplay(arg_unicode, enabled, forced)
- # Use unicode_literals for unicode format string so that
- # __unicode__() is called in Python 2.
formatted_str = "%s" % (e,)
self.assertEqual(isinstance(formatted_str, str), True)
diff --git a/lib/portage/tests/util/test_shelve.py b/lib/portage/tests/util/test_shelve.py
new file mode 100644
index 000000000..60592c6fb
--- /dev/null
+++ b/lib/portage/tests/util/test_shelve.py
@@ -0,0 +1,60 @@
+# Copyright 2020-2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import argparse
+import os
+import shutil
+import tempfile
+import time
+
+from portage.tests import TestCase
+from portage.util.shelve import dump, open_shelve, restore
+
+
+class ShelveUtilsTestCase(TestCase):
+
+ TEST_DATA = (
+ # distfiles_db
+ {
+ "portage-2.3.89.tar.bz2": "sys-apps/portage-2.3.89",
+ "portage-2.3.99.tar.bz2": "sys-apps/portage-2.3.99",
+ },
+ # deletion_db
+ {
+ "portage-2.3.89.tar.bz2": time.time(),
+ "portage-2.3.99.tar.bz2": time.time(),
+ },
+ # recycle_db
+ {
+ "portage-2.3.89.tar.bz2": (0, time.time()),
+ "portage-2.3.99.tar.bz2": (0, time.time()),
+ },
+ )
+
+ def test_dump_restore(self):
+ for data in self.TEST_DATA:
+ tmpdir = tempfile.mkdtemp()
+ try:
+ dump_args = argparse.Namespace(
+ src=os.path.join(tmpdir, "shelve_file"),
+ dest=os.path.join(tmpdir, "pickle_file"),
+ )
+ db = open_shelve(dump_args.src, flag="c")
+ for k, v in data.items():
+ db[k] = v
+ db.close()
+ dump(dump_args)
+
+ os.unlink(dump_args.src)
+ restore_args = argparse.Namespace(
+ dest=dump_args.src,
+ src=dump_args.dest,
+ )
+ restore(restore_args)
+
+ db = open_shelve(restore_args.dest, flag="r")
+ for k, v in data.items():
+ self.assertEqual(db[k], v)
+ db.close()
+ finally:
+ shutil.rmtree(tmpdir)
diff --git a/lib/portage/util/_async/BuildLogger.py b/lib/portage/util/_async/BuildLogger.py
index 5a9c076b6..896e4d7b5 100644
--- a/lib/portage/util/_async/BuildLogger.py
+++ b/lib/portage/util/_async/BuildLogger.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Gentoo Authors
+# Copyright 2020-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -11,7 +11,6 @@ from portage.util import shlex_split
from portage.util._async.PipeLogger import PipeLogger
from portage.util._async.PopenProcess import PopenProcess
from portage.util.futures import asyncio
-from portage.util.futures.compat_coroutine import coroutine
class BuildLogger(AsynchronousTask):
"""
@@ -78,16 +77,15 @@ class BuildLogger(AsynchronousTask):
pipe_logger.start()
self._main_task_cancel = functools.partial(self._main_cancel, filter_proc, pipe_logger)
- self._main_task = asyncio.ensure_future(self._main(filter_proc, pipe_logger, loop=self.scheduler), loop=self.scheduler)
+ self._main_task = asyncio.ensure_future(self._main(filter_proc, pipe_logger), loop=self.scheduler)
self._main_task.add_done_callback(self._main_exit)
- @coroutine
- def _main(self, filter_proc, pipe_logger, loop=None):
+ async def _main(self, filter_proc, pipe_logger):
try:
if pipe_logger.poll() is None:
- yield pipe_logger.async_wait()
+ await pipe_logger.async_wait()
if filter_proc is not None and filter_proc.poll() is None:
- yield filter_proc.async_wait()
+ await filter_proc.async_wait()
except asyncio.CancelledError:
self._main_cancel(filter_proc, pipe_logger)
raise
diff --git a/lib/portage/util/_async/PipeLogger.py b/lib/portage/util/_async/PipeLogger.py
index e8203268c..b7c03043f 100644
--- a/lib/portage/util/_async/PipeLogger.py
+++ b/lib/portage/util/_async/PipeLogger.py
@@ -1,4 +1,4 @@
-# Copyright 2008-2020 Gentoo Authors
+# Copyright 2008-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import fcntl
@@ -9,7 +9,6 @@ import portage
from portage import os, _encodings, _unicode_encode
from portage.util.futures import asyncio
from portage.util.futures._asyncio.streams import _writer
-from portage.util.futures.compat_coroutine import coroutine
from portage.util.futures.unix_events import _set_nonblocking
from _emerge.AbstractPollTask import AbstractPollTask
@@ -53,7 +52,7 @@ class PipeLogger(AbstractPollTask):
fcntl.fcntl(fd, fcntl.F_SETFL,
fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
- self._io_loop_task = asyncio.ensure_future(self._io_loop(self.input_fd, loop=self.scheduler), loop=self.scheduler)
+ self._io_loop_task = asyncio.ensure_future(self._io_loop(self.input_fd), loop=self.scheduler)
self._io_loop_task.add_done_callback(self._io_loop_done)
self._registered = True
@@ -62,8 +61,7 @@ class PipeLogger(AbstractPollTask):
if self.returncode is None:
self.returncode = self._cancelled_returncode
- @coroutine
- def _io_loop(self, input_file, loop=None):
+ async def _io_loop(self, input_file):
background = self.background
stdout_fd = self.stdout_fd
log_file = self._log_file
@@ -77,7 +75,7 @@ class PipeLogger(AbstractPollTask):
future = self.scheduler.create_future()
self.scheduler.add_reader(fd, future.set_result, None)
try:
- yield future
+ await future
finally:
# The loop and input file may have been closed.
if not self.scheduler.is_closed():
@@ -130,7 +128,7 @@ class PipeLogger(AbstractPollTask):
if self._log_file_nb:
# Use the _writer function which uses os.write, since the
# log_file.write method looses data when an EAGAIN occurs.
- yield _writer(log_file, buf, loop=self.scheduler)
+ await _writer(log_file, buf)
else:
# For gzip.GzipFile instances, the above _writer function
# will not work because data written directly to the file
diff --git a/lib/portage/util/_async/PopenProcess.py b/lib/portage/util/_async/PopenProcess.py
index c1931327a..7f4e17ea2 100644
--- a/lib/portage/util/_async/PopenProcess.py
+++ b/lib/portage/util/_async/PopenProcess.py
@@ -1,4 +1,4 @@
-# Copyright 2012-2018 Gentoo Foundation
+# Copyright 2012-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from _emerge.SubProcess import SubProcess
@@ -13,7 +13,7 @@ class PopenProcess(SubProcess):
self._registered = True
if self.pipe_reader is None:
- self._async_waitpid()
+ self.scheduler.call_soon(self._async_waitpid)
else:
try:
self.pipe_reader.scheduler = self.scheduler
diff --git a/lib/portage/util/_async/SchedulerInterface.py b/lib/portage/util/_async/SchedulerInterface.py
index 2865266eb..c2d1be51f 100644
--- a/lib/portage/util/_async/SchedulerInterface.py
+++ b/lib/portage/util/_async/SchedulerInterface.py
@@ -1,4 +1,4 @@
-# Copyright 2012-2020 Gentoo Authors
+# Copyright 2012-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import gzip
@@ -8,7 +8,6 @@ from portage import _encodings
from portage import _unicode_encode
from portage.util import writemsg_level
from portage.util.futures._asyncio.streams import _writer
-from portage.util.futures.compat_coroutine import coroutine
from ..SlotObject import SlotObject
class SchedulerInterface(SlotObject):
@@ -55,9 +54,8 @@ class SchedulerInterface(SlotObject):
def _return_false():
return False
- @coroutine
- def async_output(self, msg, log_file=None, background=None,
- level=0, noiselevel=-1, loop=None):
+ async def async_output(self, msg, log_file=None, background=None,
+ level=0, noiselevel=-1):
"""
Output a msg to stdio (if not in background) and to a log file
if provided.
@@ -81,7 +79,7 @@ class SchedulerInterface(SlotObject):
writemsg_level(msg, level=level, noiselevel=noiselevel)
if log_file is not None:
- yield _writer(log_file, _unicode_encode(msg), loop=loop)
+ await _writer(log_file, _unicode_encode(msg))
def output(self, msg, log_path=None, background=None,
level=0, noiselevel=-1):
diff --git a/lib/portage/util/_eventloop/EventLoop.py b/lib/portage/util/_eventloop/EventLoop.py
deleted file mode 100644
index ff2b73255..000000000
--- a/lib/portage/util/_eventloop/EventLoop.py
+++ /dev/null
@@ -1,1153 +0,0 @@
-# Copyright 1999-2020 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-import asyncio as _real_asyncio
-import collections
-import errno
-import functools
-import logging
-import os
-import select
-import signal
-import time
-import traceback
-
-try:
- import fcntl
-except ImportError:
- # http://bugs.jython.org/issue1074
- fcntl = None
-
-try:
- import threading
-except ImportError:
- import dummy_threading as threading
-
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.util.futures:asyncio',
- 'portage.util.futures.executor.fork:ForkExecutor',
- 'portage.util.futures.unix_events:_PortageEventLoop,_PortageChildWatcher',
-)
-
-from portage.util import writemsg_level
-from ..SlotObject import SlotObject
-from .PollConstants import PollConstants
-from .PollSelectAdapter import PollSelectAdapter
-
-class EventLoop:
- """
- An event loop, intended to be compatible with the GLib event loop.
- Call the iteration method in order to execute one iteration of the
- loop. The idle_add and timeout_add methods serve as thread-safe
- means to interact with the loop's thread.
- """
-
- supports_multiprocessing = True
-
- # TODO: Find out why SIGCHLD signals aren't delivered during poll
- # calls, forcing us to wakeup in order to receive them.
- _sigchld_interval = 250
-
- class _child_callback_class(SlotObject):
- __slots__ = ("callback", "data", "pid", "source_id")
-
- class _idle_callback_class(SlotObject):
- __slots__ = ("_args", "_callback", "_cancelled")
-
- class _io_handler_class(SlotObject):
- __slots__ = ("args", "callback", "f", "source_id")
-
- class _timeout_handler_class(SlotObject):
- __slots__ = ("args", "function", "calling", "interval", "source_id",
- "timestamp")
-
- class _handle:
- """
- A callback wrapper object, compatible with asyncio.Handle.
- """
- __slots__ = ("_callback_id", "_loop")
-
- def __init__(self, callback_id, loop):
- self._callback_id = callback_id
- self._loop = loop
-
- def cancel(self):
- """
- Cancel the call. If the callback is already canceled or executed,
- this method has no effect.
- """
- self._loop.source_remove(self._callback_id)
-
- class _call_soon_callback:
- """
- Wraps a call_soon callback, and always returns False, since these
- callbacks are only supposed to run once.
- """
- __slots__ = ("_args", "_callback")
-
- def __init__(self, callback, args):
- self._callback = callback
- self._args = args
-
- def __call__(self):
- self._callback(*self._args)
- return False
-
- class _selector_callback:
- """
- Wraps an callback, and always returns True, for callbacks that
- are supposed to run repeatedly.
- """
- __slots__ = ("_args", "_callbacks")
-
- def __init__(self, callbacks):
- self._callbacks = callbacks
-
- def __call__(self, fd, event):
- for callback, mask in self._callbacks:
- if event & mask:
- callback()
- return True
-
- def __init__(self, main=True):
- """
- @param main: If True then this is a singleton instance for use
- in the main thread, otherwise it is a local instance which
- can safely be use in a non-main thread (default is True, so
- that global_event_loop does not need constructor arguments)
- @type main: bool
- """
- self._use_signal = main and fcntl is not None
- self._debug = bool(os.environ.get('PYTHONASYNCIODEBUG'))
- self._thread_rlock = threading.RLock()
- self._thread_condition = threading.Condition(self._thread_rlock)
- self._poll_event_queue = []
- self._poll_event_handlers = {}
- self._poll_event_handler_ids = {}
- # Number of current calls to self.iteration(). A number greater
- # than 1 indicates recursion, which is not supported by asyncio's
- # default event loop.
- self._iteration_depth = 0
- # Increment id for each new handler.
- self._event_handler_id = 0
- # New call_soon callbacks must have an opportunity to
- # execute before it's safe to wait on self._thread_condition
- # without a timeout, since delaying its execution indefinitely
- # could lead to a deadlock. The following attribute stores the
- # event handler id of the most recently added call_soon callback.
- # If this attribute has changed since the last time that the
- # call_soon callbacks have been called, then it's not safe to
- # wait on self._thread_condition without a timeout.
- self._call_soon_id = None
- # Use deque, with thread-safe append, in order to emulate the FIFO
- # queue behavior of the AbstractEventLoop.call_soon method.
- self._idle_callbacks = collections.deque()
- self._idle_callbacks_remaining = 0
- self._timeout_handlers = {}
- self._timeout_interval = None
- self._default_executor = None
-
- self._poll_obj = None
- try:
- select.epoll
- except AttributeError:
- pass
- else:
- try:
- epoll_obj = select.epoll()
- except IOError:
- # This happens with Linux 2.4 kernels:
- # IOError: [Errno 38] Function not implemented
- pass
- else:
- self._poll_obj = _epoll_adapter(epoll_obj)
- self.IO_ERR = select.EPOLLERR
- self.IO_HUP = select.EPOLLHUP
- self.IO_IN = select.EPOLLIN
- self.IO_NVAL = 0
- self.IO_OUT = select.EPOLLOUT
- self.IO_PRI = select.EPOLLPRI
-
- if self._poll_obj is None:
- self._poll_obj = create_poll_instance()
- self.IO_ERR = PollConstants.POLLERR
- self.IO_HUP = PollConstants.POLLHUP
- self.IO_IN = PollConstants.POLLIN
- self.IO_NVAL = PollConstants.POLLNVAL
- self.IO_OUT = PollConstants.POLLOUT
- self.IO_PRI = PollConstants.POLLPRI
-
- # These trigger both reader and writer callbacks.
- EVENT_SHARED = self.IO_HUP | self.IO_ERR | self.IO_NVAL
-
- self._EVENT_READ = self.IO_IN | EVENT_SHARED
- self._EVENT_WRITE = self.IO_OUT | EVENT_SHARED
-
- self._child_handlers = {}
- self._sigchld_read = None
- self._sigchld_write = None
- self._sigchld_src_id = None
- self._pid = portage.getpid()
- self._asyncio_wrapper = _PortageEventLoop(loop=self)
- self._asyncio_child_watcher = _PortageChildWatcher(self)
-
- def create_future(self):
- """
- Create a Future object attached to the loop.
- """
- return asyncio.Future(loop=self._asyncio_wrapper)
-
- def _new_source_id(self):
- """
- Generate a new source id. This method is thread-safe.
- """
- with self._thread_rlock:
- self._event_handler_id += 1
- return self._event_handler_id
-
- def _poll(self, timeout=None):
- """
- All poll() calls pass through here. The poll events
- are added directly to self._poll_event_queue.
- In order to avoid endless blocking, this raises
- StopIteration if timeout is None and there are
- no file descriptors to poll.
- """
-
- if timeout is None and \
- not self._poll_event_handlers:
- raise StopIteration(
- "timeout is None and there are no poll() event handlers")
-
- while True:
- try:
- self._poll_event_queue.extend(self._poll_obj.poll(timeout))
- break
- except (IOError, select.error) as e:
- # Silently handle EINTR, which is normal when we have
- # received a signal such as SIGINT (epoll objects may
- # raise IOError rather than select.error, at least in
- # Python 3.2).
- if not (e.args and e.args[0] == errno.EINTR):
- writemsg_level("\n!!! select error: %s\n" % (e,),
- level=logging.ERROR, noiselevel=-1)
- del e
-
- # This typically means that we've received a SIGINT, so
- # raise StopIteration in order to break out of our current
- # iteration and respond appropriately to the signal as soon
- # as possible.
- raise StopIteration("interrupted")
-
- def iteration(self, *args):
- """
- Like glib.MainContext.iteration(), runs a single iteration. In order
- to avoid blocking forever when may_block is True (the default),
- callers must be careful to ensure that at least one of the following
- conditions is met:
- 1) An event source or timeout is registered which is guaranteed
- to trigger at least on event (a call to an idle function
- only counts as an event if it returns a False value which
- causes it to stop being called)
- 2) Another thread is guaranteed to call one of the thread-safe
- methods which notify iteration to stop waiting (such as
- idle_add or timeout_add).
- These rules ensure that iteration is able to block until an event
- arrives, without doing any busy waiting that would waste CPU time.
- @type may_block: bool
- @param may_block: if True the call may block waiting for an event
- (default is True).
- @rtype: bool
- @return: True if events were dispatched.
- """
- self._iteration_depth += 1
- try:
- return self._iteration(*args)
- finally:
- self._iteration_depth -= 1
-
- def _iteration(self, *args):
- may_block = True
-
- if args:
- if len(args) > 1:
- raise TypeError(
- "expected at most 1 argument (%s given)" % len(args))
- may_block = args[0]
-
- event_queue = self._poll_event_queue
- event_handlers = self._poll_event_handlers
- events_handled = 0
- timeouts_checked = False
-
- if not event_handlers:
- with self._thread_condition:
- prev_call_soon_id = self._call_soon_id
- if self._run_timeouts():
- events_handled += 1
- timeouts_checked = True
-
- call_soon = prev_call_soon_id is not self._call_soon_id
- if self._call_soon_id is not None and self._call_soon_id._cancelled:
- # Allow garbage collection of cancelled callback.
- self._call_soon_id = None
-
- if (not call_soon and not event_handlers
- and not events_handled and may_block):
- # Block so that we don't waste cpu time by looping too
- # quickly. This makes EventLoop useful for code that needs
- # to wait for timeout callbacks regardless of whether or
- # not any IO handlers are currently registered.
- timeout = self._get_poll_timeout()
- if timeout is None:
- wait_timeout = None
- else:
- wait_timeout = timeout / 1000
- # NOTE: In order to avoid a possible infinite wait when
- # wait_timeout is None, the previous _run_timeouts()
- # call must have returned False *with* _thread_condition
- # acquired. Otherwise, we would risk going to sleep after
- # our only notify event has already passed.
- self._thread_condition.wait(wait_timeout)
- if self._run_timeouts():
- events_handled += 1
- timeouts_checked = True
-
- # If any timeouts have executed, then return immediately,
- # in order to minimize latency in termination of iteration
- # loops that they may control.
- if events_handled or not event_handlers:
- return bool(events_handled)
-
- if not event_queue:
-
- if may_block:
- timeout = self._get_poll_timeout()
-
- # Avoid blocking for IO if there are any timeout
- # or idle callbacks available to process.
- if timeout != 0 and not timeouts_checked:
- if self._run_timeouts():
- events_handled += 1
- timeouts_checked = True
- if events_handled:
- # Minimize latency for loops controlled
- # by timeout or idle callback events.
- timeout = 0
- else:
- timeout = 0
-
- try:
- self._poll(timeout=timeout)
- except StopIteration:
- # This can be triggered by EINTR which is caused by signals.
- pass
-
- # NOTE: IO event handlers may be re-entrant, in case something
- # like AbstractPollTask._wait_loop() needs to be called inside
- # a handler for some reason.
- while event_queue:
- events_handled += 1
- f, event = event_queue.pop()
- try:
- x = event_handlers[f]
- except KeyError:
- # This is known to be triggered by the epoll
- # implementation in qemu-user-1.2.2, and appears
- # to be harmless (see bug #451326).
- continue
- if not x.callback(f, event, *x.args):
- self.source_remove(x.source_id)
-
- if not timeouts_checked:
- if self._run_timeouts():
- events_handled += 1
- timeouts_checked = True
-
- return bool(events_handled)
-
- def _get_poll_timeout(self):
-
- with self._thread_rlock:
- if self._child_handlers:
- if self._timeout_interval is None:
- timeout = self._sigchld_interval
- else:
- timeout = min(self._sigchld_interval,
- self._timeout_interval)
- else:
- timeout = self._timeout_interval
-
- return timeout
-
- def child_watch_add(self, pid, callback, data=None):
- """
- Like glib.child_watch_add(), sets callback to be called with the
- user data specified by data when the child indicated by pid exits.
- The signature for the callback is:
-
- def callback(pid, condition, user_data)
-
- where pid is is the child process id, condition is the status
- information about the child process and user_data is data.
-
- @type int
- @param pid: process id of a child process to watch
- @type callback: callable
- @param callback: a function to call
- @type data: object
- @param data: the optional data to pass to function
- @rtype: int
- @return: an integer ID
- """
- source_id = self._new_source_id()
- self._child_handlers[source_id] = self._child_callback_class(
- callback=callback, data=data, pid=pid, source_id=source_id)
-
- if self._use_signal:
- if self._sigchld_read is None:
- self._sigchld_read, self._sigchld_write = os.pipe()
-
- fcntl.fcntl(self._sigchld_read, fcntl.F_SETFL,
- fcntl.fcntl(self._sigchld_read,
- fcntl.F_GETFL) | os.O_NONBLOCK)
-
- # The IO watch is dynamically registered and unregistered as
- # needed, since we don't want to consider it as a valid source
- # of events when there are no child listeners. It's important
- # to distinguish when there are no valid sources of IO events,
- # in order to avoid an endless poll call if there's no timeout.
- if self._sigchld_src_id is None:
- self._sigchld_src_id = self.io_add_watch(
- self._sigchld_read, self.IO_IN, self._sigchld_io_cb)
- signal.signal(signal.SIGCHLD, self._sigchld_sig_cb)
-
- # poll soon, in case the SIGCHLD has already arrived
- self.call_soon(self._poll_child_processes)
- return source_id
-
- def _sigchld_sig_cb(self, signum, frame):
- # If this signal handler was not installed by the
- # current process then the signal doesn't belong to
- # this EventLoop instance.
- if portage.getpid() == self._pid:
- os.write(self._sigchld_write, b'\0')
-
- def _sigchld_io_cb(self, fd, events):
- try:
- while True:
- os.read(self._sigchld_read, 4096)
- except OSError:
- # read until EAGAIN
- pass
- self._poll_child_processes()
- return True
-
- def _poll_child_processes(self):
- if not self._child_handlers:
- return False
-
- calls = 0
-
- for x in list(self._child_handlers.values()):
- if x.source_id not in self._child_handlers:
- # it's already been called via re-entrance
- continue
- try:
- wait_retval = os.waitpid(x.pid, os.WNOHANG)
- except OSError as e:
- if e.errno != errno.ECHILD:
- raise
- del e
- self.source_remove(x.source_id)
- else:
- # With waitpid and WNOHANG, only check the
- # first element of the tuple since the second
- # element may vary (bug #337465).
- if wait_retval[0] != 0:
- calls += 1
- self.source_remove(x.source_id)
- x.callback(x.pid, wait_retval[1], x.data)
-
- return bool(calls)
-
- def idle_add(self, callback, *args):
- """
- Like glib.idle_add(), if callback returns False it is
- automatically removed from the list of event sources and will
- not be called again. This method is thread-safe.
-
- The idle_add method is deprecated. Use the call_soon and
- call_soon_threadsafe methods instead.
-
- @type callback: callable
- @param callback: a function to call
- @return: a handle which can be used to cancel the callback
- via the source_remove method
- @rtype: object
- """
- with self._thread_condition:
- source_id = self._idle_add(callback, *args)
- self._thread_condition.notify()
- return source_id
-
- def _idle_add(self, callback, *args):
- """Like idle_add(), but without thread safety."""
- # Hold self._thread_condition when assigning self._call_soon_id,
- # since it might be modified via a thread-safe method.
- with self._thread_condition:
- handle = self._call_soon_id = self._idle_callback_class(
- _args=args, _callback=callback)
- # This deque append is thread-safe, but it does *not* notify the
- # loop's thread, so the caller must notify if appropriate.
- self._idle_callbacks.append(handle)
- return handle
-
- def _run_idle_callbacks(self):
- # assumes caller has acquired self._thread_rlock
- if not self._idle_callbacks:
- return False
- state_change = 0
- reschedule = []
- # Use remaining count to avoid calling any newly scheduled callbacks,
- # since self._idle_callbacks can be modified during the exection of
- # these callbacks. The remaining count can be reset by recursive
- # calls to this method. Recursion must remain supported until all
- # consumers of AsynchronousLock.unlock() have been migrated to the
- # async_unlock() method, see bug 614108.
- self._idle_callbacks_remaining = len(self._idle_callbacks)
-
- while self._idle_callbacks_remaining:
- self._idle_callbacks_remaining -= 1
- try:
- x = self._idle_callbacks.popleft() # thread-safe
- except IndexError:
- break
- if x._cancelled:
- # it got cancelled while executing another callback
- continue
- if x._callback(*x._args):
- # Reschedule, but not until after it's called, since
- # we don't want it to call itself in a recursive call
- # to this method.
- self._idle_callbacks.append(x)
- else:
- x._cancelled = True
- state_change += 1
-
- return bool(state_change)
-
- def timeout_add(self, interval, function, *args):
- """
- Like glib.timeout_add(), interval argument is the number of
- milliseconds between calls to your function, and your function
- should return False to stop being called, or True to continue
- being called. Any additional positional arguments given here
- are passed to your function when it's called. This method is
- thread-safe.
- """
- with self._thread_condition:
- source_id = self._new_source_id()
- self._timeout_handlers[source_id] = \
- self._timeout_handler_class(
- interval=interval, function=function, args=args,
- source_id=source_id, timestamp=self.time())
- if self._timeout_interval is None or \
- self._timeout_interval > interval:
- self._timeout_interval = interval
- self._thread_condition.notify()
- return source_id
-
- def _run_timeouts(self):
-
- calls = 0
- if not self._use_signal:
- if self._poll_child_processes():
- calls += 1
-
- with self._thread_rlock:
-
- if self._run_idle_callbacks():
- calls += 1
-
- if not self._timeout_handlers:
- return bool(calls)
-
- ready_timeouts = []
- current_time = self.time()
- for x in self._timeout_handlers.values():
- elapsed_seconds = current_time - x.timestamp
- # elapsed_seconds < 0 means the system clock has been adjusted
- if elapsed_seconds < 0 or \
- (x.interval - 1000 * elapsed_seconds) <= 0:
- ready_timeouts.append(x)
-
- # Iterate of our local list, since self._timeout_handlers can be
- # modified during the exection of these callbacks.
- for x in ready_timeouts:
- if x.source_id not in self._timeout_handlers:
- # it got cancelled while executing another timeout
- continue
- if x.calling:
- # don't call it recursively
- continue
- calls += 1
- x.calling = True
- try:
- x.timestamp = self.time()
- if not x.function(*x.args):
- self.source_remove(x.source_id)
- finally:
- x.calling = False
-
- return bool(calls)
-
- def add_reader(self, fd, callback, *args):
- """
- Start watching the file descriptor for read availability and then
- call the callback with specified arguments.
-
- Use functools.partial to pass keywords to the callback.
- """
- handler = self._poll_event_handlers.get(fd)
- callbacks = [(functools.partial(callback, *args), self._EVENT_READ)]
- selector_mask = self._EVENT_READ
- if handler is not None:
- if not isinstance(handler.callback, self._selector_callback):
- raise AssertionError("add_reader called with fd "
- "registered directly via io_add_watch")
- for item in handler.callback._callbacks:
- callback, mask = item
- if mask != self._EVENT_READ:
- selector_mask |= mask
- callbacks.append(item)
- self.source_remove(handler.source_id)
- self.io_add_watch(fd, selector_mask, self._selector_callback(callbacks))
-
- def remove_reader(self, fd):
- """
- Stop watching the file descriptor for read availability.
- """
- handler = self._poll_event_handlers.get(fd)
- if handler is not None:
- if not isinstance(handler.callback, self._selector_callback):
- raise AssertionError("remove_reader called with fd "
- "registered directly via io_add_watch")
- callbacks = []
- selector_mask = 0
- removed = False
- for item in handler.callback._callbacks:
- callback, mask = item
- if mask == self._EVENT_READ:
- removed = True
- else:
- selector_mask |= mask
- callbacks.append(item)
- self.source_remove(handler.source_id)
- if callbacks:
- self.io_add_watch(fd, selector_mask,
- self._selector_callback(callbacks))
- return removed
- return False
-
- def add_writer(self, fd, callback, *args):
- """
- Start watching the file descriptor for write availability and then
- call the callback with specified arguments.
-
- Use functools.partial to pass keywords to the callback.
- """
- handler = self._poll_event_handlers.get(fd)
- callbacks = [(functools.partial(callback, *args), self._EVENT_WRITE)]
- selector_mask = self._EVENT_WRITE
- if handler is not None:
- if not isinstance(handler.callback, self._selector_callback):
- raise AssertionError("add_reader called with fd "
- "registered directly via io_add_watch")
- for item in handler.callback._callbacks:
- callback, mask = item
- if mask != self._EVENT_WRITE:
- selector_mask |= mask
- callbacks.append(item)
- self.source_remove(handler.source_id)
- self.io_add_watch(fd, selector_mask, self._selector_callback(callbacks))
-
- def remove_writer(self, fd):
- """
- Stop watching the file descriptor for write availability.
- """
- handler = self._poll_event_handlers.get(fd)
- if handler is not None:
- if not isinstance(handler.callback, self._selector_callback):
- raise AssertionError("remove_reader called with fd "
- "registered directly via io_add_watch")
- callbacks = []
- selector_mask = 0
- removed = False
- for item in handler.callback._callbacks:
- callback, mask = item
- if mask == self._EVENT_WRITE:
- removed = True
- else:
- selector_mask |= mask
- callbacks.append(item)
- self.source_remove(handler.source_id)
- if callbacks:
- self.io_add_watch(fd, selector_mask,
- self._selector_callback(callbacks))
- return removed
- return False
-
- def io_add_watch(self, f, condition, callback, *args):
- """
- Like glib.io_add_watch(), your function should return False to
- stop being called, or True to continue being called. Any
- additional positional arguments given here are passed to your
- function when it's called.
-
- @type f: int or object with fileno() method
- @param f: a file descriptor to monitor
- @type condition: int
- @param condition: a condition mask
- @type callback: callable
- @param callback: a function to call
- @rtype: int
- @return: an integer ID of the event source
- """
- if f in self._poll_event_handlers:
- raise AssertionError("fd %d is already registered" % f)
- source_id = self._new_source_id()
- self._poll_event_handler_ids[source_id] = f
- self._poll_event_handlers[f] = self._io_handler_class(
- args=args, callback=callback, f=f, source_id=source_id)
- self._poll_obj.register(f, condition)
- return source_id
-
- def source_remove(self, reg_id):
- """
- Like glib.source_remove(), this returns True if the given reg_id
- is found and removed, and False if the reg_id is invalid or has
- already been removed.
- """
- if isinstance(reg_id, self._idle_callback_class):
- if not reg_id._cancelled:
- reg_id._cancelled = True
- return True
- return False
-
- x = self._child_handlers.pop(reg_id, None)
- if x is not None:
- if not self._child_handlers and self._use_signal:
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- self.source_remove(self._sigchld_src_id)
- self._sigchld_src_id = None
- return True
-
- with self._thread_rlock:
- timeout_handler = self._timeout_handlers.pop(reg_id, None)
- if timeout_handler is not None:
- if timeout_handler.interval == self._timeout_interval:
- if self._timeout_handlers:
- self._timeout_interval = min(x.interval
- for x in self._timeout_handlers.values())
- else:
- self._timeout_interval = None
- return True
-
- f = self._poll_event_handler_ids.pop(reg_id, None)
- if f is None:
- return False
- self._poll_obj.unregister(f)
- if self._poll_event_queue:
- # Discard any unhandled events that belong to this file,
- # in order to prevent these events from being erroneously
- # delivered to a future handler that is using a reallocated
- # file descriptor of the same numeric value (causing
- # extremely confusing bugs).
- remaining_events = []
- discarded_events = False
- for event in self._poll_event_queue:
- if event[0] == f:
- discarded_events = True
- else:
- remaining_events.append(event)
-
- if discarded_events:
- self._poll_event_queue[:] = remaining_events
-
- del self._poll_event_handlers[f]
- return True
-
- def run_until_complete(self, future):
- """
- Run until the Future is done.
-
- @type future: asyncio.Future
- @param future: a Future to wait for
- @rtype: object
- @return: the Future's result
- @raise: the Future's exception
- """
- future = asyncio.ensure_future(future, loop=self._asyncio_wrapper)
-
- # Since done callbacks are executed via call_soon, it's desirable
- # to continue iterating until those callbacks have executed, which
- # is easily achieved by registering a done callback and waiting for
- # it to execute.
- waiter = self.create_future()
- future.add_done_callback(waiter.set_result)
- while not waiter.done():
- self.iteration()
-
- return future.result()
-
- def call_soon(self, callback, *args, **kwargs):
- """
- Arrange for a callback to be called as soon as possible. The callback
- is called after call_soon() returns, when control returns to the event
- loop.
-
- This operates as a FIFO queue, callbacks are called in the order in
- which they are registered. Each callback will be called exactly once.
-
- Any positional arguments after the callback will be passed to the
- callback when it is called.
-
- The context argument currently does nothing, but exists for minimal
- interoperability with Future instances that require it for PEP 567.
-
- An object compatible with asyncio.Handle is returned, which can
- be used to cancel the callback.
-
- @type callback: callable
- @param callback: a function to call
- @type context: contextvars.Context
- @param context: An optional keyword-only context argument allows
- specifying a custom contextvars.Context for the callback to run
- in. The current context is used when no context is provided.
- @return: a handle which can be used to cancel the callback
- @rtype: asyncio.Handle (or compatible)
- """
- try:
- unexpected = next(key for key in kwargs if key != 'context')
- except StopIteration:
- pass
- else:
- raise TypeError("call_soon() got an unexpected keyword argument '%s'" % unexpected)
- return self._handle(self._idle_add(
- self._call_soon_callback(callback, args)), self)
-
- def call_soon_threadsafe(self, callback, *args, **kwargs):
- """Like call_soon(), but thread safe."""
- try:
- unexpected = next(key for key in kwargs if key != 'context')
- except StopIteration:
- pass
- else:
- raise TypeError("call_soon_threadsafe() got an unexpected keyword argument '%s'" % unexpected)
- # idle_add provides thread safety
- return self._handle(self.idle_add(
- self._call_soon_callback(callback, args)), self)
-
- def time(self):
- """Return the time according to the event loop's clock.
-
- This is a float expressed in seconds since an epoch, but the
- epoch, precision, accuracy and drift are unspecified and may
- differ per event loop.
- """
- return time.monotonic()
-
- def call_later(self, delay, callback, *args, **kwargs):
- """
- Arrange for the callback to be called after the given delay seconds
- (either an int or float).
-
- An instance of asyncio.Handle is returned, which can be used to cancel
- the callback.
-
- callback will be called exactly once per call to call_later(). If two
- callbacks are scheduled for exactly the same time, it is undefined
- which will be called first.
-
- The optional positional args will be passed to the callback when
- it is called. If you want the callback to be called with some named
- arguments, use a closure or functools.partial().
-
- The context argument currently does nothing, but exists for minimal
- interoperability with Future instances that require it for PEP 567.
-
- Use functools.partial to pass keywords to the callback.
-
- @type delay: int or float
- @param delay: delay seconds
- @type callback: callable
- @param callback: a function to call
- @type context: contextvars.Context
- @param context: An optional keyword-only context argument allows
- specifying a custom contextvars.Context for the callback to run
- in. The current context is used when no context is provided.
- @return: a handle which can be used to cancel the callback
- @rtype: asyncio.Handle (or compatible)
- """
- try:
- unexpected = next(key for key in kwargs if key != 'context')
- except StopIteration:
- pass
- else:
- raise TypeError("call_later() got an unexpected keyword argument '%s'" % unexpected)
- return self._handle(self.timeout_add(
- delay * 1000, self._call_soon_callback(callback, args)), self)
-
- def call_at(self, when, callback, *args, **kwargs):
- """
- Arrange for the callback to be called at the given absolute
- timestamp when (an int or float), using the same time reference as
- AbstractEventLoop.time().
-
- This method's behavior is the same as call_later().
-
- An instance of asyncio.Handle is returned, which can be used to
- cancel the callback.
-
- Use functools.partial to pass keywords to the callback.
-
- @type when: int or float
- @param when: absolute timestamp when to call callback
- @type callback: callable
- @param callback: a function to call
- @type context: contextvars.Context
- @param context: An optional keyword-only context argument allows
- specifying a custom contextvars.Context for the callback to run
- in. The current context is used when no context is provided.
- @return: a handle which can be used to cancel the callback
- @rtype: asyncio.Handle (or compatible)
- """
- try:
- unexpected = next(key for key in kwargs if key != 'context')
- except StopIteration:
- pass
- else:
- raise TypeError("call_at() got an unexpected keyword argument '%s'" % unexpected)
- delta = when - self.time()
- return self.call_later(delta if delta > 0 else 0, callback, *args)
-
- def run_in_executor(self, executor, func, *args):
- """
- Arrange for a func to be called in the specified executor.
-
- The executor argument should be an Executor instance. The default
- executor is used if executor is None.
-
- Use functools.partial to pass keywords to the *func*.
-
- @param executor: executor
- @type executor: concurrent.futures.Executor or None
- @param func: a function to call
- @type func: callable
- @return: a Future
- @rtype: asyncio.Future (or compatible)
- """
- if executor is None:
- executor = self._default_executor
- if executor is None:
- executor = ForkExecutor(loop=self)
- self._default_executor = executor
- future = executor.submit(func, *args)
- future = _real_asyncio.wrap_future(future,
- loop=self._asyncio_wrapper)
- return future
-
- def is_running(self):
- """Return whether the event loop is currently running."""
- return self._iteration_depth > 0
-
- def is_closed(self):
- """Returns True if the event loop was closed."""
- return self._poll_obj is None
-
- def close(self):
- """Close the event loop.
-
- This clears the queues and shuts down the executor,
- and waits for it to finish.
- """
- executor = self._default_executor
- if executor is not None:
- self._default_executor = None
- executor.shutdown(wait=True)
-
- if self._poll_obj is not None:
- close = getattr(self._poll_obj, 'close', None)
- if close is not None:
- close()
- self._poll_obj = None
-
- def default_exception_handler(self, context):
- """
- Default exception handler.
-
- This is called when an exception occurs and no exception
- handler is set, and can be called by a custom exception
- handler that wants to defer to the default behavior.
-
- The context parameter has the same meaning as in
- `call_exception_handler()`.
-
- @param context: exception context
- @type context: dict
- """
- message = context.get('message')
- if not message:
- message = 'Unhandled exception in event loop'
-
- exception = context.get('exception')
- if exception is not None:
- exc_info = (type(exception), exception, exception.__traceback__)
- else:
- exc_info = False
-
- log_lines = [message]
- for key in sorted(context):
- if key in {'message', 'exception'}:
- continue
- value = context[key]
- if key == 'source_traceback':
- tb = ''.join(traceback.format_list(value))
- value = 'Object created at (most recent call last):\n'
- value += tb.rstrip()
- elif key == 'handle_traceback':
- tb = ''.join(traceback.format_list(value))
- value = 'Handle created at (most recent call last):\n'
- value += tb.rstrip()
- else:
- value = repr(value)
- log_lines.append('{}: {}'.format(key, value))
-
- logging.error('\n'.join(log_lines), exc_info=exc_info)
- os.kill(portage.getpid(), signal.SIGTERM)
-
- def call_exception_handler(self, context):
- """
- Call the current event loop's exception handler.
-
- The context argument is a dict containing the following keys:
-
- - 'message': Error message;
- - 'exception' (optional): Exception object;
- - 'future' (optional): Future instance;
- - 'handle' (optional): Handle instance;
- - 'protocol' (optional): Protocol instance;
- - 'transport' (optional): Transport instance;
- - 'socket' (optional): Socket instance;
- - 'asyncgen' (optional): Asynchronous generator that caused
- the exception.
-
- New keys may be introduced in the future.
-
- @param context: exception context
- @type context: dict
- """
- self.default_exception_handler(context)
-
- def get_debug(self):
- """
- Get the debug mode (bool) of the event loop.
-
- The default value is True if the environment variable
- PYTHONASYNCIODEBUG is set to a non-empty string, False otherwise.
- """
- return self._debug
-
- def set_debug(self, enabled):
- """Set the debug mode of the event loop."""
- self._debug = enabled
-
-
-_can_poll_device = None
-
-def can_poll_device():
- """
- Test if it's possible to use poll() on a device such as a pty. This
- is known to fail on Darwin.
- @rtype: bool
- @return: True if poll() on a device succeeds, False otherwise.
- """
-
- global _can_poll_device
- if _can_poll_device is not None:
- return _can_poll_device
-
- if not hasattr(select, "poll"):
- _can_poll_device = False
- return _can_poll_device
-
- try:
- dev_null = open('/dev/null', 'rb')
- except IOError:
- _can_poll_device = False
- return _can_poll_device
-
- p = select.poll()
- try:
- p.register(dev_null.fileno(), PollConstants.POLLIN)
- except TypeError:
- # Jython: Object 'org.python.core.io.FileIO@f8f175' is not watchable
- _can_poll_device = False
- return _can_poll_device
-
- invalid_request = False
- for f, event in p.poll():
- if event & PollConstants.POLLNVAL:
- invalid_request = True
- break
- dev_null.close()
-
- _can_poll_device = not invalid_request
- return _can_poll_device
-
-def create_poll_instance():
- """
- Create an instance of select.poll, or an instance of
- PollSelectAdapter there is no poll() implementation or
- it is broken somehow.
- """
- if can_poll_device():
- return select.poll()
- return PollSelectAdapter()
-
-class _epoll_adapter:
- """
- Wraps a select.epoll instance in order to make it compatible
- with select.poll instances. This is necessary since epoll instances
- interpret timeout arguments differently. Note that the file descriptor
- that is associated with an epoll instance will close automatically when
- it is garbage collected, so it's not necessary to close it explicitly.
- """
- __slots__ = ('_epoll_obj', 'close')
-
- def __init__(self, epoll_obj):
- self._epoll_obj = epoll_obj
- self.close = epoll_obj.close
-
- def register(self, fd, *args):
- self._epoll_obj.register(fd, *args)
-
- def unregister(self, fd):
- self._epoll_obj.unregister(fd)
-
- def poll(self, *args):
- if len(args) > 1:
- raise TypeError(
- "poll expected at most 2 arguments, got " + \
- repr(1 + len(args)))
- timeout = -1
- if args:
- timeout = args[0]
- if timeout is None or timeout < 0:
- timeout = -1
- elif timeout != 0:
- timeout = timeout / 1000
-
- return self._epoll_obj.poll(timeout)
diff --git a/lib/portage/util/_eventloop/PollConstants.py b/lib/portage/util/_eventloop/PollConstants.py
deleted file mode 100644
index c5700d108..000000000
--- a/lib/portage/util/_eventloop/PollConstants.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 1999-2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import select
-class PollConstants:
-
- """
- Provides POLL* constants that are equivalent to those from the
- select module, for use by PollSelectAdapter.
- """
-
- names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
- v = 1
- for k in names:
- locals()[k] = getattr(select, k, v)
- v *= 2
- del k, v
diff --git a/lib/portage/util/_eventloop/PollSelectAdapter.py b/lib/portage/util/_eventloop/PollSelectAdapter.py
deleted file mode 100644
index c4637a352..000000000
--- a/lib/portage/util/_eventloop/PollSelectAdapter.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 1999-2020 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-import select
-
-from .PollConstants import PollConstants
-
-class PollSelectAdapter:
-
- """
- Use select to emulate a poll object, for
- systems that don't support poll().
- """
-
- def __init__(self):
- self._registered = {}
- self._select_args = [[], [], []]
-
- def register(self, fd, *args):
- """
- Only POLLIN is currently supported!
- """
- if len(args) > 1:
- raise TypeError(
- "register expected at most 2 arguments, got " + \
- repr(1 + len(args)))
-
- eventmask = PollConstants.POLLIN | \
- PollConstants.POLLPRI | PollConstants.POLLOUT
- if args:
- eventmask = args[0]
-
- self._registered[fd] = eventmask
- self._select_args = None
-
- def unregister(self, fd):
- self._select_args = None
- del self._registered[fd]
-
- def poll(self, *args):
- if len(args) > 1:
- raise TypeError(
- "poll expected at most 2 arguments, got " + \
- repr(1 + len(args)))
-
- timeout = None
- if args:
- timeout = args[0]
-
- select_args = self._select_args
- if select_args is None:
- select_args = [list(self._registered), [], []]
-
- if timeout is not None:
- select_args = select_args[:]
- # Translate poll() timeout args to select() timeout args:
- #
- # | units | value(s) for indefinite block
- # ---------|--------------|------------------------------
- # poll | milliseconds | omitted, negative, or None
- # ---------|--------------|------------------------------
- # select | seconds | omitted
- # ---------|--------------|------------------------------
-
- if timeout is not None and timeout < 0:
- timeout = None
- if timeout is not None:
- select_args.append(timeout / 1000)
-
- select_events = select.select(*select_args)
- poll_events = []
- for fd in select_events[0]:
- poll_events.append((fd, PollConstants.POLLIN))
- return poll_events
diff --git a/lib/portage/util/_eventloop/asyncio_event_loop.py b/lib/portage/util/_eventloop/asyncio_event_loop.py
index b77728088..6dfac3569 100644
--- a/lib/portage/util/_eventloop/asyncio_event_loop.py
+++ b/lib/portage/util/_eventloop/asyncio_event_loop.py
@@ -1,4 +1,4 @@
-# Copyright 2018-2020 Gentoo Authors
+# Copyright 2018-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import os
@@ -17,11 +17,6 @@ class AsyncioEventLoop(_AbstractEventLoop):
event loop and is minimally compatible with _PortageEventLoop.
"""
- # Use portage's internal event loop in subprocesses, as a workaround
- # for https://bugs.python.org/issue22087, and also
- # https://bugs.python.org/issue29703 which affects pypy3-5.10.1.
- supports_multiprocessing = False
-
def __init__(self, loop=None):
loop = loop or _real_asyncio.get_event_loop()
self._loop = loop
diff --git a/lib/portage/util/bin_entry_point.py b/lib/portage/util/bin_entry_point.py
new file mode 100644
index 000000000..3ead95d98
--- /dev/null
+++ b/lib/portage/util/bin_entry_point.py
@@ -0,0 +1,35 @@
+# Copyright 2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["bin_entry_point"]
+
+import re
+import sys
+
+from portage.const import PORTAGE_BIN_PATH
+from portage import os
+
+
+def bin_entry_point():
+ """
+ Adjust sys.argv[0] to point to a script in PORTAGE_BIN_PATH, and
+ then execute the script, in order to implement entry_points when
+ portage has been installed by pip.
+ """
+ script_path = os.path.join(PORTAGE_BIN_PATH, os.path.basename(sys.argv[0]))
+ if os.access(script_path, os.X_OK):
+ with open(script_path, "rt") as f:
+ shebang = f.readline()
+ python_match = re.search(r"/python[\d\.]*\s+([^/]*)\s+$", shebang)
+ if python_match:
+ sys.argv = [
+ os.path.join(os.path.dirname(sys.argv[0]), "python"),
+ python_match.group(1),
+ script_path,
+ ] + sys.argv[1:]
+ os.execvp(sys.argv[0], sys.argv)
+ sys.argv[0] = script_path
+ os.execvp(sys.argv[0], sys.argv)
+ else:
+ print("File not found:", script_path, file=sys.stderr)
+ return 127
diff --git a/lib/portage/util/digraph.py b/lib/portage/util/digraph.py
index c262cddee..623f2d988 100644
--- a/lib/portage/util/digraph.py
+++ b/lib/portage/util/digraph.py
@@ -311,9 +311,6 @@ class digraph:
def debug_print(self):
def output(s):
writemsg(s, noiselevel=-1)
- # Use unicode_literals to force unicode format
- # strings for python-2.x safety, ensuring that
- # node.__unicode__() is used when necessary.
for node in self.nodes:
output("%s " % (node,))
if self.nodes[node][0]:
diff --git a/lib/portage/util/futures/_asyncio/__init__.py b/lib/portage/util/futures/_asyncio/__init__.py
index 5590963f1..4643697e0 100644
--- a/lib/portage/util/futures/_asyncio/__init__.py
+++ b/lib/portage/util/futures/_asyncio/__init__.py
@@ -25,6 +25,16 @@ import types
import weakref
import asyncio as _real_asyncio
+# pylint: disable=redefined-builtin
+from asyncio import (
+ ALL_COMPLETED,
+ CancelledError,
+ FIRST_COMPLETED,
+ FIRST_EXCEPTION,
+ Future,
+ InvalidStateError,
+ TimeoutError,
+)
try:
import threading
@@ -37,21 +47,6 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.util.futures:compat_coroutine@_compat_coroutine',
)
from portage.util._eventloop.asyncio_event_loop import AsyncioEventLoop as _AsyncioEventLoop
-# pylint: disable=redefined-builtin
-from portage.util.futures.futures import (
- CancelledError,
- Future,
- InvalidStateError,
- TimeoutError,
-)
-# pylint: enable=redefined-builtin
-from portage.util.futures._asyncio.process import _Process
-from portage.util.futures._asyncio.tasks import (
- ALL_COMPLETED,
- FIRST_COMPLETED,
- FIRST_EXCEPTION,
- wait,
-)
_lock = threading.Lock()
@@ -124,27 +119,24 @@ def create_subprocess_exec(*args, **kwargs):
@type loop: event loop
@type kwargs: varies
@param kwargs: subprocess.Popen parameters
- @rtype: asyncio.Future (or compatible)
- @return: subset of asyncio.subprocess.Process interface
+ @rtype: asyncio.subprocess.Process (or compatible)
+ @return: asyncio.subprocess.Process interface
"""
loop = _wrap_loop(kwargs.pop('loop', None))
# Python 3.4 and later implement PEP 446, which makes newly
# created file descriptors non-inheritable by default.
kwargs.setdefault('close_fds', False)
- if isinstance(loop._asyncio_wrapper, _AsyncioEventLoop):
- # Use the real asyncio create_subprocess_exec (loop argument
- # is deprecated since since Python 3.8).
- return _real_asyncio.create_subprocess_exec(*args, **kwargs)
+ # Use the real asyncio create_subprocess_exec (loop argument
+	# is deprecated since Python 3.8).
+ return ensure_future(_real_asyncio.create_subprocess_exec(*args, **kwargs), loop=loop)
- result = loop.create_future()
- result.set_result(_Process(subprocess.Popen(
- args,
- stdin=kwargs.pop('stdin', None),
- stdout=kwargs.pop('stdout', None),
- stderr=kwargs.pop('stderr', None), **kwargs), loop))
-
- return result
+def wait(futures, loop=None, timeout=None, return_when=ALL_COMPLETED):
+ """
+ Wraps asyncio.wait() and omits the loop argument which is not
+ supported since python 3.10.
+ """
+ return _real_asyncio.wait(futures, timeout=timeout, return_when=return_when)
def iscoroutinefunction(func):
diff --git a/lib/portage/util/futures/_asyncio/process.py b/lib/portage/util/futures/_asyncio/process.py
deleted file mode 100644
index 275c9031a..000000000
--- a/lib/portage/util/futures/_asyncio/process.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright 2018-2020 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-import os
-
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.util.futures:asyncio',
- 'portage.util.futures.unix_events:_set_nonblocking',
-)
-from portage.util.futures._asyncio.streams import _reader, _writer
-from portage.util.futures.compat_coroutine import coroutine, coroutine_return
-
-
-class _Process:
- """
- Emulate a subset of the asyncio.subprocess.Process interface,
- for python2.
- """
- def __init__(self, proc, loop):
- """
- @param proc: process instance
- @type proc: subprocess.Popen
- @param loop: asyncio.AbstractEventLoop (or compatible)
- @type loop: event loop
- """
- self._proc = proc
- self._loop = loop
- self.terminate = proc.terminate
- self.kill = proc.kill
- self.send_signal = proc.send_signal
- self.pid = proc.pid
- self._waiters = []
- loop._asyncio_child_watcher.\
- add_child_handler(self.pid, self._proc_exit)
-
- @property
- def returncode(self):
- return self._proc.returncode
-
- @coroutine
- def communicate(self, input=None, loop=None): # pylint: disable=redefined-builtin
- """
- Read data from stdout and stderr, until end-of-file is reached.
- Wait for process to terminate.
-
- @param input: stdin content to write
- @type input: bytes
- @return: tuple (stdout_data, stderr_data)
- @rtype: asyncio.Future (or compatible)
- """
- loop = asyncio._wrap_loop(loop or self._loop)
- futures = []
- for input_file in (self._proc.stdout, self._proc.stderr):
- if input_file is None:
- future = loop.create_future()
- future.set_result(None)
- else:
- future = _reader(input_file, loop=loop)
- futures.append(future)
-
- writer = None
- if input is not None:
- if self._proc.stdin is None:
- raise TypeError('communicate: expected file or int, got {}'.format(type(self._proc.stdin)))
- stdin = self._proc.stdin
- stdin = os.fdopen(stdin, 'wb', 0) if isinstance(stdin, int) else stdin
- _set_nonblocking(stdin.fileno())
- writer = asyncio.ensure_future(_writer(stdin, input, loop=loop), loop=loop)
- writer.add_done_callback(lambda writer: stdin.close())
-
- try:
- yield asyncio.wait(futures + [self.wait(loop=loop)], loop=loop)
- finally:
- if writer is not None:
- if writer.done():
- # Consume expected exceptions.
- try:
- writer.result()
- except EnvironmentError:
- # This is normal if the other end of the pipe was closed.
- pass
- else:
- writer.cancel()
-
- coroutine_return(tuple(future.result() for future in futures))
-
- def wait(self, loop=None):
- """
- Wait for child process to terminate. Set and return returncode attribute.
-
- @return: returncode
- @rtype: asyncio.Future (or compatible)
- """
- loop = asyncio._wrap_loop(loop or self._loop)
- waiter = loop.create_future()
- if self.returncode is None:
- self._waiters.append(waiter)
- waiter.add_done_callback(self._waiter_cancel)
- else:
- waiter.set_result(self.returncode)
- return waiter
-
- def _waiter_cancel(self, waiter):
- if waiter.cancelled():
- try:
- self._waiters.remove(waiter)
- except ValueError:
- pass
-
- def _proc_exit(self, pid, returncode):
- self._proc.returncode = returncode
- waiters = self._waiters
- self._waiters = []
- for waiter in waiters:
- waiter.set_result(returncode)
diff --git a/lib/portage/util/futures/_asyncio/streams.py b/lib/portage/util/futures/_asyncio/streams.py
index ea5882dd3..7a8d4a3e0 100644
--- a/lib/portage/util/futures/_asyncio/streams.py
+++ b/lib/portage/util/futures/_asyncio/streams.py
@@ -1,4 +1,4 @@
-# Copyright 2018-2020 Gentoo Authors
+# Copyright 2018-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -9,7 +9,6 @@ portage.proxy.lazyimport.lazyimport(globals(),
'_emerge.PipeReader:PipeReader',
'portage.util.futures:asyncio',
)
-from portage.util.futures.compat_coroutine import coroutine
def _reader(input_file, loop=None):
@@ -55,8 +54,7 @@ class _Reader:
self._pipe_reader = None
-@coroutine
-def _writer(output_file, content, loop=None):
+async def _writer(output_file, content, loop=DeprecationWarning):
"""
Asynchronously write bytes to output file. The output file is
assumed to be in non-blocking mode. If an EnvironmentError
@@ -68,10 +66,9 @@ def _writer(output_file, content, loop=None):
@type output_file: file object
@param content: content to write
@type content: bytes
- @param loop: asyncio.AbstractEventLoop (or compatible)
- @type loop: event loop
+ @param loop: deprecated
"""
- loop = asyncio._wrap_loop(loop)
+ loop = asyncio.get_event_loop()
fd = output_file.fileno()
while content:
try:
@@ -82,7 +79,7 @@ def _writer(output_file, content, loop=None):
waiter = loop.create_future()
loop.add_writer(fd, lambda: waiter.done() or waiter.set_result(None))
try:
- yield waiter
+ await waiter
finally:
# The loop and output file may have been closed.
if not loop.is_closed():
diff --git a/lib/portage/util/futures/_asyncio/tasks.py b/lib/portage/util/futures/_asyncio/tasks.py
deleted file mode 100644
index c9db3146e..000000000
--- a/lib/portage/util/futures/_asyncio/tasks.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2018-2020 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-___all___ = (
- 'ALL_COMPLETED',
- 'FIRST_COMPLETED',
- 'FIRST_EXCEPTION',
- 'wait',
-)
-
-from asyncio import ALL_COMPLETED, FIRST_COMPLETED, FIRST_EXCEPTION
-
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.util.futures:asyncio',
-)
-
-def wait(futures, loop=None, timeout=None, return_when=ALL_COMPLETED):
- """
- Use portage's internal EventLoop to emulate asyncio.wait:
- https://docs.python.org/3/library/asyncio-task.html#asyncio.wait
-
- @param futures: futures to wait for
- @type futures: asyncio.Future (or compatible)
- @param timeout: number of seconds to wait (wait indefinitely if
- not specified)
- @type timeout: int or float
- @param return_when: indicates when this function should return, must
- be one of the constants ALL_COMPLETED, FIRST_COMPLETED, or
- FIRST_EXCEPTION (default is ALL_COMPLETED)
- @type return_when: object
- @param loop: event loop
- @type loop: EventLoop
- @return: tuple of (done, pending).
- @rtype: asyncio.Future (or compatible)
- """
- loop = asyncio._wrap_loop(loop)
- result_future = loop.create_future()
- _Waiter(futures, timeout, return_when, result_future, loop)
- return result_future
-
-
-class _Waiter:
- def __init__(self, futures, timeout, return_when, result_future, loop):
- self._futures = futures
- self._completed = set()
- self._exceptions = set()
- self._return_when = return_when
- self._result_future = result_future
- self._loop = loop
- self._ready = False
- self._timeout = None
- result_future.add_done_callback(self._cancel_callback)
- for future in self._futures:
- future.add_done_callback(self._done_callback)
- if timeout is not None:
- self._timeout = loop.call_later(timeout, self._timeout_callback)
-
- def _cancel_callback(self, future):
- if future.cancelled():
- self._ready_callback()
-
- def _timeout_callback(self):
- if not self._ready:
- self._ready = True
- self._ready_callback()
-
- def _done_callback(self, future):
- if future.cancelled() or future.exception() is None:
- self._completed.add(id(future))
- else:
- self._exceptions.add(id(future))
- if not self._ready and (
- (self._return_when is FIRST_COMPLETED and self._completed) or
- (self._return_when is FIRST_EXCEPTION and self._exceptions) or
- (len(self._futures) == len(self._completed) + len(self._exceptions))):
- self._ready = True
- # use call_soon in case multiple callbacks complete in quick succession
- self._loop.call_soon(self._ready_callback)
-
- def _ready_callback(self):
- if self._timeout is not None:
- self._timeout.cancel()
- self._timeout = None
- if self._result_future.cancelled():
- return
- done = []
- pending = []
- done_ids = self._completed.union(self._exceptions)
- for future in self._futures:
- if id(future) in done_ids:
- done.append(future)
- else:
- pending.append(future)
- future.remove_done_callback(self._done_callback)
- self._result_future.set_result((set(done), set(pending)))
diff --git a/lib/portage/util/futures/events.py b/lib/portage/util/futures/events.py
deleted file mode 100644
index 85032fcdf..000000000
--- a/lib/portage/util/futures/events.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-__all__ = (
- 'AbstractEventLoopPolicy',
- 'AbstractEventLoop',
-)
-
-import socket
-import subprocess
-
-from asyncio.events import (
- AbstractEventLoop as _AbstractEventLoop,
- AbstractEventLoopPolicy as _AbstractEventLoopPolicy,
-)
-
-
-class AbstractEventLoopPolicy(_AbstractEventLoopPolicy):
- """Abstract policy for accessing the event loop."""
-
- def get_event_loop(self):
- raise NotImplementedError
-
- def set_event_loop(self, loop):
- raise NotImplementedError
-
- def new_event_loop(self):
- raise NotImplementedError
-
- def get_child_watcher(self):
- raise NotImplementedError
-
- def set_child_watcher(self, watcher):
- raise NotImplementedError
-
-
-class AbstractEventLoop(_AbstractEventLoop):
- """Abstract event loop."""
-
- def run_forever(self):
- raise NotImplementedError
-
- def run_until_complete(self, future):
- raise NotImplementedError
-
- def stop(self):
- raise NotImplementedError
-
- def is_running(self):
- raise NotImplementedError
-
- def is_closed(self):
- raise NotImplementedError
-
- def close(self):
- raise NotImplementedError
-
- def shutdown_asyncgens(self):
- raise NotImplementedError
-
- def _timer_handle_cancelled(self, handle):
- raise NotImplementedError
-
- def call_soon(self, callback, *args):
- return self.call_later(0, callback, *args)
-
- def call_later(self, delay, callback, *args):
- raise NotImplementedError
-
- def call_at(self, when, callback, *args):
- raise NotImplementedError
-
- def time(self):
- raise NotImplementedError
-
- def create_future(self):
- raise NotImplementedError
-
- def create_task(self, coro):
- raise NotImplementedError
-
- def call_soon_threadsafe(self, callback, *args):
- raise NotImplementedError
-
- def run_in_executor(self, executor, func, *args):
- raise NotImplementedError
-
- def set_default_executor(self, executor):
- raise NotImplementedError
-
- def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0): # pylint: disable=redefined-builtin
- raise NotImplementedError
-
- def getnameinfo(self, sockaddr, flags=0):
- raise NotImplementedError
-
- def create_connection(self, protocol_factory, host=None, port=None,
- ssl=None, family=0, proto=0, flags=0, sock=None,
- local_addr=None, server_hostname=None):
- raise NotImplementedError
-
- def create_server(self, protocol_factory, host=None, port=None,
- family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
- sock=None, backlog=100, ssl=None, reuse_address=None,
- reuse_port=None):
- raise NotImplementedError
-
- def create_unix_connection(self, protocol_factory, path,
- ssl=None, sock=None,
- server_hostname=None):
- raise NotImplementedError
-
- def create_unix_server(self, protocol_factory, path,
- sock=None, backlog=100, ssl=None):
- raise NotImplementedError
-
- def create_datagram_endpoint(self, protocol_factory,
- local_addr=None, remote_addr=None,
- family=0, proto=0, flags=0,
- reuse_address=None, reuse_port=None,
- allow_broadcast=None, sock=None):
- raise NotImplementedError
-
- def connect_read_pipe(self, protocol_factory, pipe):
- raise NotImplementedError
-
- def connect_write_pipe(self, protocol_factory, pipe):
- raise NotImplementedError
-
- def subprocess_shell(self, protocol_factory, cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- **kwargs):
- raise NotImplementedError
-
- def subprocess_exec(self, protocol_factory, *args, **kwargs):
- for k in ('stdin', 'stdout', 'stderr'):
- kwargs.setdefault(k, subprocess.PIPE)
- raise NotImplementedError
-
- def add_writer(self, fd, callback, *args):
- raise NotImplementedError
-
- def remove_writer(self, fd):
- raise NotImplementedError
-
- def sock_recv(self, sock, nbytes):
- raise NotImplementedError
-
- def sock_sendall(self, sock, data):
- raise NotImplementedError
-
- def sock_connect(self, sock, address):
- raise NotImplementedError
-
- def sock_accept(self, sock):
- raise NotImplementedError
-
- def add_signal_handler(self, sig, callback, *args):
- raise NotImplementedError
-
- def remove_signal_handler(self, sig):
- raise NotImplementedError
-
- def set_task_factory(self, factory):
- raise NotImplementedError
-
- def get_task_factory(self):
- raise NotImplementedError
-
- def get_exception_handler(self):
- raise NotImplementedError
-
- def set_exception_handler(self, handler):
- raise NotImplementedError
-
- def default_exception_handler(self, context):
- raise NotImplementedError
-
- def call_exception_handler(self, context):
- raise NotImplementedError
-
- def get_debug(self):
- raise NotImplementedError
-
- def set_debug(self, enabled):
- raise NotImplementedError
diff --git a/lib/portage/util/futures/futures.py b/lib/portage/util/futures/futures.py
index 839c767a7..3f239890a 100644
--- a/lib/portage/util/futures/futures.py
+++ b/lib/portage/util/futures/futures.py
@@ -1,4 +1,4 @@
-# Copyright 2016-2018 Gentoo Foundation
+# Copyright 2016-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
#
# For compatibility with python versions which do not have the
@@ -19,157 +19,3 @@ from asyncio import (
InvalidStateError,
TimeoutError,
)
-# pylint: enable=redefined-builtin
-
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.util._eventloop.global_event_loop:global_event_loop@_global_event_loop',
-)
-
-_PENDING = 'PENDING'
-_CANCELLED = 'CANCELLED'
-_FINISHED = 'FINISHED'
-
-class _EventLoopFuture:
- """
- This class provides (a subset of) the asyncio.Future interface, for
- use with the EventLoop class, because EventLoop is currently
- missing some of the asyncio.AbstractEventLoop methods that
- asyncio.Future requires.
- """
-
- # Class variables serving as defaults for instance variables.
- _state = _PENDING
- _result = None
- _exception = None
- _loop = None
-
- def __init__(self, loop=None):
- """Initialize the future.
-
- The optional loop argument allows explicitly setting the event
- loop object used by the future. If it's not provided, the future uses
- the default event loop.
- """
- if loop is None:
- self._loop = _global_event_loop()
- else:
- self._loop = loop
- self._callbacks = []
-
- def cancel(self):
- """Cancel the future and schedule callbacks.
-
- If the future is already done or cancelled, return False. Otherwise,
- change the future's state to cancelled, schedule the callbacks and
- return True.
- """
- if self._state != _PENDING:
- return False
- self._state = _CANCELLED
- self._schedule_callbacks()
- return True
-
- def _schedule_callbacks(self):
- """Internal: Ask the event loop to call all callbacks.
-
- The callbacks are scheduled to be called as soon as possible. Also
- clears the callback list.
- """
- callbacks = self._callbacks[:]
- if not callbacks:
- return
-
- self._callbacks[:] = []
- for callback in callbacks:
- self._loop.call_soon(callback, self)
-
- def cancelled(self):
- """Return True if the future was cancelled."""
- return self._state == _CANCELLED
-
- def done(self):
- """Return True if the future is done.
-
- Done means either that a result / exception are available, or that the
- future was cancelled.
- """
- return self._state != _PENDING
-
- def result(self):
- """Return the result this future represents.
-
- If the future has been cancelled, raises CancelledError. If the
- future's result isn't yet available, raises InvalidStateError. If
- the future is done and has an exception set, this exception is raised.
- """
- if self._state == _CANCELLED:
- raise CancelledError()
- if self._state != _FINISHED:
- raise InvalidStateError('Result is not ready.')
- if self._exception is not None:
- raise self._exception
- return self._result
-
- def exception(self):
- """Return the exception that was set on this future.
-
- The exception (or None if no exception was set) is returned only if
- the future is done. If the future has been cancelled, raises
- CancelledError. If the future isn't done yet, raises
- InvalidStateError.
- """
- if self._state == _CANCELLED:
- raise CancelledError
- if self._state != _FINISHED:
- raise InvalidStateError('Exception is not set.')
- return self._exception
-
- def add_done_callback(self, fn):
- """Add a callback to be run when the future becomes done.
-
- The callback is called with a single argument - the future object. If
- the future is already done when this is called, the callback is
- scheduled with call_soon.
- """
- if self._state != _PENDING:
- self._loop.call_soon(fn, self)
- else:
- self._callbacks.append(fn)
-
- def remove_done_callback(self, fn):
- """Remove all instances of a callback from the "call when done" list.
-
- Returns the number of callbacks removed.
- """
- filtered_callbacks = [f for f in self._callbacks if f != fn]
- removed_count = len(self._callbacks) - len(filtered_callbacks)
- if removed_count:
- self._callbacks[:] = filtered_callbacks
- return removed_count
-
- def set_result(self, result):
- """Mark the future done and set its result.
-
- If the future is already done when this method is called, raises
- InvalidStateError.
- """
- if self._state != _PENDING:
- raise InvalidStateError('{}: {!r}'.format(self._state, self))
- self._result = result
- self._state = _FINISHED
- self._schedule_callbacks()
-
- def set_exception(self, exception):
- """Mark the future done and set an exception.
-
- If the future is already done when this method is called, raises
- InvalidStateError.
- """
- if self._state != _PENDING:
- raise InvalidStateError('{}: {!r}'.format(self._state, self))
- if isinstance(exception, type):
- exception = exception()
- self._exception = exception
- self._state = _FINISHED
- self._schedule_callbacks()
diff --git a/lib/portage/util/futures/transports.py b/lib/portage/util/futures/transports.py
deleted file mode 100644
index 016ecbef8..000000000
--- a/lib/portage/util/futures/transports.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from asyncio.transports import Transport as _Transport
-
-
-class _FlowControlMixin(_Transport):
- """
- This is identical to the standard library's private
- asyncio.transports._FlowControlMixin class.
-
- All the logic for (write) flow control in a mix-in base class.
-
- The subclass must implement get_write_buffer_size(). It must call
- _maybe_pause_protocol() whenever the write buffer size increases,
- and _maybe_resume_protocol() whenever it decreases. It may also
- override set_write_buffer_limits() (e.g. to specify different
- defaults).
-
- The subclass constructor must call super().__init__(extra). This
- will call set_write_buffer_limits().
-
- The user may call set_write_buffer_limits() and
- get_write_buffer_size(), and their protocol's pause_writing() and
- resume_writing() may be called.
- """
-
- def __init__(self, extra=None, loop=None):
- super().__init__(extra)
- assert loop is not None
- self._loop = loop
- self._protocol_paused = False
- self._set_write_buffer_limits()
-
- def _maybe_pause_protocol(self):
- size = self.get_write_buffer_size()
- if size <= self._high_water:
- return
- if not self._protocol_paused:
- self._protocol_paused = True
- try:
- self._protocol.pause_writing()
- except Exception as exc:
- self._loop.call_exception_handler({
- 'message': 'protocol.pause_writing() failed',
- 'exception': exc,
- 'transport': self,
- 'protocol': self._protocol,
- })
-
- def _maybe_resume_protocol(self):
- if (self._protocol_paused and
- self.get_write_buffer_size() <= self._low_water):
- self._protocol_paused = False
- try:
- self._protocol.resume_writing()
- except Exception as exc:
- self._loop.call_exception_handler({
- 'message': 'protocol.resume_writing() failed',
- 'exception': exc,
- 'transport': self,
- 'protocol': self._protocol,
- })
-
- def get_write_buffer_limits(self):
- return (self._low_water, self._high_water)
-
- def _set_write_buffer_limits(self, high=None, low=None):
- if high is None:
- if low is None:
- high = 64*1024
- else:
- high = 4*low
- if low is None:
- low = high // 4
- if not high >= low >= 0:
- raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
- (high, low))
- self._high_water = high
- self._low_water = low
-
- def set_write_buffer_limits(self, high=None, low=None):
- self._set_write_buffer_limits(high=high, low=low)
- self._maybe_pause_protocol()
-
- def get_write_buffer_size(self):
- raise NotImplementedError
diff --git a/lib/portage/util/futures/unix_events.py b/lib/portage/util/futures/unix_events.py
index 16a9e12b7..4feee0a3b 100644
--- a/lib/portage/util/futures/unix_events.py
+++ b/lib/portage/util/futures/unix_events.py
@@ -1,4 +1,4 @@
-# Copyright 2018 Gentoo Foundation
+# Copyright 2018-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -7,239 +7,15 @@ __all__ = (
)
import asyncio as _real_asyncio
-from asyncio.base_subprocess import BaseSubprocessTransport as _BaseSubprocessTransport
-from asyncio.unix_events import AbstractChildWatcher as _AbstractChildWatcher
-from asyncio.transports import (
- ReadTransport as _ReadTransport,
- WriteTransport as _WriteTransport,
-)
+from asyncio import events
+from asyncio.unix_events import AbstractChildWatcher
-import errno
import fcntl
-import functools
-import logging
import os
-import socket
-import stat
-import subprocess
-import sys
from portage.util._eventloop.global_event_loop import (
global_event_loop as _global_event_loop,
)
-from portage.util.futures import (
- asyncio,
- events,
-)
-
-from portage.util.futures.transports import _FlowControlMixin
-
-
-class _PortageEventLoop(events.AbstractEventLoop):
- """
- Implementation of asyncio.AbstractEventLoop which wraps portage's
- internal event loop.
- """
-
- def __init__(self, loop):
- """
- @type loop: EventLoop
- @param loop: an instance of portage's internal event loop
- """
- self._loop = loop
- self.run_until_complete = loop.run_until_complete
- self.call_soon = loop.call_soon
- self.call_soon_threadsafe = loop.call_soon_threadsafe
- self.call_later = loop.call_later
- self.call_at = loop.call_at
- self.is_running = loop.is_running
- self.is_closed = loop.is_closed
- self.close = loop.close
- self.create_future = loop.create_future
- self.add_reader = loop.add_reader
- self.remove_reader = loop.remove_reader
- self.add_writer = loop.add_writer
- self.remove_writer = loop.remove_writer
- self.run_in_executor = loop.run_in_executor
- self.time = loop.time
- self.default_exception_handler = loop.default_exception_handler
- self.call_exception_handler = loop.call_exception_handler
- self.set_debug = loop.set_debug
- self.get_debug = loop.get_debug
-
- @property
- def _asyncio_child_watcher(self):
- """
- In order to avoid accessing the internal _loop attribute, portage
- internals should use this property when possible.
-
- @rtype: asyncio.AbstractChildWatcher
- @return: the internal event loop's AbstractChildWatcher interface
- """
- return self._loop._asyncio_child_watcher
-
- @property
- def _asyncio_wrapper(self):
- """
- In order to avoid accessing the internal _loop attribute, portage
- internals should use this property when possible.
-
- @rtype: asyncio.AbstractEventLoop
- @return: the internal event loop's AbstractEventLoop interface
- """
- return self
-
- def create_task(self, coro):
- """
- Schedule a coroutine object.
-
- @type coro: coroutine
- @param coro: a coroutine to schedule
- @rtype: asyncio.Task
- @return: a task object
- """
- return asyncio.Task(coro, loop=self)
-
- def connect_read_pipe(self, protocol_factory, pipe):
- """
- Register read pipe in event loop. Set the pipe to non-blocking mode.
-
- @type protocol_factory: callable
- @param protocol_factory: must instantiate object with Protocol interface
- @type pipe: file
- @param pipe: a pipe to read from
- @rtype: asyncio.Future
- @return: Return pair (transport, protocol), where transport supports the
- ReadTransport interface.
- """
- protocol = protocol_factory()
- result = self.create_future()
- waiter = self.create_future()
- transport = self._make_read_pipe_transport(pipe, protocol, waiter=waiter)
-
- def waiter_callback(waiter):
- try:
- waiter.result()
- except Exception as e:
- transport.close()
- result.set_exception(e)
- else:
- result.set_result((transport, protocol))
-
- waiter.add_done_callback(waiter_callback)
- return result
-
- def connect_write_pipe(self, protocol_factory, pipe):
- """
- Register write pipe in event loop. Set the pipe to non-blocking mode.
-
- @type protocol_factory: callable
- @param protocol_factory: must instantiate object with Protocol interface
- @type pipe: file
- @param pipe: a pipe to write to
- @rtype: asyncio.Future
- @return: Return pair (transport, protocol), where transport supports the
- WriteTransport interface.
- """
- protocol = protocol_factory()
- result = self.create_future()
- waiter = self.create_future()
- transport = self._make_write_pipe_transport(pipe, protocol, waiter)
-
- def waiter_callback(waiter):
- try:
- waiter.result()
- except Exception as e:
- transport.close()
- result.set_exception(e)
- else:
- result.set_result((transport, protocol))
-
- waiter.add_done_callback(waiter_callback)
- return result
-
- def subprocess_exec(self, protocol_factory, program, *args, **kwargs):
- """
- Run subprocesses asynchronously using the subprocess module.
-
- @type protocol_factory: callable
- @param protocol_factory: must instantiate a subclass of the
- asyncio.SubprocessProtocol class
- @type program: str or bytes
- @param program: the program to execute
- @type args: str or bytes
- @param args: program's arguments
- @type kwargs: varies
- @param kwargs: subprocess.Popen parameters
- @rtype: asyncio.Future
- @return: Returns a pair of (transport, protocol), where transport
- is an instance of BaseSubprocessTransport
- """
-
- # python2.7 does not allow arguments with defaults after *args
- stdin = kwargs.pop('stdin', subprocess.PIPE)
- stdout = kwargs.pop('stdout', subprocess.PIPE)
- stderr = kwargs.pop('stderr', subprocess.PIPE)
-
- universal_newlines = kwargs.pop('universal_newlines', False)
- shell = kwargs.pop('shell', False)
- bufsize = kwargs.pop('bufsize', 0)
-
- if universal_newlines:
- raise ValueError("universal_newlines must be False")
- if shell:
- raise ValueError("shell must be False")
- if bufsize != 0:
- raise ValueError("bufsize must be 0")
- popen_args = (program,) + args
- for arg in popen_args:
- if not isinstance(arg, (str, bytes)):
- raise TypeError("program arguments must be "
- "a bytes or text string, not %s"
- % type(arg).__name__)
- result = self.create_future()
- self._make_subprocess_transport(
- result, protocol_factory(), popen_args, False, stdin, stdout, stderr,
- bufsize, **kwargs)
- return result
-
- def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
- extra=None):
- return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
-
- def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
- extra=None):
- return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
-
- def _make_subprocess_transport(self, result, protocol, args, shell,
- stdin, stdout, stderr, bufsize, extra=None, **kwargs):
- waiter = self.create_future()
- transp = _UnixSubprocessTransport(self,
- protocol, args, shell, stdin, stdout, stderr, bufsize,
- waiter=waiter, extra=extra,
- **kwargs)
-
- self._loop._asyncio_child_watcher.add_child_handler(
- transp.get_pid(), self._child_watcher_callback, transp)
-
- waiter.add_done_callback(functools.partial(
- self._subprocess_transport_callback, transp, protocol, result))
-
- def _subprocess_transport_callback(self, transp, protocol, result, waiter):
- if waiter.exception() is None:
- result.set_result((transp, protocol))
- else:
- transp.close()
- wait_transp = asyncio.ensure_future(transp._wait(), loop=self)
- wait_transp.add_done_callback(
- functools.partial(self._subprocess_transport_failure,
- result, waiter.exception()))
-
- def _child_watcher_callback(self, pid, returncode, transp):
- self.call_soon_threadsafe(transp._process_exited, returncode)
-
- def _subprocess_transport_failure(self, result, exception, wait_transp):
- result.set_exception(wait_transp.exception() or exception)
if hasattr(os, 'set_blocking'):
@@ -252,402 +28,6 @@ else:
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
-class _UnixReadPipeTransport(_ReadTransport):
- """
- This is identical to the standard library's private
- asyncio.unix_events._UnixReadPipeTransport class, except that it
- only calls public AbstractEventLoop methods.
- """
-
- max_size = 256 * 1024 # max bytes we read in one event loop iteration
-
- def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
- super().__init__(extra)
- self._extra['pipe'] = pipe
- self._loop = loop
- self._pipe = pipe
- self._fileno = pipe.fileno()
- self._protocol = protocol
- self._closing = False
-
- mode = os.fstat(self._fileno).st_mode
- if not (stat.S_ISFIFO(mode) or
- stat.S_ISSOCK(mode) or
- stat.S_ISCHR(mode)):
- self._pipe = None
- self._fileno = None
- self._protocol = None
- raise ValueError("Pipe transport is for pipes/sockets only.")
-
- _set_nonblocking(self._fileno)
-
- self._loop.call_soon(self._protocol.connection_made, self)
- # only start reading when connection_made() has been called
- self._loop.call_soon(self._loop.add_reader,
- self._fileno, self._read_ready)
- if waiter is not None:
- # only wake up the waiter when connection_made() has been called
- self._loop.call_soon(
- lambda: None if waiter.cancelled() else waiter.set_result(None))
-
- def _read_ready(self):
- try:
- data = os.read(self._fileno, self.max_size)
- except (BlockingIOError, InterruptedError):
- pass
- except OSError as exc:
- self._fatal_error(exc, 'Fatal read error on pipe transport')
- else:
- if data:
- self._protocol.data_received(data)
- else:
- self._closing = True
- self._loop.remove_reader(self._fileno)
- self._loop.call_soon(self._protocol.eof_received)
- self._loop.call_soon(self._call_connection_lost, None)
-
- def pause_reading(self):
- self._loop.remove_reader(self._fileno)
-
- def resume_reading(self):
- self._loop.add_reader(self._fileno, self._read_ready)
-
- def set_protocol(self, protocol):
- self._protocol = protocol
-
- def get_protocol(self):
- return self._protocol
-
- def is_closing(self):
- return self._closing
-
- def close(self):
- if not self._closing:
- self._close(None)
-
- def _fatal_error(self, exc, message='Fatal error on pipe transport'):
- # should be called by exception handler only
- if (isinstance(exc, OSError) and exc.errno == errno.EIO):
- if self._loop.get_debug():
- logging.debug("%r: %s", self, message, exc_info=True)
- else:
- self._loop.call_exception_handler({
- 'message': message,
- 'exception': exc,
- 'transport': self,
- 'protocol': self._protocol,
- })
- self._close(exc)
-
- def _close(self, exc):
- self._closing = True
- self._loop.remove_reader(self._fileno)
- self._loop.call_soon(self._call_connection_lost, exc)
-
- def _call_connection_lost(self, exc):
- try:
- self._protocol.connection_lost(exc)
- finally:
- self._pipe.close()
- self._pipe = None
- self._protocol = None
- self._loop = None
-
-
-class _UnixWritePipeTransport(_FlowControlMixin, _WriteTransport):
- """
- This is identical to the standard library's private
- asyncio.unix_events._UnixWritePipeTransport class, except that it
- only calls public AbstractEventLoop methods.
- """
-
- def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
- super().__init__(extra, loop)
- self._extra['pipe'] = pipe
- self._pipe = pipe
- self._fileno = pipe.fileno()
- self._protocol = protocol
- self._buffer = bytearray()
- self._conn_lost = 0
- self._closing = False # Set when close() or write_eof() called.
-
- mode = os.fstat(self._fileno).st_mode
- is_char = stat.S_ISCHR(mode)
- is_fifo = stat.S_ISFIFO(mode)
- is_socket = stat.S_ISSOCK(mode)
- if not (is_char or is_fifo or is_socket):
- self._pipe = None
- self._fileno = None
- self._protocol = None
- raise ValueError("Pipe transport is only for "
- "pipes, sockets and character devices")
-
- _set_nonblocking(self._fileno)
- self._loop.call_soon(self._protocol.connection_made, self)
-
- # On AIX, the reader trick (to be notified when the read end of the
- # socket is closed) only works for sockets. On other platforms it
- # works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)
- if is_socket or (is_fifo and not sys.platform.startswith("aix")):
- # only start reading when connection_made() has been called
- self._loop.call_soon(self._loop.add_reader,
- self._fileno, self._read_ready)
-
- if waiter is not None:
- # only wake up the waiter when connection_made() has been called
- self._loop.call_soon(
- lambda: None if waiter.cancelled() else waiter.set_result(None))
-
- def get_write_buffer_size(self):
- return len(self._buffer)
-
- def _read_ready(self):
- # Pipe was closed by peer.
- if self._loop.get_debug():
- logging.info("%r was closed by peer", self)
- if self._buffer:
- self._close(BrokenPipeError())
- else:
- self._close()
-
- def write(self, data):
- assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
- if isinstance(data, bytearray):
- data = memoryview(data)
- if not data:
- return
-
- if self._conn_lost or self._closing:
- self._conn_lost += 1
- return
-
- if not self._buffer:
- # Attempt to send it right away first.
- try:
- n = os.write(self._fileno, data)
- except (BlockingIOError, InterruptedError):
- n = 0
- except Exception as exc:
- self._conn_lost += 1
- self._fatal_error(exc, 'Fatal write error on pipe transport')
- return
- if n == len(data):
- return
- if n > 0:
- data = memoryview(data)[n:]
- self._loop.add_writer(self._fileno, self._write_ready)
-
- self._buffer += data
- self._maybe_pause_protocol()
-
- def _write_ready(self):
- assert self._buffer, 'Data should not be empty'
-
- try:
- n = os.write(self._fileno, self._buffer)
- except (BlockingIOError, InterruptedError):
- pass
- except Exception as exc:
- self._buffer.clear()
- self._conn_lost += 1
- # Remove writer here, _fatal_error() doesn't it
- # because _buffer is empty.
- self._loop.remove_writer(self._fileno)
- self._fatal_error(exc, 'Fatal write error on pipe transport')
- else:
- if n == len(self._buffer):
- self._buffer.clear()
- self._loop.remove_writer(self._fileno)
- self._maybe_resume_protocol() # May append to buffer.
- if self._closing:
- self._loop.remove_reader(self._fileno)
- self._call_connection_lost(None)
- return
- if n > 0:
- del self._buffer[:n]
-
- def can_write_eof(self):
- return True
-
- def write_eof(self):
- if self._closing:
- return
- assert self._pipe
- self._closing = True
- if not self._buffer:
- self._loop.remove_reader(self._fileno)
- self._loop.call_soon(self._call_connection_lost, None)
-
- def set_protocol(self, protocol):
- self._protocol = protocol
-
- def get_protocol(self):
- return self._protocol
-
- def is_closing(self):
- return self._closing
-
- def close(self):
- if self._pipe is not None and not self._closing:
- # write_eof is all what we needed to close the write pipe
- self.write_eof()
-
- def abort(self):
- self._close(None)
-
- def _fatal_error(self, exc, message='Fatal error on pipe transport'):
- # should be called by exception handler only
- if isinstance(exc,
- (BrokenPipeError, ConnectionResetError, ConnectionAbortedError)):
- if self._loop.get_debug():
- logging.debug("%r: %s", self, message, exc_info=True)
- else:
- self._loop.call_exception_handler({
- 'message': message,
- 'exception': exc,
- 'transport': self,
- 'protocol': self._protocol,
- })
- self._close(exc)
-
- def _close(self, exc=None):
- self._closing = True
- if self._buffer:
- self._loop.remove_writer(self._fileno)
- self._buffer.clear()
- self._loop.remove_reader(self._fileno)
- self._loop.call_soon(self._call_connection_lost, exc)
-
- def _call_connection_lost(self, exc):
- try:
- self._protocol.connection_lost(exc)
- finally:
- self._pipe.close()
- self._pipe = None
- self._protocol = None
- self._loop = None
-
-
-if hasattr(os, 'set_inheritable'):
- # Python 3.4 and newer
- _set_inheritable = os.set_inheritable
-else:
- def _set_inheritable(fd, inheritable):
- cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
-
- old = fcntl.fcntl(fd, fcntl.F_GETFD)
- if not inheritable:
- fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
- else:
- fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
-
-
-class _UnixSubprocessTransport(_BaseSubprocessTransport):
- """
- This is identical to the standard library's private
- asyncio.unix_events._UnixSubprocessTransport class, except that it
- only calls public AbstractEventLoop methods.
- """
- def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
- stdin_w = None
- if stdin == subprocess.PIPE:
- # Use a socket pair for stdin, since not all platforms
- # support selecting read events on the write end of a
- # socket (which we use in order to detect closing of the
- # other end). Notably this is needed on AIX, and works
- # just fine on other platforms.
- stdin, stdin_w = socket.socketpair()
-
- # Mark the write end of the stdin pipe as non-inheritable,
- # needed by close_fds=False on Python 3.3 and older
- # (Python 3.4 implements the PEP 446, socketpair returns
- # non-inheritable sockets)
- _set_inheritable(stdin_w.fileno(), False)
- self._proc = subprocess.Popen(
- args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
- universal_newlines=False, bufsize=bufsize, **kwargs)
- if stdin_w is not None:
- stdin.close()
- self._proc.stdin = os.fdopen(stdin_w.detach(), 'wb', bufsize)
-
-
-class AbstractChildWatcher(_AbstractChildWatcher):
- def add_child_handler(self, pid, callback, *args):
- raise NotImplementedError()
-
- def remove_child_handler(self, pid):
- raise NotImplementedError()
-
- def attach_loop(self, loop):
- raise NotImplementedError()
-
- def close(self):
- raise NotImplementedError()
-
- def __enter__(self):
- raise NotImplementedError()
-
- def __exit__(self, a, b, c):
- raise NotImplementedError()
-
-
-class _PortageChildWatcher(_AbstractChildWatcher):
- def __init__(self, loop):
- """
- @type loop: EventLoop
- @param loop: an instance of portage's internal event loop
- """
- self._loop = loop
- self._callbacks = {}
-
- def close(self):
- pass
-
- def __enter__(self):
- return self
-
- def __exit__(self, a, b, c):
- pass
-
- def _child_exit(self, pid, status, data):
- self._callbacks.pop(pid)
- callback, args = data
- callback(pid, self._compute_returncode(status), *args)
-
- def _compute_returncode(self, status):
- if os.WIFSIGNALED(status):
- return -os.WTERMSIG(status)
- if os.WIFEXITED(status):
- return os.WEXITSTATUS(status)
- return status
-
- def add_child_handler(self, pid, callback, *args):
- """
- Register a new child handler.
-
- Arrange for callback(pid, returncode, *args) to be called when
- process 'pid' terminates. Specifying another callback for the same
- process replaces the previous handler.
- """
- source_id = self._callbacks.get(pid)
- if source_id is not None:
- self._loop.source_remove(source_id)
- self._callbacks[pid] = self._loop.child_watch_add(
- pid, self._child_exit, data=(callback, args))
-
- def remove_child_handler(self, pid):
- """
- Removes the handler for process 'pid'.
-
- The function returns True if the handler was successfully removed,
- False if there was nothing to remove.
- """
- source_id = self._callbacks.pop(pid, None)
- if source_id is not None:
- return self._loop.source_remove(source_id)
- return False
-
-
class _PortageEventLoopPolicy(events.AbstractEventLoopPolicy):
"""
Implementation of asyncio.AbstractEventLoopPolicy based on portage's
diff --git a/lib/portage/util/path.py b/lib/portage/util/path.py
index a0b96c7f3..f174bd71f 100644
--- a/lib/portage/util/path.py
+++ b/lib/portage/util/path.py
@@ -1,4 +1,4 @@
-# Copyright 2014 Gentoo Foundation
+# Copyright 2014-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -45,4 +45,6 @@ def iter_parents(path):
yield path
while path != os.sep:
path = os.path.dirname(path)
+ if not path:
+ break
yield path
diff --git a/lib/portage/util/shelve.py b/lib/portage/util/shelve.py
new file mode 100644
index 000000000..f070ee753
--- /dev/null
+++ b/lib/portage/util/shelve.py
@@ -0,0 +1,58 @@
+# Copyright 2020-2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import pickle
+import shelve
+
+
+def open_shelve(db_file, flag="r"):
+ """
+ The optional flag parameter has the same interpretation as the flag
+ parameter of dbm.open()
+ """
+ try:
+ db = shelve.open(db_file, flag=flag)
+ except ImportError as e:
+ # ImportError has different attributes for python2 vs. python3
+ if getattr(e, "name", None) == "bsddb" or getattr(e, "message", None) in (
+ "No module named bsddb",
+ "No module named _bsddb",
+ ):
+ from bsddb3 import dbshelve
+
+ db = dbshelve.open(db_file)
+ else:
+ raise
+
+ return db
+
+
+def dump(args):
+ src = open_shelve(args.src, flag="r")
+ try:
+ with open(args.dest, "wb") as dest:
+ for key in src:
+ try:
+ value = src[key]
+ except KeyError:
+ logging.exception(key)
+ continue
+ pickle.dump((key, value), dest)
+ finally:
+ src.close()
+
+
+def restore(args):
+ dest = open_shelve(args.dest, flag="c")
+ try:
+ with open(args.src, "rb") as src:
+ while True:
+ try:
+ k, v = pickle.load(src)
+ except EOFError:
+ break
+ else:
+ dest[k] = v
+ finally:
+ dest.close()
diff --git a/lib/portage/versions.py b/lib/portage/versions.py
index d4ab9d199..84809386a 100644
--- a/lib/portage/versions.py
+++ b/lib/portage/versions.py
@@ -375,11 +375,11 @@ def catpkgsplit(mydata, silent=1, eapi=None):
class _pkg_str(str):
"""
- This class represents a cpv. It inherits from str (unicode in python2) and
- has attributes that cache results for use by functions like catpkgsplit and
- cpv_getkey which are called frequently (especially in match_from_list).
- Instances are typically created in dbapi.cp_list() or the Atom contructor,
- and propagate from there. Generally, code that pickles these objects will
+ This class represents a cpv. It inherits from str and has attributes
+ that cache results for use by functions like catpkgsplit and cpv_getkey
+ which are called frequently (especially in match_from_list). Instances
+	are typically created in dbapi.cp_list() or the Atom constructor, and
+ propagate from there. Generally, code that pickles these objects will
manually convert them to a plain unicode object first.
Instances of this class will have missing attributes for metadata that
diff --git a/man/emaint.1 b/man/emaint.1
index d244756e9..682487c74 100644
--- a/man/emaint.1
+++ b/man/emaint.1
@@ -1,4 +1,4 @@
-.TH "EMAINT" "1" "Jan 2017" "Portage VERSION" "Portage"
+.TH "EMAINT" "1" "Feb 2021" "Portage VERSION" "Portage"
.SH NAME
emaint \- performs package management related system health checks and maintenance
.SH SYNOPSIS
@@ -82,6 +82,10 @@ OPTION
.BR \-t \ \fINUM\fR,\ \-\-time \ \fINUM\fR
Changes the minimum age \fINUM\fR (in days) of the logs to be listed or
deleted.
+.SH OPTIONS merges command only
+.TP
+.BR \-y ", " \-\-yes
+Do not prompt for emerge invocations.
.SH OPTIONS sync command only
.TP
.BR \-a ", " \-\-auto
diff --git a/man/emerge.1 b/man/emerge.1
index 8f9416db5..edd5833be 100644
--- a/man/emerge.1
+++ b/man/emerge.1
@@ -1,4 +1,4 @@
-.TH "EMERGE" "1" "Nov 2020" "Portage VERSION" "Portage"
+.TH "EMERGE" "1" "Mar 2021" "Portage VERSION" "Portage"
.SH "NAME"
emerge \- Command\-line interface to the Portage system
.SH "SYNOPSIS"
@@ -155,7 +155,7 @@ unmerge matched packages that have no reverse dependencies. Use
\fB\-\-depclean\fR together with \fB\-\-verbose\fR to show reverse
dependencies.
.TP
-.BR "\-\-deselect [ y | n ]"
+.BR "\-\-deselect [ y | n ]" ", " \-W
Remove atoms and/or sets from the world file. This action is implied
by uninstall actions, including \fB-\-depclean\fR,
\fB-\-prune\fR and \fB-\-unmerge\fR. Use \fB-\-deselect=n\fR
@@ -356,8 +356,8 @@ intended to be set in the \fBmake.conf\fR(5)
Automatically unmask packages and generate package.use
settings as necessary to satisfy dependencies. This option
is disabled by default, except for portions of behavior
-which are controlled by the \fB\-\-autounmask\-use\fR and
-\fB\-\-autounmask\-license\fR options (\fB\-\-autounmask=n\fR
+which are controlled by the \fB\-\-autounmask\-use\fR
+(\fB\-\-autounmask=n\fR
disables autounmask behavior entirely). If any configuration
changes are required, then they will be displayed
after the merge list and emerge will immediately
@@ -413,14 +413,12 @@ will be created. This leads to unsatisfied dependencies if
no other solution exists.
.TP
.BR "\-\-autounmask\-license < y | n >"
-Allow autounmask package.license changes. This option is enabled by default
-(either \fB\-\-autounmask=n\fR or \fB\-\-autounmask\-license=n\fR disables
-it). The \fBEMERGE_DEFAULT_OPTS\fR variable may be used to
-disable this option by default in \fBmake.conf\fR(5).
+Allow autounmask package.license changes.
.TP
.BR "\-\-autounmask\-use < y | n >"
Allow autounmask package.use changes. This option is enabled by default
-(either \fB\-\-autounmask=n\fR or \fB\-\-autounmask\-use=n\fR disables
+(any of \fB\-\-autounmask=n\fR, \fB\-\-autounmask\-use=n\fR,
+or \fB\-\-binpkg\-respect\-use=y\fR disables
it). The \fBEMERGE_DEFAULT_OPTS\fR variable may be used to
disable this option by default in \fBmake.conf\fR(5).
.TP
@@ -451,7 +449,9 @@ Tells emerge to ignore binary packages if their USE flags
don't match the current configuration. In order to help avoid issues
with resolving inconsistent USE flag settings, this option is
automatically enabled unless the \fB\-\-usepkgonly\fR option
-is enabled.
+is enabled. If \fB\-\-binpkg\-respect\-use\fR is given explicitly,
+then it implies \fB\-\-autounmask\-use=n\fR, because these options
+naturally oppose each other.
.TP
.BR "\-\-buildpkg [ y | n ]" ", " \-b
Tells emerge to build binary packages for all ebuilds processed in
diff --git a/man/emirrordist.1 b/man/emirrordist.1
index 45108ef8c..d66a1849d 100644
--- a/man/emirrordist.1
+++ b/man/emirrordist.1
@@ -1,4 +1,4 @@
-.TH "EMIRRORDIST" "1" "Dec 2015" "Portage VERSION" "Portage"
+.TH "EMIRRORDIST" "1" "Feb 2021" "Portage VERSION" "Portage"
.SH "NAME"
emirrordist \- a fetch tool for mirroring of package distfiles
.SH SYNOPSIS
@@ -66,6 +66,10 @@ reporting purposes. Opened in append mode.
Log file for scheduled deletions, with tab\-delimited output, for
reporting purposes. Overwritten with each run.
.TP
+\fB\-\-content\-db\fR=\fIFILE\fR
+Database file used to pair content digests with distfiles names
+(required for content\-hash layout).
+.TP
\fB\-\-delete\fR
Enable deletion of unused distfiles.
.TP
diff --git a/man/make.conf.5 b/man/make.conf.5
index 8d325cb81..3ab1048a4 100644
--- a/man/make.conf.5
+++ b/man/make.conf.5
@@ -1,4 +1,4 @@
-.TH "MAKE.CONF" "5" "Jan 2021" "Portage VERSION" "Portage"
+.TH "MAKE.CONF" "5" "Feb 2021" "Portage VERSION" "Portage"
.SH "NAME"
make.conf \- custom settings for Portage
.SH "SYNOPSIS"
@@ -316,9 +316,10 @@ per\-package PATH attribute in the 'Packages' index directs them to
download the file from the correct URI, and they automatically use
BUILD_TIME metadata to select the latest builds.
-There is currently no automated way to prune old builds from PKGDIR,
-although it is possible to remove packages manually, and then run
+The \fBeclean\-pkg\fR command can be used to prune old builds from PKGDIR.
+It is also possible to remove packages manually, and then run
\(aqemaint \-\-fix binhost' to update the ${PKGDIR}/Packages index.
+This feature is enabled by default.
.TP
.B buildpkg
Binary packages will be created for all packages that are merged. Also see
@@ -761,7 +762,7 @@ ___
l l l.
Placeholder Meaning Example
-\\${DIGESTS} Space separated list of file digests blake2b <hexdigest> sha512 <hexdigest>
+\\${DIGESTS} Space separated list of file digests blake2b:<hexdigest> sha512:<hexdigest>
.TE
.RE
.TP
@@ -854,7 +855,7 @@ Defaults to false.
Defines the location where created .tbz2 binary packages will be
stored when the \fBemerge\fR(1) \fB\-\-buildpkg\fR option is enabled.
By default, a given package is stored in a subdirectory corresponding
-to it's category. However, for backward compatibility with the layout
+to its category. However, for backward compatibility with the layout
used by older versions of portage, if the \fI${PKGDIR}/All\fR directory
exists then all packages will be stored inside of it and symlinks to
the packages will be created in the category subdirectories.
@@ -883,7 +884,7 @@ build packages for clients. It defines the URI header field for the package
index file which is located at ${PKGDIR}/Packages. Clients that have
\fBPORTAGE_BINHOST\fR properly configured will be able to fetch the index and
use the URI header field as a base URI for fetching binary packages. If the URI
-header field is not defined then the client will use it's ${PORTAGE_BINHOST}
+header field is not defined then the client will use its ${PORTAGE_BINHOST}
setting as the base URI.
.TP
.B PORTAGE_BINPKG_FORMAT
@@ -994,7 +995,7 @@ Defaults to 0.
.TP
\fBPORTAGE_IONICE_COMMAND\fR = \fI[ionice command string]\fR
This variable should contain a command for portage to call in order
-to adjust the io priority of portage and it's subprocesses. The command
+to adjust the io priority of portage and its subprocesses. The command
string should contain a \\${PID} place-holder that will be substituted
with an integer pid. For example, a value of "ionice \-c 3 \-p \\${PID}"
will set idle io priority. For more information about ionice, see
@@ -1011,7 +1012,7 @@ Logs are created only when this is set. They are stored as
${CATEGORY}:${PF}:YYYYMMDD\-HHMMSS.log in the directory specified. If the
directory does not exist, it will be created automatically and group
permissions will be applied to it. If the directory already exists, portage
-will not modify it's permissions.
+will not modify its permissions.
.TP
.B PORTAGE_LOGDIR_CLEAN
This variable should contain a command for portage to call in order
diff --git a/man/portage.5 b/man/portage.5
index f6ec1b0fa..247ec5ab0 100644
--- a/man/portage.5
+++ b/man/portage.5
@@ -1,4 +1,4 @@
-.TH "PORTAGE" "5" "Sep 2020" "Portage VERSION" "Portage"
+.TH "PORTAGE" "5" "Mar 2021" "Portage VERSION" "Portage"
.SH NAME
portage \- the heart of Gentoo
.SH "DESCRIPTION"
@@ -1466,7 +1466,7 @@ The EAPI to use for profiles when unspecified. This attribute is
supported only if profile-default-eapi is included in profile-formats.
.TP
.BR profile\-formats " = [pms] [portage-1] [portage-2] [profile-bashrcs] \
-[profile-set] [profile-default-eapi] [build-id]"
+[profile-set] [profile-default-eapi] [build-id] [profile-repo-deps]"
Control functionality available to profiles in this repo such as which files
may be dirs, or the syntax available in parent files. Use "portage-2" if you're
unsure. The default is "portage-1-compat" mode which is meant to be compatible
@@ -1481,6 +1481,8 @@ dependency atoms in the profile to refer to specific builds (see the
binpkg\-multi\-instance FEATURES setting in \fBmake.conf\fR(5)). A
build\-id atom is identical to a version-specific atom, except that the
version is followed by a hyphen and an integer build\-id.
+Setting profile\-repo\-deps allows dependency atoms in the profile to
+refer to specific repositories.
.RE
.RE
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..646e59c96
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools",
+ "wheel",
+]
+build-backend = "setuptools.build_meta"
diff --git a/repoman/RELEASE-NOTES b/repoman/RELEASE-NOTES
index a09845b79..df34803cc 100644
--- a/repoman/RELEASE-NOTES
+++ b/repoman/RELEASE-NOTES
@@ -1,6 +1,12 @@
Release Notes; upgrade information mainly.
Features/major bugfixes are listed in NEWS
+repoman-3.0.3
+==================================
+* Bug Fixes:
+ - Bug 608664 variable.phase check like pkgcheck VariableScopeCheck
+ - Bug 692486 Change message for preserve_old_lib
+
repoman-3.0.2
==================================
* Bug Fixes:
diff --git a/repoman/bin/repoman b/repoman/bin/repoman
index 29c630772..c52ab15b7 100755
--- a/repoman/bin/repoman
+++ b/repoman/bin/repoman
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
"""Ebuild and tree health checks and maintenance utilities.
@@ -41,7 +41,7 @@ from portage.util._eventloop.global_event_loop import global_event_loop
from repoman.main import repoman_main
try:
- sys.exit(repoman_main(sys.argv[1:]))
+ sys.exit(repoman_main(sys.argv))
except IOError as e:
if e.errno == errno.EACCES:
print("\nRepoman: Need user access")
diff --git a/repoman/cnf/linechecks/linechecks.yaml b/repoman/cnf/linechecks/linechecks.yaml
index 2182b467a..c6c72ab26 100644
--- a/repoman/cnf/linechecks/linechecks.yaml
+++ b/repoman/cnf/linechecks/linechecks.yaml
@@ -25,7 +25,7 @@ errors:
DEPRECATED_BINDNOW_FLAGS: 'Deprecated bindnow-flags call'
EAPI_DEFINED_AFTER_INHERIT: 'EAPI defined after inherit'
NO_AS_NEEDED: 'Upstream asneeded linking bug (no-as-needed)'
- PRESERVE_OLD_LIB: 'Ebuild calls deprecated preserve_old_lib'
+ PRESERVE_OLD_LIB: 'Ebuild calls preserve_old_lib function reserved for system packages'
BUILT_WITH_USE: 'built_with_use'
NO_OFFSET_WITH_HELPERS: 'Helper function is used with D, ROOT, ED, EROOT or EPREFIX'
USEQ_ERROR: 'Ebuild calls deprecated useq function'
diff --git a/repoman/cnf/qa_data/qa_data.yaml b/repoman/cnf/qa_data/qa_data.yaml
index 29a3d6e9f..530c8c806 100644
--- a/repoman/cnf/qa_data/qa_data.yaml
+++ b/repoman/cnf/qa_data/qa_data.yaml
@@ -129,6 +129,7 @@ qahelp:
obsolete: "The ebuild makes use of an obsolete construct"
variable:
invalidchar: "A variable contains an invalid character that is not part of the ASCII character set"
+    phase: "Variable reference found within scope of incorrect ebuild phase as specified by PMS"
readonly: "Assigning a readonly variable"
usedwithhelpers: "Ebuild uses D, ROOT, BROOT, ED, EROOT or EPREFIX with helpers"
virtual:
diff --git a/repoman/cnf/repository/qa_data.yaml b/repoman/cnf/repository/qa_data.yaml
index 3fe6b53d5..2249000c3 100644
--- a/repoman/cnf/repository/qa_data.yaml
+++ b/repoman/cnf/repository/qa_data.yaml
@@ -80,6 +80,7 @@ qawarnings:
- usage.obsolete
- upstream.workaround
- uri.https
+ - variable.phase
- virtual.suspect
- wxwidgets.eclassnotused
diff --git a/repoman/cnf/repository/repository.yaml b/repoman/cnf/repository/repository.yaml
index ad00d18c1..dbc1decaa 100644
--- a/repoman/cnf/repository/repository.yaml
+++ b/repoman/cnf/repository/repository.yaml
@@ -61,6 +61,7 @@ linechecks_modules:
emakeparallel
srccompileeconf
srcunpackpatches
+ pmsvariablerefphasescope
portageinternal
portageinternalvariableassignment
quote
diff --git a/repoman/lib/repoman/actions.py b/repoman/lib/repoman/actions.py
index 0f89572b9..351df07be 100644
--- a/repoman/lib/repoman/actions.py
+++ b/repoman/lib/repoman/actions.py
@@ -307,7 +307,6 @@ the whole commit message to abort.
utilities.repoman_sez(
"\"Make your QA payment on time"
" and you'll never see the likes of me.\"\n")
- sys.exit(1)
def _fail(self, result, can_force):
diff --git a/repoman/lib/repoman/argparser.py b/repoman/lib/repoman/argparser.py
index 6d545ccca..495fdfa60 100644
--- a/repoman/lib/repoman/argparser.py
+++ b/repoman/lib/repoman/argparser.py
@@ -1,12 +1,11 @@
# repoman: Argument parser
-# Copyright 2007-2019 Gentoo Authors
+# Copyright 2007-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
"""This module contains functions used in Repoman to parse CLI arguments."""
import argparse
import logging
-import sys
# import our initialized portage instance
from repoman._portage import portage
@@ -222,7 +221,7 @@ def parse_args(argv, repoman_default_opts):
if not opts.ignore_default_opts:
default_opts = util.shlex_split(repoman_default_opts)
if default_opts:
- opts = parser.parse_args(default_opts + sys.argv[1:])
+ opts = parser.parse_args(default_opts + argv[1:])
args = []
if opts.mode is not None:
diff --git a/repoman/lib/repoman/main.py b/repoman/lib/repoman/main.py
index 50b99c21f..d84abf3ae 100755
--- a/repoman/lib/repoman/main.py
+++ b/repoman/lib/repoman/main.py
@@ -3,6 +3,7 @@
# Copyright 1999-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import collections
import io
import logging
import sys
@@ -47,7 +48,26 @@ portage.util.initialize_logger(LOGLEVEL)
VALID_VERSIONS = [1,]
+_repoman_main_vars = collections.namedtuple("_repoman_main_vars", (
+ "can_force",
+ "exitcode",
+ "options",
+ "qadata",
+ "repo_settings",
+ "scanner",
+ "vcs_settings",
+))
+
+
def repoman_main(argv):
+ repoman_vars = _repoman_init(argv)
+ if repoman_vars.exitcode is not None:
+ return repoman_vars.exitcode
+ result = _repoman_scan(*repoman_vars)
+ return _handle_result(*repoman_vars, result)
+
+
+def _repoman_init(argv):
config_root = os.environ.get("PORTAGE_CONFIGROOT")
repoman_settings = portage.config(config_root=config_root, local_config=False)
repoman_settings.valid_versions = VALID_VERSIONS
@@ -58,11 +78,11 @@ def repoman_main(argv):
nocolor()
options, arguments = parse_args(
- sys.argv, repoman_settings.get("REPOMAN_DEFAULT_OPTS", ""))
+ argv, repoman_settings.get("REPOMAN_DEFAULT_OPTS", ""))
if options.version:
print("Repoman", VERSION, "(portage-%s)" % portage.VERSION)
- sys.exit(0)
+ return _repoman_main_vars(None, 0, None, None, None, None, None)
logger = logging.getLogger()
@@ -75,10 +95,15 @@ def repoman_main(argv):
# something other than a QA issue) makes it impossible to
# commit (like if Manifest generation fails).
can_force = ExtendedFuture(True)
+ repo_settings, vcs_settings, scanner, qadata = _create_scanner(options, can_force, config_root, repoman_settings)
+ return _repoman_main_vars(can_force, None, options, qadata, repo_settings, scanner, vcs_settings)
+
+
+def _create_scanner(options, can_force, config_root, repoman_settings):
portdir, portdir_overlay, mydir = utilities.FindPortdir(repoman_settings)
if portdir is None:
- sys.exit(1)
+ return (None, None, None, None)
myreporoot = os.path.basename(portdir_overlay)
myreporoot += mydir[len(portdir_overlay):]
@@ -117,6 +142,10 @@ def repoman_main(argv):
# Perform the main checks
scanner = Scanner(repo_settings, myreporoot, config_root, options,
vcs_settings, mydir, env)
+ return repo_settings, vcs_settings, scanner, qadata
+
+
+def _repoman_scan(can_force, exitcode, options, qadata, repo_settings, scanner, vcs_settings):
scanner.scan_pkgs(can_force)
if options.if_modified == "y" and len(scanner.effective_scanlist) < 1:
@@ -142,6 +171,10 @@ def repoman_main(argv):
(result['warn'] and not (options.quiet or options.mode == "scan")):
result['full'] = 0
+ return result
+
+
+def _handle_result(can_force, exitcode, options, qadata, repo_settings, scanner, vcs_settings, result):
commitmessage = None
if options.commitmsg:
commitmessage = options.commitmsg
@@ -189,5 +222,7 @@ def repoman_main(argv):
if actions.inform(can_force.get(), result):
# perform any other actions
actions.perform(qa_output)
+ elif result['fail']:
+ return 1
- sys.exit(0)
+ return 0
diff --git a/repoman/lib/repoman/modules/linechecks/deprecated/deprecated.py b/repoman/lib/repoman/modules/linechecks/deprecated/deprecated.py
index d1a590f1d..7c65c9ca0 100644
--- a/repoman/lib/repoman/modules/linechecks/deprecated/deprecated.py
+++ b/repoman/lib/repoman/modules/linechecks/deprecated/deprecated.py
@@ -19,7 +19,7 @@ class DeprecatedHasq(LineCheck):
class PreserveOldLib(LineCheck):
- """Check for calls to the deprecated preserve_old_lib function."""
+ """Check for calls to the preserve_old_lib function reserved for system packages."""
repoman_check_name = 'ebuild.minorsyn'
re = re.compile(r'.*preserve_old_lib')
error = 'PRESERVE_OLD_LIB'
diff --git a/repoman/lib/repoman/modules/linechecks/phases/__init__.py b/repoman/lib/repoman/modules/linechecks/phases/__init__.py
index 686c675d2..e166b31a3 100644
--- a/repoman/lib/repoman/modules/linechecks/phases/__init__.py
+++ b/repoman/lib/repoman/modules/linechecks/phases/__init__.py
@@ -29,6 +29,12 @@ module_spec = {
'class': "SrcUnpackPatches",
'description': doc,
},
+ 'pmsvariablerefphasescope-check': {
+ 'name': "pmsvariablerefphasescope",
+ 'sourcefile': "phase",
+ 'class': "PMSVariableReference",
+ 'description': doc,
+ },
},
'version': 1,
}
diff --git a/repoman/lib/repoman/modules/linechecks/phases/phase.py b/repoman/lib/repoman/modules/linechecks/phases/phase.py
index 74cf4608f..433e93601 100644
--- a/repoman/lib/repoman/modules/linechecks/phases/phase.py
+++ b/repoman/lib/repoman/modules/linechecks/phases/phase.py
@@ -1,7 +1,19 @@
+import fnmatch
import re
-
-from portage.eapi import eapi_has_src_prepare_and_src_configure
+import types
+
+from portage.eapi import (
+ eapi_has_broot,
+ eapi_has_sysroot,
+ eapi_has_src_prepare_and_src_configure,
+ eapi_exports_AA,
+ eapi_exports_replace_vars,
+ eapi_exports_ECLASSDIR,
+ eapi_exports_PORTDIR,
+ eapi_supports_prefix,
+ eapi_exports_merge_type,
+)
from repoman.modules.linechecks.base import LineCheck
@@ -9,11 +21,22 @@ class PhaseCheck(LineCheck):
""" basic class for function detection """
func_end_re = re.compile(r'^\}$')
- phases_re = re.compile('(%s)' % '|'.join((
- 'pkg_pretend', 'pkg_setup', 'src_unpack', 'src_prepare',
- 'src_configure', 'src_compile', 'src_test', 'src_install',
- 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm',
- 'pkg_config')))
+ phase_funcs = (
+ 'pkg_pretend',
+ 'pkg_setup',
+ 'src_unpack',
+ 'src_prepare',
+ 'src_configure',
+ 'src_compile',
+ 'src_test',
+ 'src_install',
+ 'pkg_preinst',
+ 'pkg_postinst',
+ 'pkg_prerm',
+ 'pkg_postrm',
+ 'pkg_config',
+ )
+ phases_re = re.compile('(%s)' % '|'.join(phase_funcs))
in_phase = ''
def check(self, num, line):
@@ -69,3 +92,98 @@ class SrcUnpackPatches(PhaseCheck):
if m is not None:
return ("'%s'" % m.group(1)) + \
" call should be moved to src_prepare"
+
+# References
+# - https://projects.gentoo.org/pms/7/pms.html#x1-10900011.1
+# - https://pkgcore.github.io/pkgcheck/_modules/pkgcheck/checks/codingstyle.html#VariableScopeCheck
+_pms_vars = (
+ ("A", None, ("src_*", "pkg_nofetch")),
+ ("AA", eapi_exports_AA, ("src_*", "pkg_nofetch")),
+ ("FILESDIR", None, ("src_*",)),
+ ("DISTDIR", None, ("src_*",)),
+ ("WORKDIR", None, ("src_*",)),
+ ("S", None, ("src_*",)),
+ ("PORTDIR", eapi_exports_PORTDIR, ("src_*",)),
+ ("ECLASSDIR", eapi_exports_ECLASSDIR, ("src_*",)),
+ ("ROOT", None, ("pkg_*",)),
+ ("EROOT", eapi_supports_prefix, ("pkg_*",)),
+ ("SYSROOT", eapi_has_sysroot, ("src_*", "pkg_setup")),
+ ("ESYSROOT", eapi_has_sysroot, ("src_*", "pkg_setup")),
+ ("BROOT", eapi_has_broot, ("src_*", "pkg_setup")),
+	("D", None, ("src_install", "pkg_preinst", "pkg_postinst")),
+	("ED", eapi_supports_prefix, ("src_install", "pkg_preinst", "pkg_postinst")),
+ ("DESTTREE", None, ("src_install",)),
+ ("INSDESTTREE", None, ("src_install",)),
+ ("MERGE_TYPE", eapi_exports_merge_type, ("pkg_*",)),
+ ("REPLACING_VERSIONS", eapi_exports_replace_vars, ("pkg_*",)),
+ ("REPLACED_BY_VERSION", eapi_exports_replace_vars, ("pkg_prerm", "pkg_postrm")),
+)
+
+
+def _compile_phases():
+ phase_vars = {}
+ for phase_func in PhaseCheck.phase_funcs:
+ for variable, eapi_filter, allowed_scopes in _pms_vars:
+ allowed = False
+ for scope in allowed_scopes:
+ if fnmatch.fnmatch(phase_func, scope):
+ allowed = True
+ break
+
+ if not allowed:
+ phase_vars.setdefault(phase_func, []).append((variable, eapi_filter))
+
+ phase_info = {}
+ for phase_func, prohibited_vars in phase_vars.items():
+ phase_func_vars = []
+ for variable, eapi_filter in prohibited_vars:
+ phase_func_vars.append(variable)
+ phase_obj = phase_info[phase_func] = types.SimpleNamespace()
+ phase_obj.prohibited_vars = dict(prohibited_vars)
+ phase_obj.var_names = "(%s)" % "|".join(
+ variable for variable, eapi_filter in prohibited_vars
+ )
+ phase_obj.var_reference = re.compile(
+ r"\$(\{|)%s(\}|\W)" % (phase_obj.var_names,)
+ )
+
+ return phase_info
+
+
+class PMSVariableReference(PhaseCheck):
+ """Check phase scope for references to variables specified by PMS"""
+
+ repoman_check_name = "variable.phase"
+ phase_info = _compile_phases()
+
+ def new(self, pkg):
+ self._eapi = pkg.eapi
+
+ def end(self):
+ self._eapi = None
+
+ def phase_check(self, num, line):
+ try:
+ phase_info = self.phase_info[self.in_phase]
+ except KeyError:
+ return
+
+ eapi = self._eapi
+ issues = []
+ for m in phase_info.var_reference.finditer(line):
+ open_brace = m.group(1)
+ var_name = m.group(2)
+ close_brace = m.group(3)
+ # discard \W if matched by (\}|\W)
+ close_brace = close_brace if close_brace == "}" else ""
+ if bool(open_brace) != bool(close_brace):
+ continue
+ var_name = m.group(2)
+ eapi_filter = phase_info.prohibited_vars[var_name]
+ if eapi_filter is not None and not eapi_filter(eapi):
+ continue
+ issues.append(
+ "phase %s: EAPI %s: variable %s: Forbidden reference to variable specified by PMS"
+ % (self.in_phase, eapi, var_name)
+ )
+ return issues
diff --git a/repoman/lib/repoman/modules/scan/module.py b/repoman/lib/repoman/modules/scan/module.py
index 3321cb224..41c1becfc 100644
--- a/repoman/lib/repoman/modules/scan/module.py
+++ b/repoman/lib/repoman/modules/scan/module.py
@@ -31,9 +31,9 @@ class ModuleConfig:
if repository_modules:
self.configpaths = [os.path.join(path, 'repository.yaml') for path in configpaths]
elif _not_installed:
- self.configpaths = [os.path.realpath(os.path.join(os.path.dirname(
+ self.configpaths = [os.path.realpath(os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.dirname(__file__)))))), 'repoman/cnf/repository/repository.yaml'))]
+ os.path.dirname(__file__))))), 'cnf/repository/repository.yaml'))]
else:
self.configpaths = [os.path.join(portage.const.EPREFIX or '/',
'usr/share/repoman/repository/repository.yaml')]
diff --git a/repoman/lib/repoman/repos.py b/repoman/lib/repoman/repos.py
index 31cb82caf..6df984a28 100644
--- a/repoman/lib/repoman/repos.py
+++ b/repoman/lib/repoman/repos.py
@@ -14,6 +14,7 @@ from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.checksum import get_valid_checksum_keys
+from portage.repository.config import allow_profile_repo_deps
# pylint: disable=ungrouped-imports
from repoman.errors import err
@@ -233,7 +234,9 @@ def repo_metadata(portdb, repoman_settings):
profile_list = []
global_pmasklines = []
- for path in portdb.porttrees:
+ for repo in portdb.repositories:
+ path = repo.location
+
try:
liclist.update(os.listdir(os.path.join(path, "licenses")))
except OSError:
@@ -265,7 +268,8 @@ def repo_metadata(portdb, repoman_settings):
global_pmasklines.append(
portage.util.grabfile_package(
os.path.join(path, 'profiles', 'package.mask'),
- recursive=1, verify_eapi=True))
+ recursive=1, verify_eapi=True,
+ allow_repo=allow_profile_repo_deps(repo)))
desc_path = os.path.join(path, 'profiles', 'profiles.desc')
try:
diff --git a/repoman/lib/repoman/tests/simple/test_simple.py b/repoman/lib/repoman/tests/simple/test_simple.py
index 2448bb117..9ec01df3b 100644
--- a/repoman/lib/repoman/tests/simple/test_simple.py
+++ b/repoman/lib/repoman/tests/simple/test_simple.py
@@ -1,23 +1,91 @@
# Copyright 2011-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import collections
import subprocess
import sys
import time
+import types
from repoman._portage import portage
from portage import os
-from portage import _unicode_decode
from portage.process import find_binary
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
from portage.util import ensure_dirs
+from portage.util.futures import asyncio
+from portage.util.futures._asyncio.streams import _reader
+from portage.util._async.AsyncFunction import AsyncFunction
# pylint: disable=ungrouped-imports
from repoman import REPOMAN_BASE_PATH
from repoman.copyrights import update_copyright_year
+from repoman.main import _repoman_init, _repoman_scan, _handle_result
from repoman.tests import TestCase
+class RepomanRun(types.SimpleNamespace):
+ async def run(self):
+ self.expected = getattr(self, "expected", None) or {"returncode": 0}
+ if self.debug:
+ fd_pipes = {}
+ pr = None
+ pw = None
+ else:
+ pr, pw = os.pipe()
+ fd_pipes = {1: pw, 2: pw}
+ pr = open(pr, "rb", 0)
+
+ proc = AsyncFunction(
+ scheduler=asyncio.get_event_loop(),
+ target=self._subprocess,
+ args=(self.args, self.cwd, self.env, self.expected, self.debug),
+ fd_pipes=fd_pipes,
+ )
+
+ proc.start()
+ if pw is not None:
+ os.close(pw)
+
+ await proc.async_wait()
+
+ if pr is None:
+ stdio = None
+ else:
+ stdio = await _reader(pr)
+
+ self.result = {
+ "stdio": stdio,
+ "result": proc.result,
+ }
+
+ @staticmethod
+ def _subprocess(args, cwd, env, expected, debug):
+ os.chdir(cwd)
+ os.environ.update(env)
+ portage.const.EPREFIX = env["PORTAGE_OVERRIDE_EPREFIX"]
+ if debug:
+ args = ["-vvvv"] + args
+ repoman_vars = _repoman_init(["repoman"] + args)
+ if repoman_vars.exitcode is not None:
+ return {"returncode": repoman_vars.exitcode}
+ result = _repoman_scan(*repoman_vars)
+ returncode = _handle_result(*repoman_vars, result)
+ qawarnings = repoman_vars.vcs_settings.qatracker.qawarnings
+ warns = collections.defaultdict(list)
+ fails = collections.defaultdict(list)
+ for qacat, issues in repoman_vars.vcs_settings.qatracker.fails.items():
+ if qacat in qawarnings:
+ warns[qacat].extend(issues)
+ else:
+ fails[qacat].extend(issues)
+ result = {"returncode": returncode}
+ if fails:
+ result["fails"] = fails
+ if warns:
+ result["warns"] = warns
+ return result
+
+
class SimpleRepomanTestCase(TestCase):
def testCopyrightUpdate(self):
@@ -72,11 +140,17 @@ class SimpleRepomanTestCase(TestCase):
self.assertFalse(True, skip_reason)
return
- copyright_header = """# Copyright 1999-%s Gentoo Foundation
+ copyright_header = """# Copyright 1999-%s Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
-# $Header: $
+
""" % time.gmtime().tm_year
+ pkg_preinst_references_forbidden_var = """
+pkg_preinst() {
+ echo "This ${A} reference is not allowed. Neither is this $BROOT reference."
+}
+"""
+
repo_configs = {
"test_repo": {
"layout.conf":
@@ -130,13 +204,14 @@ class SimpleRepomanTestCase(TestCase):
"dev-libs/C-0": {
"COPYRIGHT_HEADER" : copyright_header,
"DESCRIPTION" : "Desc goes here",
- "EAPI" : "4",
+ "EAPI" : "7",
"HOMEPAGE" : "https://example.com",
"IUSE" : "flag",
# must be unstable, since dev-libs/A[flag] is stable masked
"KEYWORDS": "~x86",
"LICENSE": "GPL-2",
"RDEPEND": "flag? ( dev-libs/A[flag] )",
+ "MISC_CONTENT": pkg_preinst_references_forbidden_var,
},
}
licenses = ["GPL-2"]
@@ -169,6 +244,39 @@ class SimpleRepomanTestCase(TestCase):
playground = ResolverPlayground(ebuilds=ebuilds,
profile=profile, repo_configs=repo_configs, debug=debug)
+
+ loop = asyncio._wrap_loop()
+ loop.run_until_complete(
+ asyncio.ensure_future(
+ self._async_test_simple(
+ playground,
+ metadata_xml_files,
+ profiles,
+ profile,
+ licenses,
+ arch_list,
+ use_desc,
+ metadata_xsd,
+ copyright_header,
+ debug,
+ ),
+ loop=loop,
+ )
+ )
+
+ async def _async_test_simple(
+ self,
+ playground,
+ metadata_xml_files,
+ profiles,
+ profile,
+ licenses,
+ arch_list,
+ use_desc,
+ metadata_xsd,
+ copyright_header,
+ debug,
+ ):
settings = playground.settings
eprefix = settings["EPREFIX"]
eroot = settings["EROOT"]
@@ -195,25 +303,35 @@ class SimpleRepomanTestCase(TestCase):
committer_name = "Gentoo Dev"
committer_email = "gentoo-dev@gentoo.org"
+ expected_warnings = {
+ "returncode": 0,
+ "warns": {
+ "variable.phase": [
+ "dev-libs/C/C-0.ebuild: line 15: phase pkg_preinst: EAPI 7: variable A: Forbidden reference to variable specified by PMS",
+ "dev-libs/C/C-0.ebuild: line 15: phase pkg_preinst: EAPI 7: variable BROOT: Forbidden reference to variable specified by PMS",
+ ]
+ },
+ }
git_test = (
- ("", repoman_cmd + ("manifest",)),
+ ("", RepomanRun(args=["--version"])),
+ ("", RepomanRun(args=["manifest"])),
("", git_cmd + ("config", "--global", "user.name", committer_name,)),
("", git_cmd + ("config", "--global", "user.email", committer_email,)),
("", git_cmd + ("init-db",)),
("", git_cmd + ("add", ".")),
("", git_cmd + ("commit", "-a", "-m", "add whole repo")),
- ("", repoman_cmd + ("full", "-d")),
- ("", repoman_cmd + ("full", "--include-profiles", "default/linux/x86/test_profile")),
+ ("", RepomanRun(args=["full", "-d"], expected=expected_warnings)),
+ ("", RepomanRun(args=["full", "--include-profiles", "default/linux/x86/test_profile"], expected=expected_warnings)),
("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")),
("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")),
- ("", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 2")),
+ ("", RepomanRun(args=["commit", "-m", "cat/pkg: bump to version 2"], expected=expected_warnings)),
("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")),
("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")),
- ("dev-libs", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 3")),
+ ("dev-libs", RepomanRun(args=["commit", "-m", "cat/pkg: bump to version 3"], expected=expected_warnings)),
("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")),
("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")),
- ("dev-libs/A", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 4")),
+ ("dev-libs/A", RepomanRun(args=["commit", "-m", "cat/pkg: bump to version 4"])),
)
env = {
@@ -282,41 +400,66 @@ class SimpleRepomanTestCase(TestCase):
# triggered by python -Wd will be visible.
stdout = subprocess.PIPE
- for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"):
+ for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B", "dev-libs/C"):
abs_cwd = os.path.join(test_repo_symlink, cwd)
- proc = subprocess.Popen(repoman_cmd + ("full",),
- cwd=abs_cwd, env=env, stdout=stdout)
+
+ proc = await asyncio.create_subprocess_exec(
+ *(repoman_cmd + ("full",)),
+ env=env,
+ stderr=None,
+ stdout=stdout,
+ cwd=abs_cwd
+ )
if debug:
- proc.wait()
+ await proc.wait()
else:
- output = proc.stdout.readlines()
- proc.wait()
- proc.stdout.close()
+ output, _err = await proc.communicate()
+ await proc.wait()
if proc.returncode != os.EX_OK:
- for line in output:
- sys.stderr.write(_unicode_decode(line))
+ portage.writemsg(output)
- self.assertEqual(os.EX_OK, proc.returncode,
- "repoman failed in %s" % (cwd,))
+ self.assertEqual(
+ os.EX_OK, proc.returncode, "repoman failed in %s" % (cwd,)
+ )
if git_binary is not None:
for cwd, cmd in git_test:
abs_cwd = os.path.join(test_repo_symlink, cwd)
- proc = subprocess.Popen(cmd,
- cwd=abs_cwd, env=env, stdout=stdout)
+ if isinstance(cmd, RepomanRun):
+ cmd.cwd = abs_cwd
+ cmd.env = env
+ cmd.debug = debug
+ await cmd.run()
+ if cmd.result["result"] != cmd.expected and cmd.result.get("stdio"):
+ portage.writemsg(cmd.result["stdio"])
+ try:
+ self.assertEqual(cmd.result["result"], cmd.expected)
+ except Exception:
+ print(cmd.result["result"], file=sys.stderr, flush=True)
+ raise
+ continue
+
+ proc = await asyncio.create_subprocess_exec(
+ *cmd, env=env, stderr=None, stdout=stdout, cwd=abs_cwd
+ )
if debug:
- proc.wait()
+ await proc.wait()
else:
- output = proc.stdout.readlines()
- proc.wait()
- proc.stdout.close()
+ output, _err = await proc.communicate()
+ await proc.wait()
if proc.returncode != os.EX_OK:
- for line in output:
- sys.stderr.write(_unicode_decode(line))
-
- self.assertEqual(os.EX_OK, proc.returncode,
- "%s failed in %s" % (cmd, cwd,))
+ portage.writemsg(output)
+
+ self.assertEqual(
+ os.EX_OK,
+ proc.returncode,
+ "%s failed in %s"
+ % (
+ cmd,
+ cwd,
+ ),
+ )
finally:
playground.cleanup()
diff --git a/repoman/man/repoman.1 b/repoman/man/repoman.1
index 0926e806c..5dbc41560 100644
--- a/repoman/man/repoman.1
+++ b/repoman/man/repoman.1
@@ -1,4 +1,4 @@
-.TH "REPOMAN" "1" "Aug 2020" "Repoman VERSION" "Repoman"
+.TH "REPOMAN" "1" "March 2021" "Repoman VERSION" "Repoman"
.SH NAME
repoman \- Gentoo's program to enforce a minimal level of quality assurance in
packages added to the ebuild repository
@@ -445,6 +445,9 @@ The ebuild makes use of an obsolete construct
A variable contains an invalid character that is not part of the ASCII
character set.
.TP
+.B variable.phase
+Variable referenced found within scope of incorrect ebuild phase as specified by PMS.
+.TP
.B variable.readonly
Assigning a readonly variable
.TP
diff --git a/repoman/setup.py b/repoman/setup.py
index a405e7816..e99189d06 100755
--- a/repoman/setup.py
+++ b/repoman/setup.py
@@ -448,7 +448,7 @@ def get_manpages():
setup(
name = 'repoman',
- version = '3.0.2',
+ version = '3.0.3',
url = 'https://wiki.gentoo.org/wiki/Project:Portage',
author = 'Gentoo Portage Development Team',
author_email = 'dev-portage@gentoo.org',
diff --git a/setup.py b/setup.py
index 5bd899e2c..06e67f369 100755
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright 1998-2020 Gentoo Authors
+# Copyright 1998-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from distutils.core import setup, Command, Extension
@@ -19,6 +19,7 @@ from distutils.util import change_root, subst_vars
import codecs
import collections
import glob
+import itertools
import os
import os.path
import platform
@@ -26,6 +27,13 @@ import re
import subprocess
import sys
+autodetect_pip = os.path.basename(os.environ.get("_", "")) == "pip" or os.path.basename(
+ os.path.dirname(__file__)
+).startswith("pip-")
+venv_prefix = "" if sys.prefix == sys.base_prefix else sys.prefix
+create_entry_points = bool(autodetect_pip or venv_prefix)
+with open(os.path.join(os.path.dirname(__file__), 'README'), 'rt') as f:
+ long_description = f.read()
# TODO:
# - smarter rebuilds of docs w/ 'install_docbook' and 'install_apidoc'.
@@ -220,8 +228,9 @@ class x_build_scripts_custom(build_scripts):
self.scripts = x_scripts[self.dir_name]
else:
self.scripts = set(self.scripts)
- for other_files in x_scripts.values():
- self.scripts.difference_update(other_files)
+ if not (create_entry_points and self.dir_name == "portage"):
+ for other_files in x_scripts.values():
+ self.scripts.difference_update(other_files)
def run(self):
# group scripts by subdirectory
@@ -412,6 +421,18 @@ class x_install_data(install_data):
('subst_paths', 'paths'))
def run(self):
+ def re_sub_file(path, pattern, repl):
+ print('Rewriting %s' % path)
+ with codecs.open(path, 'r', 'utf-8') as f:
+ data = f.read()
+ data = re.sub(pattern, repl, data, flags=re.MULTILINE)
+ with codecs.open(path, 'w', 'utf-8') as f:
+ f.write(data)
+
+ if create_entry_points:
+ re_sub_file('cnf/repos.conf', r'= /', '= %(EPREFIX)s/')
+ re_sub_file('cnf/make.globals', r'DIR="/', 'DIR="${EPREFIX}/')
+
self.run_command('build_man')
def process_data_files(df):
@@ -470,11 +491,51 @@ class x_install_lib(install_lib):
rewrite_file('portage/__init__.py', {
'VERSION': self.distribution.get_version(),
})
- rewrite_file('portage/const.py', {
- 'PORTAGE_BASE_PATH': self.portage_base,
- 'PORTAGE_BIN_PATH': self.portage_bindir,
- 'PORTAGE_CONFIG_PATH': self.portage_confdir,
- })
+
+ def re_sub_file(path, pattern_repl_items):
+ path = os.path.join(self.install_dir, path)
+ print("Rewriting %s" % path)
+ with codecs.open(path, "r", "utf-8") as f:
+ data = f.read()
+ for pattern, repl in pattern_repl_items:
+ data = re.sub(pattern, repl, data, flags=re.MULTILINE)
+ with codecs.open(path, "w", "utf-8") as f:
+ f.write(data)
+
+ val_dict = {}
+ if create_entry_points:
+ val_dict.update(
+ {
+ "GLOBAL_CONFIG_PATH": self.portage_confdir,
+ }
+ )
+ re_sub_file(
+ "portage/const.py",
+ (
+ (
+ r"^(PORTAGE_BASE_PATH\s*=\s*)(.*)",
+ lambda m: "{}{}".format(
+ m.group(1),
+ 'os.path.realpath(os.path.join(__file__, "../../usr/lib/portage"))',
+ ),
+ ),
+ (
+ r"^(EPREFIX\s*=\s*)(.*)",
+ lambda m: "{}{}".format(
+ m.group(1),
+ 'os.path.realpath(os.path.join(__file__, "../.."))',
+ ),
+ ),
+ ),
+ )
+ else:
+ val_dict.update(
+ {
+ "PORTAGE_BASE_PATH": self.portage_base,
+ "PORTAGE_BIN_PATH": self.portage_bindir,
+ }
+ )
+ rewrite_file("portage/const.py", val_dict)
return ret
@@ -527,8 +588,11 @@ class x_install_scripts(install_scripts):
class x_sdist(sdist):
""" sdist defaulting to .tar.bz2 format, and archive files owned by root """
+ def initialize_options(self):
+ super().initialize_options()
+ self.formats = ['xztar']
+
def finalize_options(self):
- self.formats = ['bztar']
if self.owner is None:
self.owner = 'root'
if self.group is None:
@@ -655,10 +719,18 @@ class build_ext(_build_ext):
setup(
name = 'portage',
- version = '3.0.14',
+ version = '3.0.18',
url = 'https://wiki.gentoo.org/wiki/Project:Portage',
+ project_urls = {
+ 'Release Notes': 'https://gitweb.gentoo.org/proj/portage.git/plain/RELEASE-NOTES',
+ 'Documentation': 'https://wiki.gentoo.org/wiki/Handbook:AMD64/Working/Portage',
+ },
author = 'Gentoo Portage Development Team',
author_email = 'dev-portage@gentoo.org',
+ description = 'Portage is the package management and distribution system for Gentoo',
+ license = 'GPLV2',
+ long_description = long_description,
+ long_description_content_type = 'text/plain',
package_dir = {'': 'lib'},
packages = list(find_packages()),
@@ -675,8 +747,15 @@ setup(
['$portage_base/bin', ['bin/deprecated-path']],
['$sysconfdir/portage/repo.postsync.d', ['cnf/repo.postsync.d/example']],
],
-
- ext_modules = [Extension(name=n, sources=m,
+ entry_points={
+ "console_scripts": [
+ "{}=portage.util.bin_entry_point:bin_entry_point".format(os.path.basename(path))
+ for path in itertools.chain.from_iterable(x_scripts.values())
+ ],
+ } if create_entry_points else {},
+ # create_entry_points disables ext_modules, for pure python
+ ext_modules = [] if create_entry_points else [
+ Extension(name=n, sources=m,
extra_compile_args=['-D_FILE_OFFSET_BITS=64',
'-D_LARGEFILE_SOURCE', '-D_LARGEFILE64_SOURCE'])
for n, m in x_c_helpers.items()],
@@ -714,5 +793,7 @@ setup(
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Topic :: System :: Installation/Setup'
- ]
+ ],
+
+ python_requires = ">=3.6",
)
diff --git a/tox.ini b/tox.ini
index 585752686..b0cfa7da3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,7 +9,7 @@ python =
3.8: py38
3.9: py39
3.10: py310
- pypy-3.6: pypy3
+ pypy-3.7-v7.3.3: pypy3
[testenv]
deps =