Commit 8b56e3f6 authored by bertagaz

Merge branch 'master' into stable

parents 52eaf89d 0e901e24
#!/bin/sh
#! /usr/bin/env python3
"""
Tails upgrade frontend wrapper.
set -e
set -u
Test with "python3 tails-upgrade-frontend-wrapper.py doctest".
The tests will start the upgrade process, which could pop up a dialog box,
so you probably want to use a tester that handles user interaction, or
run the tests from the command line and answer prompts as needed.
CMD=$(basename ${0})
goodcrypto.com converted this script from Bash to Python and added basic tests.
. gettext.sh
TEXTDOMAIN="tails"
export TEXTDOMAIN
>>> # run this script (without waiting 30 seconds)
>>> sh.Command(sys.argv[0])("--no-wait")
<BLANKLINE>
"""
# Import no_abort()
. /usr/local/lib/tails-shell-library/common.sh
import os
import sys
import time
from gettext import gettext
TORDATE_DIR=/run/tordate
TORDATE_DONE_FILE="${TORDATE_DIR}/done"
INOTIFY_TIMEOUT=60
MIN_REAL_MEMFREE=$((300 * 1024))
RUN_AS_USER=tails-upgrade-frontend
import sh
import psutil
### Functions
os.environ['PATH'] = '/usr/local/bin:/usr/bin:/bin'
os.environ['TEXTDOMAIN'] = 'tails'
error () {
local cli_text="${CMD}: `gettext \"error:\"` ${@}"
local dialog_text="<b><big>`gettext \"Error\"`</big></b>
CMD = os.path.basename(sys.argv[0])
TORDATE_DIR = '/run/tordate'
TORDATE_DONE_FILE = '{}/done'.format(TORDATE_DIR)
INOTIFY_TIMEOUT = 60
MIN_AVAILABLE_MEMORY = (300 * 1024 * 1024) # In Bytes
RUN_AS_USER = 'tails-upgrade-frontend'
${@}"
echo "${cli_text}" >&2
zenity --error --title "" --text "${dialog_text}"
exit 1
}
check_free_memory () {
local min_real_memfree memfree buffers cached real_memfree
local errormsg
min_real_memfree="$1"
memfree=$(awk '/^MemFree:/{print $2}' /proc/meminfo)
buffers=$(awk '/^Buffers:/{print $2}' /proc/meminfo)
cached=$(awk '/^Cached:/{print $2}' /proc/meminfo)
tmpfs=$(df --type=tmpfs --local --output=used --total | tail -n1)
real_memfree=$(($memfree + $buffers + $cached - $tmpfs))
errormsg="`gettext \"<b>Not enough memory available to check for upgrades.</b>
ERROR_MESSAGE = gettext('''<b>Not enough memory available to check for upgrades.</b>
Make sure this system satisfies the requirements for running Tails.
See file:///usr/share/doc/tails/website/doc/about/requirements.en.html
@@ -50,26 +40,67 @@ See file:///usr/share/doc/tails/website/doc/about/requirements.en.html
Try to restart Tails to check for upgrades again.
Or do a manual upgrade.
See https://tails.boum.org/doc/first_steps/upgrade#manual\"`"
if [ "$real_memfree" -lt "$MIN_REAL_MEMFREE" ] ; then
echo "Only $real_memfree MemFree + Buffers + Cached - usage of tmpfs, while $MIN_REAL_MEMFREE is needed." >&2
error "$errormsg"
fi
}
### Main
sleep 30
check_free_memory "$MIN_REAL_MEMFREE"
# Go to a place where everyone, especially Archive::Tar::Wrapper called by
# tails-install-iuk, can chdir back after it has chdir'd elsewhere to do
# its job.
cd /
xhost +SI:localuser:"$RUN_AS_USER"
no_abort sudo -u "$RUN_AS_USER" /usr/bin/tails-upgrade-frontend "${@}"
xhost -SI:localuser:"$RUN_AS_USER"
exit ${_NO_ABORT_RET}
See https://tails.boum.org/doc/first_steps/upgrade#manual''')
def main(*args):
if "--no-wait" not in args:
time.sleep(30)
else:
args = (arg for arg in args if arg != "--no-wait")
check_free_memory(MIN_AVAILABLE_MEMORY)
# Go to a place where everyone, especially Archive::Tar::Wrapper called by
# tails-install-iuk, can chdir back after it has chdir'd elsewhere to do
# its job.
os.chdir('/')
os.execv(
"/bin/sh",
(
"/bin/sh", "-c",
"xhost +SI:localuser:{user};"
"sudo -u {user} /usr/bin/tails-upgrade-frontend {args};"
"xhost -SI:localuser:{user}".format(user=RUN_AS_USER, args=" ".join(args))
)
)
def error(msg):
"""Show error and exit."""
cli_text = '{}: {} {}'.format(CMD, gettext('error:'), msg)
dialog_text = '''<b><big>{}</big></b>\n\n{}'''.format(gettext('Error'), msg)
print(cli_text, file=sys.stderr)
sh.zenity('--error', '--title', "", '--text', dialog_text, _ok_code=[0,1,5])
sys.exit(1)
def check_free_memory(min_available_memory):
"""Check for enough free memory.
# 1 KiB should be available when running the doctest
>>> check_free_memory(1024)
# 1 TiB should not be available, an error prompt should be displayed
>>> try:
... check_free_memory(1024*1024*1024*1024)
... fail()
... except SystemExit:
... pass
"""
available_memory = psutil.virtual_memory().available
if available_memory < min_available_memory:
print('Only {} bytes of memory available, while {} bytes are needed'.format(
available_memory, min_available_memory), file=sys.stderr)
error(ERROR_MESSAGE)
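# Sketch (not part of the original script), assuming psutil is installed:
# psutil.virtual_memory().available is reported in bytes, whereas the old
# shell version summed KiB values from /proc/meminfo; both thresholds
# amount to 300 MiB.
#
#     import psutil
#     available = psutil.virtual_memory().available    # bytes
#     assert MIN_AVAILABLE_MEMORY == 300 * 1024 * 1024  # 300 MiB
#     enough = available >= MIN_AVAILABLE_MEMORY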
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'doctest':
import doctest
doctest.testmod()
else:
main(*sys.argv[1:])
@@ -9,14 +9,38 @@ strip_nondeterminism_wrapper() {
apt-get --yes purge strip-nondeterminism '^libfile-stripnondeterminism-perl'
}
# Ensure that the packages whose names are passed as arguments are
# installed. If this function installs them now, they will be marked as
# "automatically installed", so the next `apt-get autoremove` action
# will remove them *unless* they are later explicitly installed (or other
# packages depend on them).
ensure_hook_dependency_is_installed() {
# Filter out already installed packages from $@.
for p in "${@}"; do
shift
if ! echo "${p}" | grep -q --extended-regexp '^[a-z0-9.+-]+$'; then
echo "ensure_hook_dependency_is_installed():" \
"doesn't look like a package name: ${p}" >&2
exit 1
fi
if is_package_installed "${p}"; then
continue
fi
set -- "${@}" "${p}"
done
if [ -z "${*}" ]; then
return
fi
apt-get install --yes "${@}"
apt-mark auto "${@}"
}
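# Usage sketch (not part of the original hook library; package names are
# illustrative):
#
#   ensure_hook_dependency_is_installed equivs gettext
#
# Only the packages that are missing get installed, and those are marked as
# automatically installed, so a later `apt-get autoremove` can drop them
# again unless something else explicitly installs or depends on them.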
install_fake_package() {
local name version section tmp control_file
name="${1}"
version="${2}"
section="${3:-misc}"
if ! is_package_installed equivs; then
apt-get install --yes equivs
fi
ensure_hook_dependency_is_installed equivs
tmp="$(mktemp -d)"
control_file="${tmp}/${name}_${version}.control"
cat > "${control_file}" << EOF
......
@@ -21,7 +21,13 @@ try_cleanup_browser_chroot () {
local user="${3}"
try_for 10 "pkill -u ${user} 1>/dev/null 2>&1" 0.1 || \
pkill -9 -u "${user}" || :
for mnt in "${chroot}/dev" "${chroot}/proc" "${chroot}" "${cow}"; do
# findmnt sorts submounts, so we just have to reverse the list to
# get the proper umount order. We use `tail` to suppress the
# "TARGET" column header.
local chroot_mounts="$(
findmnt --output TARGET --list --submounts "${chroot}" | tail -n+2 | tac
)"
for mnt in ${chroot_mounts} "${cow}"; do
try_for 10 "umount ${mnt} 2>/dev/null" 0.1
done
rmdir "${cow}" "${chroot}"
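# Illustration (hypothetical paths, not from the original script): for a
# chroot at /var/lib/chroot with /var/lib/chroot/dev and /var/lib/chroot/proc
# still mounted, `findmnt --output TARGET --list --submounts /var/lib/chroot`
# prints the parent mount first and its submounts below it, so dropping the
# "TARGET" header with `tail -n+2` and reversing with `tac` unmounts the
# submounts before the chroot itself.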
@@ -87,18 +93,6 @@ chroot_browser_profile_dir () {
echo "${conf_dir}/profile.default"
}
# Set the chroot's DNS servers (IPv4 only)
configure_chroot_dns_servers () {
local chroot="${1}" ; shift
local ip4_nameservers="${@}"
rm -f "${chroot}/etc/resolv.conf"
for ns in ${ip4_nameservers}; do
echo "nameserver ${ns}" >> "${chroot}/etc/resolv.conf"
done
chmod a+r "${chroot}/etc/resolv.conf"
}
set_chroot_browser_permissions () {
local chroot="${1}"
local browser_name="${2}"
@@ -219,12 +213,10 @@ configure_chroot_browser () {
local browser_name="${1}" ; shift
local human_readable_name="${1}" ; shift
local home_page="${1}" ; shift
local dns_servers="${1}" ; shift
# Now $@ is a list of paths (that must be valid after chrooting)
# to extensions to enable.
local best_locale="$(guess_best_tor_browser_locale)"
configure_chroot_dns_servers "${chroot}" "${dns_servers}"
configure_chroot_browser_profile "${chroot}" "${browser_name}" \
"${browser_user}" "${home_page}" "${@}"
set_chroot_browser_locale "${chroot}" "${browser_name}" "${browser_user}" \
......
@@ -276,6 +276,13 @@ disable_and_create_empty_persistence_conf_file ()
mv "$conf" "${conf}.insecure_disabled" \
|| error "Failed to disable '$conf': $?"
create_empty_persistence_conf_file "$conf"
}
create_empty_persistence_conf_file ()
{
local conf="$1"
install --owner tails-persistence-setup \
--group tails-persistence-setup --mode 0600 \
/dev/null "$conf" \
@@ -344,6 +351,15 @@ activate_volumes ()
fi
done
# Create live-additional-software.conf if there is none
for mountpoint in $(ls -d /live/persistence/*_unlocked || true)
do
if test ! -f "$mountpoint/live-additional-software.conf"
then
create_empty_persistence_conf_file "$mountpoint/live-additional-software.conf"
fi
done
# Disable all persistence configuration files if the mountpoint
# has wrong access rights.
if [ "$ACCESS_RIGHTS_ARE_CORRECT" != true ]
......
#!/bin/sh
#! /usr/bin/env python3
set -e
import logging
import time
import sys
# Import try_for()
. /usr/local/lib/tails-shell-library/common.sh
import sh
# Import tor_bootstrap_progress()
. /usr/local/lib/tails-shell-library/tor.sh
from tailslib.tor import tor_has_bootstrapped
from tailslib.exceptions import TorFailedToBoostrapError
# Import log()
. /usr/local/lib/tails-shell-library/log.sh
_LOG_TAG="$(basename $0)"
systemctl restart tor@default.service
logger = logging.getLogger(__name__)
if try_for 270 '[ "$(tor_bootstrap_progress)" -eq 100 ]'; then
log "Tor has successfully bootstrapped."
else
log "Tor failed to bootstrap"
exit 1
fi
TIMEOUT = 270
def main():
restart_tor()
def restart_tor():
""" Restart the Tor systemd service
>>> restart_tor()
"""
sh.systemctl('restart', 'tor@default.service')
for i in range(TIMEOUT):
if tor_has_bootstrapped():
logger.info("Tor has successfully bootstrapped")
return
time.sleep(1)
raise TorFailedToBoostrapError("Tor failed to bootstrap within %s seconds" % TIMEOUT)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'doctest':
import doctest
doctest.testmod()
else:
main()
#!/usr/bin/env python
#!/usr/bin/env python3
import gettext
import glob
import os
import os.path
import pwd
import shutil
import subprocess
import sys
import syslog
@@ -11,18 +11,18 @@ import syslog
_ = gettext.gettext
PERSISTENCE_DIR = "/live/persistence/TailsData_unlocked"
PACKAGES_LIST_FILE = PERSISTENCE_DIR + "/live-additional-software.conf"
ACTIVATION_FILE = "/run/live-additional-software/activated"
PACKAGES_LIST_FILE = os.path.join(
PERSISTENCE_DIR, "live-additional-software.conf")
OLD_APT_LISTS_DIR = os.path.join(PERSISTENCE_DIR, 'apt', 'lists.old')
APT_ARCHIVES_DIR = "/var/cache/apt/archives"
APT_LISTS_DIR = "/var/lib/apt/lists"
OBSOLETE_APT_LIST_SUFFIX = "_binary-i386_Packages"
def _launch_apt_get(specific_args):
"""Launch apt-get with given args
"""Launch apt-get with given arguments.
Launch apt-get with given arguments list, log its standard and error output
and return its returncode"""
and return its returncode."""
apt_get_env = os.environ.copy()
# The environment provided in GDM PostLogin hooks doesn't contain /sbin/,
# which is required by dpkg. Let's use the default path for root in Tails.
@@ -34,11 +34,11 @@ def _launch_apt_get(specific_args):
apt_get_env['DEBIAN_PRIORITY'] = "critical"
args = ["apt-get", "--quiet", "--yes"]
args.extend(specific_args)
apt_get = subprocess.Popen(
args,
env=apt_get_env,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
apt_get = subprocess.Popen(args,
env=apt_get_env,
universal_newlines=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
for line in iter(apt_get.stdout.readline, ''):
if not line.startswith('('):
syslog.syslog(line.rstrip())
@@ -50,18 +50,14 @@ def _launch_apt_get(specific_args):
def _notify(title, body):
"""Display a notification to the user of the live system
"""
"""Display a notification to the user of the live system."""
cmd = "/usr/local/sbin/tails-notify-user"
try:
notify_user_output = subprocess.check_output([cmd, title, body],
stderr=subprocess.STDOUT)
subprocess.check_call([cmd, title, body], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
syslog.syslog(syslog.LOG_WARNING,
"Warning: unable to notify the user. %s returned "
"with exit code %s" % (cmd, e.returncode))
syslog.syslog(syslog.LOG_WARNING,
"%s output follows: %s." % (cmd, notify_user_output))
syslog.syslog(syslog.LOG_WARNING,
"The notification was: %s %s" % (title, body))
except OSError as e:
@@ -72,14 +68,12 @@ def _notify(title, body):
def has_additional_packages_list():
"""Return true iff PACKAGES_LIST_FILE exists
"""
"""Return true iff PACKAGES_LIST_FILE exists."""
return os.path.isfile(PACKAGES_LIST_FILE)
def get_additional_packages():
"""Returns the list of all the additional packages
"""
"""Return the list of all additional packages configured."""
packages = []
if has_additional_packages_list():
with open(PACKAGES_LIST_FILE) as f:
@@ -87,37 +81,82 @@ def get_additional_packages():
line = line.strip()
if line:
packages.append(line)
f.closed
return packages
def install_additional_packages():
"""The subcommand which activates and installs all additional packages
"""
def delete_old_apt_lists(old_apt_lists_dir=OLD_APT_LISTS_DIR):
"""Delete the copy of the old APT lists, if any."""
shutil.rmtree(old_apt_lists_dir)
def save_old_apt_lists(srcdir=APT_LISTS_DIR, destdir=OLD_APT_LISTS_DIR):
"""Save a copy of the APT lists"""
if os.path.exists(destdir):
syslog.syslog(syslog.LOG_WARNING,
"Warning: a copy of the APT lists already exists, "
"which should never happen. Removing it.")
delete_old_apt_lists(destdir)
shutil.copytree(srcdir, destdir, symlinks=True)
# Note: we can't do nicer delete + move operations because the directory
# we want to replace is bind-mounted. So we have to delete the content
# we want to replace, and then move the content we want to restore.
def restore_old_apt_lists(srcdir=OLD_APT_LISTS_DIR, dstdir=APT_LISTS_DIR):
"""Restore the copy of the old APT lists."""
# Empty dstdir
for basename in os.listdir(dstdir):
path = os.path.join(dstdir, basename)
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
# Move the content of srcdir to dstdir
for basename in os.listdir(srcdir):
path = os.path.join(srcdir, basename)
shutil.move(path, dstdir)
def install_additional_packages(ignore_old_apt_lists=False):
"""Subcommand which activates and installs all additional packages."""
syslog.syslog("Starting to install additional software...")
if has_additional_packages_list():
syslog.syslog("Found additional packages list")
elif os.path.isdir(PERSISTENCE_DIR):
syslog.syslog(syslog.LOG_WARNING,
"Warning: no configuration file found, creating an "
"empty one.")
create_additional_packages_list()
return True
else:
syslog.syslog(syslog.LOG_WARNING,
"Warning: persistence is not mounted, exiting")
"Warning: no configuration file found, exiting")
return True
try:
clear_obsolete_cache(OBSOLETE_APT_LIST_SUFFIX)
except:
# If a copy of the old APT lists is found, then the previous upgrade
# attempt did not complete successfully (it may have failed e.g.
# due to network problems, or it may have been interrupted).
# In many of these cases, the APT package cache lacks some of the
# packages the new APT lists reference, so the (offline)
# installation step below in this function would fail. To avoid
# that, we restore the old APT lists: the APT package cache is more
# likely to still contain the packages they reference.
if os.path.isdir(OLD_APT_LISTS_DIR) and not ignore_old_apt_lists:
syslog.syslog(syslog.LOG_WARNING,
"Warning: failed to clear obsolete cached packages")
"Found a copy of old APT lists, restoring it.")
try:
restore_old_apt_lists()
except Exception as e:
syslog.syslog(syslog.LOG_WARNING,
"Restoring old APT lists failed with %r, "
"deleting them and proceeding anyway." % e)
# In all cases, delete the old APT lists: if they could be
# restored we don't need them anymore (and we don't want to
# restore them again next time); if they could not be
# restored, chances are restoration will fail next time
# as well.
delete_old_apt_lists()
packages = get_additional_packages()
if not packages:
syslog.syslog(syslog.LOG_WARNING,
"Warning: no packages to install, exiting")
return True
set_activated()
syslog.syslog("Will install the following packages: %s"
% " ".join(packages))
apt_get_returncode = _launch_apt_get(
@@ -128,105 +167,72 @@ def install_additional_packages():
syslog.syslog(syslog.LOG_WARNING,
"Warning: installation of %s failed"
% " ".join(packages))
_notify(_("Your additional software installation failed"),
_("The installation failed. Please check your additional "
"software configuration, or read the system log to "
"understand better the problem."))
return False
else:
syslog.syslog("Installation completed successfully.")
_notify(_("Your additional software are installed"),
_("Your additional software are ready to use."))
return True
def upgrade_additional_packages():
"""The subcommand which upgrades all additional packages if they are activated
"""
if not is_activated():
syslog.syslog(syslog.LOG_WARNING,
"Warning: additional packages not activated, exiting")
return True
"""Subcommand which upgrades all additional packages."""
# Save a copy of the APT lists that we'll delete only once the upgrade
# has succeeded, to ensure that the APT package cache is up-to-date
# with respect to the APT lists.
syslog.syslog("Saving old APT lists...")
save_old_apt_lists()
syslog.syslog("Starting to upgrade additional software...")
apt_get_returncode = _launch_apt_get(["update"])
if apt_get_returncode:
syslog.syslog(syslog.LOG_WARNING, "Warning: the update failed.")
_notify(_("Your additional software"),
_("The upgrade failed. This might be due to a network "
"problem. Please check your network connection, try to "
"restart Tails, or read the system log to understand better "
"the problem."))
_notify(_("Your additional software upgrade failed"),
_("The check for upgrades failed. This might be due to a "
"network problem. Please check your network connection, try "
"to restart Tails, or read the system log to understand "
"better the problem."))
return False
# Remove outdated packages from the local package cache. This is needed as
# we disable apt-daily.timer, which would otherwise take care of this cleanup.
# Note: this does not remove packages from other architectures, hence
# the need for the clear_obsolete_cache() function.
apt_get_returncode = _launch_apt_get(["autoclean"])
if apt_get_returncode:
syslog.syslog(syslog.LOG_WARNING,