Add releng scripts to the repository.

Jorge Manuel B. S. Vicetto (jmbsvicetto) 2012-07-09 17:47:42 +00:00
parent bd95b08675
commit 498f935c11
13 changed files with 1142 additions and 0 deletions

scripts/backup_snapshot_repo Executable file

@@ -0,0 +1,10 @@
#!/bin/bash
# Start our rsyncs
RSYNC_OPTS="--archive --delete --sparse --whole-file"
if [ -e /release/repos/snapshot-tree/hooks/post-commit ]
then
echo "$(date): Starting rsync of trees from tmpfs to disk..." >> /var/log/snapshot-tree-backup.log
rsync ${RSYNC_OPTS} /release/repos/snapshot-tree/* /release/repos/snapshot-tree-disk 2>&1 >> /var/log/snapshot-tree-backup.log || echo "$(date): rsync failed!" >> /var/log/snapshot-tree-backup.log
fi
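# This script is presumably run on a schedule (e.g. from cron) so that the
# tmpfs-backed snapshot tree is regularly mirrored to disk; a hypothetical
# crontab entry (path illustrative) might be:
#   */30 * * * * root /release/scripts/backup_snapshot_repo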

scripts/cache-tools.py Executable file

@@ -0,0 +1,700 @@
#!/usr/bin/env python
# Copyright 1999-2006 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: $
#
# Zac Medico <zmedico@gentoo.org>
#
import errno, fpformat, os, sys, time
if not hasattr(__builtins__, "set"):
from sets import Set as set
from itertools import chain
def create_syncronized_func(myfunc, mylock):
	def newfunc(*pargs, **kwargs):
		mylock.acquire()
		try:
			return myfunc(*pargs, **kwargs)
		finally:
			mylock.release()
	return newfunc
class ConsoleUpdate(object):
_synchronized_methods = ["append", "carriageReturn",
"newLine", "reset", "update"]
def __init__(self):
self.offset = 0
import sys
self.stream = sys.stdout
self.quiet = False
import threading
self._lock = threading.RLock()
for method_name in self._synchronized_methods:
myfunc = create_syncronized_func(
getattr(self, method_name), self._lock)
setattr(self, method_name, myfunc)
# ANSI code that clears from the cursor to the end of the line
self._CLEAR_EOL = None
try:
import curses
try:
curses.setupterm()
self._CLEAR_EOL = curses.tigetstr('el')
except curses.error:
pass
except ImportError:
pass
if not self._CLEAR_EOL:
self._CLEAR_EOL = '\x1b[K'
def acquire(self, **kwargs):
return self._lock.acquire(**kwargs)
def release(self):
self._lock.release()
def reset(self):
self.offset = 0
def carriageReturn(self):
if not self.quiet:
self.stream.write("\r")
self.stream.write(self._CLEAR_EOL)
self.offset = 0
def newLine(self):
if not self.quiet:
self.stream.write("\n")
self.stream.flush()
self.reset()
def update(self, msg):
if not self.quiet:
self.carriageReturn()
self.append(msg)
def append(self, msg):
if not self.quiet:
self.offset += len(msg)
self.stream.write(msg)
self.stream.flush()
class ProgressCounter(object):
def __init__(self):
self.total = 0
self.current = 0
class ProgressAnalyzer(ProgressCounter):
	def __init__(self):
		ProgressCounter.__init__(self)
		self.start_time = time.time()
		self.currentTime = self.start_time
		self._samples = []
		self.sampleCount = 20
def percentage(self, digs=0):
if self.total > 0:
float_percent = 100 * float(self.current) / float(self.total)
else:
float_percent = 0.0
return fpformat.fix(float_percent,digs)
def totalTime(self):
self._samples.append((self.currentTime, self.current))
while len(self._samples) > self.sampleCount:
self._samples.pop(0)
prev_time, prev_count = self._samples[0]
time_delta = self.currentTime - prev_time
if time_delta > 0:
rate = (self.current - prev_count) / time_delta
if rate > 0:
return self.total / rate
return 0
def remaining_time(self):
return self.totalTime() - self.elapsed_time()
def elapsed_time(self):
return self.currentTime - self.start_time
class ConsoleProgress(object):
def __init__(self, name="Progress", console=None):
self.name = name
self.analyzer = ProgressAnalyzer()
if console is None:
self.console = ConsoleUpdate()
else:
self.console = console
self.time_format="%H:%M:%S"
self.quiet = False
self.lastUpdate = 0
self.latency = 0.5
def formatTime(self, t):
return time.strftime(self.time_format, time.gmtime(t))
def displayProgress(self, current, total):
if self.quiet:
return
self.analyzer.currentTime = time.time()
if self.analyzer.currentTime - self.lastUpdate < self.latency:
return
self.lastUpdate = self.analyzer.currentTime
self.analyzer.current = current
self.analyzer.total = total
output = ((self.name, self.analyzer.percentage(1).rjust(4) + "%"),
("Elapsed", self.formatTime(self.analyzer.elapsed_time())),
("Remaining", self.formatTime(self.analyzer.remaining_time())),
("Total", self.formatTime(self.analyzer.totalTime())))
self.console.update(" ".join([ x[0] + ": " + x[1] for x in output ]))
class ProgressHandler(object):
def __init__(self):
self.curval = 0
self.maxval = 0
self.last_update = 0
self.min_display_latency = 0.2
def onProgress(self, maxval, curval):
self.maxval = maxval
self.curval = curval
cur_time = time.time()
if cur_time - self.last_update >= self.min_display_latency:
self.last_update = cur_time
self.display()
def display(self):
raise NotImplementedError(self)
def open_file(filename=None):
if filename is None:
f = sys.stderr
elif filename == "-":
f = sys.stdout
else:
try:
filename = os.path.expanduser(filename)
f = open(filename, "a")
except (IOError, OSError), e:
sys.stderr.write("%s\n" % e)
sys.exit(e.errno)
return f
def create_log(name="", logfile=None, loglevel=0):
import logging
log = logging.getLogger(name)
log.setLevel(loglevel)
handler = logging.StreamHandler(open_file(logfile))
handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
log.addHandler(handler)
return log
def is_interrupt(e):
if isinstance(e, (SystemExit, KeyboardInterrupt)):
return True
return hasattr(e, "errno") and e.errno == errno.EINTR
def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, log,
eclass_cache, cleanse_on_transfer_failure):
cleanse_candidates = set(trg_cache.iterkeys())
update_count = 0
# Since the loop below is mission critical, we continue after *any*
# exception that is not an interrupt.
for x in valid_nodes_iterable:
log.debug("%s mirroring" % x)
if not cleanse_on_transfer_failure:
cleanse_candidates.discard(x)
try:
entry = copy_dict(src_cache[x])
except KeyError, e:
log.error("%s missing source: %s" % (x, str(e)))
del e
continue
except Exception, e:
if is_interrupt(e):
raise
log.error("%s reading source: %s" % (x, str(e)))
del e
continue
write_it = True
trg = None
try:
trg = copy_dict(trg_cache[x])
if long(trg["_mtime_"]) == long(entry["_mtime_"]) and \
eclass_cache.is_eclass_data_valid(trg["_eclasses_"]) and \
set(trg["_eclasses_"]) == set(entry["_eclasses_"]):
write_it = False
except KeyError:
pass
except Exception, e:
if is_interrupt(e):
raise
log.error("%s reading target: %s" % (x, str(e)))
del e
if trg and not write_it:
""" We don't want to skip the write unless we're really sure that
the existing cache is identical, so don't trust _mtime_ and
_eclasses_ alone."""
for d in (entry, trg):
if "EAPI" in d and d["EAPI"] in ("", "0"):
del d["EAPI"]
for k in set(chain(entry, trg)).difference(
("_mtime_", "_eclasses_")):
if trg.get(k, "") != entry.get(k, ""):
write_it = True
break
if write_it:
update_count += 1
log.info("%s transferring" % x)
inherited = entry.get("INHERITED", None)
if inherited:
if src_cache.complete_eclass_entries:
if not "_eclasses_" in entry:
log.error("%s missing _eclasses_" % x)
continue
if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
log.error("%s stale _eclasses_" % x)
continue
else:
entry["_eclasses_"] = eclass_cache.get_eclass_data(entry["INHERITED"].split(), \
from_master_only=True)
if not entry["_eclasses_"]:
log.error("%s stale _eclasses_" % x)
continue
try:
trg_cache[x] = entry
cleanse_candidates.discard(x)
except Exception, e:
if is_interrupt(e):
raise
log.error("%s writing target: %s" % (x, str(e)))
del e
else:
cleanse_candidates.discard(x)
if not trg_cache.autocommits:
try:
trg_cache.commit()
except Exception, e:
if is_interrupt(e):
raise
log.error("committing target: %s" % str(e))
del e
return update_count, cleanse_candidates
def copy_dict(src, dest=None):
"""Some cache implementations throw cache errors when accessing the values.
We grab all the values at once here so that we don't have to be concerned
about exceptions later."""
if dest is None:
dest = {}
for k, v in src.iteritems():
dest[k] = v
return dest
class ListPackages(object):
def __init__(self, portdb, log, shuffle=False):
self._portdb = portdb
self._log = log
self._shuffle = shuffle
def run(self):
log = self._log
cp_list = self._portdb.cp_list
cp_all = self._portdb.cp_all()
if self._shuffle:
from random import shuffle
shuffle(cp_all)
else:
cp_all.sort()
cpv_all = []
# Since the loop below is mission critical, we continue after *any*
# exception that is not an interrupt.
for cp in cp_all:
log.debug("%s cp_list" % cp)
try:
cpv_all.extend(cp_list(cp))
except Exception, e:
if is_interrupt(e):
raise
self._log.error("%s cp_list: %s" % (cp, str(e)))
self.cpv_all = cpv_all
class MetadataGenerate(object):
"""When cache generation fails for some reason, cleanse the stale cache
entry if it exists. This prevents the master mirror from distributing
stale cache, and will allow clients to safely assume that all cache is
valid. The mtime requirement is especially annoying due to bug #139134
(timestamps of cache entries don't change when an eclass changes) and the
interaction of timestamps with rsync."""
def __init__(self, portdb, cpv_all, log):
self._portdb = portdb
self._cpv_all = cpv_all
self._log = log
def run(self, onProgress=None):
log = self._log
portdb = self._portdb
cpv_all = self._cpv_all
auxdb = portdb.auxdb[portdb.porttree_root]
cleanse_candidates = set(auxdb.iterkeys())
# Since the loop below is mission critical, we continue after *any*
# exception that is not an interrupt.
maxval = len(cpv_all)
curval = 0
if onProgress:
onProgress(maxval, curval)
while cpv_all:
cpv = cpv_all.pop(0)
log.debug("%s generating" % cpv)
try:
portdb.aux_get(cpv, ["EAPI"])
# Cleanse if the above doesn't succeed (prevent clients from
# receiving stale cache, and let them assume it is valid).
cleanse_candidates.discard(cpv)
except Exception, e:
if is_interrupt(e):
raise
log.error("%s generating: %s" % (cpv, str(e)))
del e
curval += 1
if onProgress:
onProgress(maxval, curval)
self.target_cache = auxdb
self.dead_nodes = cleanse_candidates
class MetadataTransfer(object):
def __init__(self, portdb, cpv_all, forward, cleanse_on_transfer_failure,
log):
self._portdb = portdb
self._cpv_all = cpv_all
self._log = log
self._forward = forward
self._cleanse_on_transfer_failure = cleanse_on_transfer_failure
def run(self, onProgress=None):
log = self._log
portdb = self._portdb
cpv_all = self._cpv_all
aux_cache = portdb.auxdb[portdb.porttree_root]
import portage
auxdbkeys = portage.auxdbkeys[:]
metadbmodule = portdb.mysettings.load_best_module("portdbapi.metadbmodule")
portdir_cache = metadbmodule(portdb.porttree_root, "metadata/cache",
auxdbkeys)
maxval = len(cpv_all)
curval = 0
if onProgress:
onProgress(maxval, curval)
class pkg_iter(object):
def __init__(self, pkg_list, onProgress=None):
self.pkg_list = pkg_list
self.maxval = len(pkg_list)
self.curval = 0
self.onProgress = onProgress
def __iter__(self):
while self.pkg_list:
yield self.pkg_list.pop()
self.curval += 1
if self.onProgress:
self.onProgress(self.maxval, self.curval)
if self._forward:
src_cache = portdir_cache
trg_cache = aux_cache
else:
src_cache = aux_cache
trg_cache = portdir_cache
""" This encapsulates validation of eclass timestamps and also fills in
missing data (mtimes and/or paths) as necessary for the given cache
format."""
eclass_cache = portage.eclass_cache.cache(portdb.porttree_root)
if not trg_cache.autocommits:
trg_cache.sync(100)
self.target_cache = trg_cache
self.update_count, self.dead_nodes = mirror_cache(
pkg_iter(cpv_all, onProgress=onProgress),
src_cache, trg_cache, log, eclass_cache,
self._cleanse_on_transfer_failure)
class CacheCleanse(object):
def __init__(self, auxdb, dead_nodes, log):
self._auxdb = auxdb
self._dead_nodes = dead_nodes
self._log = log
def run(self):
auxdb = self._auxdb
log = self._log
for cpv in self._dead_nodes:
try:
log.info("%s cleansing" % cpv)
del auxdb[cpv]
except Exception, e:
if is_interrupt(e):
raise
log.error("%s cleansing: %s" % (cpv, str(e)))
del e
def import_portage():
try:
from portage import data as portage_data
except ImportError:
import portage_data
# If we're not already root or in the portage group, we make the gid of the
# current process become portage_gid.
if os.getgid() != 0 and portage_data.portage_gid not in os.getgroups():
portage_data.portage_gid = os.getgid()
portage_data.secpass = 1
os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
import portage
del os.environ["PORTAGE_LEGACY_GLOBALS"]
return portage
def create_portdb(portdir=None, cachedir=None, config_root=None,
target_root=None, profile=None, **kwargs):
if cachedir is not None:
os.environ["PORTAGE_DEPCACHEDIR"] = cachedir
if config_root is None:
config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
if target_root is None:
target_root = os.environ.get("ROOT", "/")
if profile is None:
profile = ""
portage = import_portage()
try:
from portage import const as portage_const
except ImportError:
import portage_const
# Disable overlays because we only generate metadata for the main repo.
os.environ["PORTDIR_OVERLAY"] = ""
conf = portage.config(config_profile_path=profile,
config_incrementals=portage_const.INCREMENTALS,
target_root=target_root,
config_root=config_root)
if portdir is None:
portdir = conf["PORTDIR"]
	# The canonical path is the key for portdb.auxdb.
portdir = os.path.realpath(portdir)
conf["PORTDIR"] = portdir
conf.backup_changes("PORTDIR")
portdb = portage.portdbapi(portdir,
mysettings=conf)
return portdb
def parse_args(myargv):
	description = "This program will ensure that the metadata cache is up to date for the entire portage tree."
usage = "usage: cache-tools [options] --generate || --transfer"
from optparse import OptionParser
parser = OptionParser(description=description, usage=usage)
parser.add_option("--portdir",
help="location of the portage tree",
dest="portdir")
parser.add_option("--cachedir",
help="location of the metadata cache",
dest="cachedir")
parser.add_option("--profile",
help="location of the profile",
dest="profile")
parser.add_option("--generate",
help="generate metadata as necessary to ensure that the cache is fully populated",
action="store_true", dest="generate", default=False)
parser.add_option("--shuffle",
help="generate cache in random rather than sorted order (useful to prevent two separate instances from competing to generate metadata for the same packages simultaneously)",
action="store_true", dest="shuffle", default=False)
parser.add_option("--transfer",
help="transfer metadata from portdir to cachedir or vice versa",
action="store_true", dest="transfer", default=False)
parser.add_option("--cleanse-on-transfer-failure",
help="cleanse target cache when transfer fails for any reason (such as the source being unavailable)",
action="store_true", dest="cleanse_on_transfer_failure", default=False)
parser.add_option("--forward",
help="forward metadata transfer flows from portdir to cachedir (default)",
action="store_true", dest="forward", default=True)
parser.add_option("--reverse",
help="reverse metadata transfer flows from cachedir to portdir",
action="store_false", dest="forward", default=True)
parser.add_option("--logfile",
help="send status messages to a file (default is stderr)",
dest="logfile", default=None)
parser.add_option("--loglevel",
help="numeric log level (defauls to 0 and may range from 0 to 50 corresponding to the default levels of the python logging module)",
dest="loglevel", default="0")
parser.add_option("--reportfile",
help="send a report to a file",
dest="reportfile", default=None)
parser.add_option("--spawn-outfile",
help="redirect ouput of spawned processes to a file instead of stdout/stderr",
dest="spawn_outfile", default=None)
parser.add_option("--no-progress",
action="store_false", dest="progress", default=True,
help="disable progress output to tty")
options, args = parser.parse_args(args=myargv)
# Conversion to dict allows us to use **opts as function args later on.
opts = {}
all_options = ("portdir", "cachedir", "profile", "progress", "logfile",
"loglevel", "generate", "transfer", "forward", "shuffle",
"spawn_outfile", "reportfile", "cleanse_on_transfer_failure")
for opt_name in all_options:
v = getattr(options, opt_name)
opts[opt_name] = v
return opts
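# Illustrative invocations (paths are examples based on common Gentoo
# defaults, not values enforced by this script):
#   cache-tools.py --portdir=/usr/portage --cachedir=/var/cache/edb/dep --generate
#   cache-tools.py --portdir=/usr/portage --cachedir=/var/cache/edb/dep \
#     --transfer --reverse --cleanse-on-transfer-failure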
def run_command(args):
	opts = parse_args(args)
if opts["spawn_outfile"]:
fd = os.dup(1)
sys.stdout = os.fdopen(fd, 'w')
fd = os.dup(2)
sys.stderr = os.fdopen(fd, 'w')
f = open_file(opts["spawn_outfile"])
os.dup2(f.fileno(), 1)
os.dup2(f.fileno(), 2)
del fd, f
console = ConsoleUpdate()
if not opts["progress"] or not sys.stdout.isatty():
console.quiet = True
job = None
import signal, thread, threading
shutdown_initiated = threading.Event()
shutdown_complete = threading.Event()
def shutdown_console():
console.acquire()
try:
console.update("Interrupted.")
console.newLine()
console.quiet = True
shutdown_complete.set()
# Kill the main thread if necessary.
# This causes the SIGINT signal handler to be invoked in the
# main thread. The signal handler needs to be an actual
# callable object (rather than something like signal.SIG_DFL)
# in order to avoid TypeError: 'int' object is not callable.
thread.interrupt_main()
thread.exit()
finally:
console.release()
def handle_interrupt(*args):
if shutdown_complete.isSet():
sys.exit(1)
# Lock the console from a new thread so that the main thread is allowed
# to cleanly complete any console interaction that may have been in
# progress when this interrupt arrived.
if not shutdown_initiated.isSet():
thread.start_new_thread(shutdown_console, ())
shutdown_initiated.set()
signal.signal(signal.SIGINT, handle_interrupt)
signal.signal(signal.SIGTERM, handle_interrupt)
try:
import datetime
datestamp = str(datetime.datetime.now())
time_begin = time.time()
log = create_log(name="MetadataGenerate",
logfile=opts["logfile"], loglevel=int(opts["loglevel"]))
if opts["reportfile"]:
reportfile = open_file(opts["reportfile"])
portdb = create_portdb(**opts)
try:
os.nice(int(portdb.mysettings.get("PORTAGE_NICENESS", "0")))
except (OSError, ValueError), e:
log.error("PORTAGE_NICENESS failed: '%s'" % str(e))
del e
job = ListPackages(portdb, log, shuffle=opts["shuffle"])
console.update("Listing packages in repository...")
job.run()
cpv_all = job.cpv_all
total_count = len(cpv_all)
if opts["generate"]:
job = MetadataGenerate(portdb, cpv_all, log)
name = "Cache generation"
complete_msg = "Metadata generation is complete."
elif opts["transfer"]:
job = MetadataTransfer(portdb, cpv_all, opts["forward"],
opts["cleanse_on_transfer_failure"], log)
if opts["forward"]:
name = "Forward transfer"
complete_msg = "Forward metadata transfer is complete."
else:
name = "Reverse transfer"
complete_msg = "Reverse metadata transfer is complete."
else:
sys.stderr.write("required options: --generate || --transfer\n")
sys.exit(os.EX_USAGE)
job.opts = opts
onProgress = None
if not console.quiet:
ui = ConsoleProgress(name=name, console=console)
progressHandler = ProgressHandler()
onProgress = progressHandler.onProgress
def display():
ui.displayProgress(progressHandler.curval, progressHandler.maxval)
progressHandler.display = display
job.run(onProgress=onProgress)
if not console.quiet:
# make sure the final progress is displayed
progressHandler.display()
update_count = None
if opts["transfer"]:
update_count = job.update_count
target_cache = job.target_cache
dead_nodes = job.dead_nodes
cleanse_count = len(dead_nodes)
console.update("Cleansing cache...")
job = CacheCleanse(target_cache, dead_nodes, log)
job.run()
console.update(complete_msg)
console.newLine()
time_end = time.time()
if opts["reportfile"]:
width = 20
reportfile.write(name.ljust(width) + "%s\n" % datestamp)
reportfile.write("Elapsed seconds".ljust(width) + "%f\n" % (time_end - time_begin))
reportfile.write("Total packages".ljust(width) + "%i\n" % total_count)
if update_count is not None:
reportfile.write("Updated packages".ljust(width) + "%i\n" % update_count)
reportfile.write("Cleansed packages".ljust(width) + "%i\n" % cleanse_count)
reportfile.write(("-"*50)+"\n")
except Exception, e:
if not is_interrupt(e):
raise
del e
handle_interrupt()
sys.exit(0)
if __name__ == "__main__":
run_command(sys.argv[1:])

scripts/copy_buildsync.sh Executable file

@@ -0,0 +1,127 @@
#!/bin/bash
ARCHES="alpha amd64 arm hppa ia64 ppc sparc x86 sh s390"
#alpha amd64 arm hppa ia64 mips ppc s390 sh sparc x86
#ARCHES="s390"
RSYNC_OPTS="-aO --delay-updates"
DEBUG=
VERBOSE=
OUT_STAGE3="latest-stage3.txt"
OUT_ISO="latest-iso.txt"
# Nothing to edit beyond this point
DEBUGP=
VERBOSEP=
[ -n "$DEBUG" ] && DEBUGP=echo
[ -n "$DEBUG" ] && RSYNC_OPTS="${RSYNC_OPTS} -n"
[ -n "$VERBOSE" ] && RSYNC_OPTS="${RSYNC_OPTS} -v"
[ -n "$VERBOSEP" ] && VERBOSEP="-v"
for ARCH in $ARCHES; do
rc=0
fail=0
indir=/home/buildsync/builds/${ARCH}
outdir=/release/weekly/${ARCH}
tmpdir=/release/tmp/buildsync/partial/${ARCH}
mkdir -p ${tmpdir} 2>/dev/null
# Copying
if [ -d "${indir}" ]; then
for i in $(find ${indir} -type f | grep -- '-20[0123][0-9]\{5\}' | sed -e 's:^.*-\(20[^.]\+\).*$:\1:' | sort -ur); do
#echo "Doing $i"
t="${outdir}/${i}"
mkdir -p ${t} 2>/dev/null
rsync ${RSYNC_OPTS} --temp-dir=${tmpdir} --partial-dir=${tmpdir} ${indir}/ --filter "S *${i}*" --filter 'S **/' --filter 'H *' ${t}
rc=$?
if [ $rc -eq 0 ]; then
find ${indir} -type f -name "*${i}*" -print0 | xargs -0 --no-run-if-empty $DEBUGP rm $VERBOSEP -f
else
echo "Not deleting ${indir}/*${i}*, rsync failed!" 1>&2
fail=1
fi
done
find ${outdir} -mindepth 1 -type d \
| egrep -v current \
| sort -r \
| tr '\n' '\0' \
|xargs -0 --no-run-if-empty rmdir --ignore-fail-on-non-empty
fi
# ================================================================
# Build data for revealing latest:
# *.iso
# stage3*bz2
cd "${outdir}"
# %T@
iso_list="$(find 20* -name '*.iso' -printf '%h %f %h/%f\n' |grep -v hardened | sort -n)"
stage3_list="$(find 20* -name 'stage3*bz2' -printf '%h %f %h/%f\n' |grep -v hardened | sort -n)"
latest_iso_date="$(echo -e "${iso_list}" |awk '{print $1}' |cut -d/ -f1 | tail -n1)"
latest_stage3_date="$(echo -e "${stage3_list}" |awk '{print $1}' |cut -d/ -f1 | tail -n1)"
header="$(echo -e "# Latest as of $(date -uR)\n# ts=$(date -u +%s)")"
# Do not remove this
[ -z "${latest_iso_date}" ] && latest_iso_date="NONE-FOUND"
[ -z "${latest_stage3_date}" ] && latest_stage3_date="NONE-FOUND"
if [ -n "${iso_list}" ]; then
echo -e "${header}" >"${OUT_ISO}"
echo -e "${iso_list}" |awk '{print $3}' | grep "$latest_iso_date" >>${OUT_ISO}
rm -f current-iso
ln -sf "$latest_iso_date" current-iso
fi
if [ -n "${stage3_list}" ]; then
echo -e "${header}" >"${OUT_STAGE3}"
echo -e "${stage3_list}" |awk '{print $3}' |grep "$latest_stage3_date" >>${OUT_STAGE3}
rm -f current-stage3
# The "latest stage3" concept doesn't apply to the arm variants
# that are pushed on different days of the week.
if [[ ! $(echo ${outdir} | grep arm) ]]; then
ln -sf "$latest_stage3_date" current-stage3
fi
fi
# new variant preserve code
variants="$(find 20* \( -iname '*.iso' -o -iname '*.tar.bz2' \) -printf '%f\n' |sed -e 's,-20[012][0-9]\{5\}.*,,g' -r | sort | uniq)"
echo -n '' >"${tmpdir}"/.keep.${ARCH}.txt
for v in $variants ; do
#date_variant=$(find 20* -iname "${v}*" \( -name '*.tar.bz2' -o -iname '*.iso' \) -printf '%h\n' | sed -e "s,.*/$a/autobuilds/,,g" -e 's,/.*,,g' |sort -n | tail -n1 )
variant_path=$(find 20* -iname "${v}*" \( -name '*.tar.bz2' -o -iname '*.iso' \) -print | sed -e "s,.*/$a/autobuilds/,,g" | sort -k1,1 -t/ | tail -n1 )
f="latest-${v}.txt"
echo -e "${header}" >"${f}"
echo -e "${variant_path}" >>${f}
rm -f "current-$v"
ln -sf "${variant_path%/*}" "current-$v"
echo "${variant_path}" | sed -e 's,/.*,,g' -e 's,^,/,g' -e 's,$,$,g' >>"${tmpdir}"/.keep.${ARCH}.txt
done
#echo "$date_variant" \
#| sort | uniq | sed -e 's,^,/,g' -e 's,$,$,g' >"${tmpdir}"/.keep.${ARCH}.txt
# ================================================================
# Cleanup
if [ $fail -eq 0 ]; then
# Clean up all but latest 4 from mirror dir
cd "${outdir}"
#echo regex "/${latest_iso_date}\$|/${latest_stage3_date}\$"
for i in $(find -regextype posix-basic -mindepth 1 -maxdepth 1 -type d -regex '.*20[012][0-9]\{5\}.*' \
| sed -e 's:^.*-\(20[^.]\+\).*$:\1:' \
| sort -ur \
| egrep -v "/${latest_iso_date}\$|/${latest_stage3_date}\$" \
| egrep -v -f "${tmpdir}"/.keep.${ARCH}.txt \
| tail -n +5); do
$DEBUGP rm $VERBOSEP -rf $(pwd)/${i}
done
$DEBUGP rm $VERBOSEP -rf ${tmpdir}
else
echo "There was some failure for $ARCH during the weekly sync. Not doing cleanup for fear of dataloss." 1>&2
fi
done
# vim:ts=2 sw=2 noet ft=sh:

scripts/run_catalyst Executable file

@@ -0,0 +1,2 @@
#!/bin/bash
sudo /release/bin/sudo_catalyst "$@"
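# Example invocation (arguments are passed straight through to sudo_catalyst;
# the spec filename is illustrative):
#   run_catalyst amd64 default stage3.spec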

scripts/run_official Executable file

@@ -0,0 +1,39 @@
#!/bin/bash
email_from="auto"
email_to="releng@gentoo.org"
url="https://poseidon.amd64.dev.gentoo.org/snapshots"
snapshot_uri="/release/webroot/snapshots"
svn_repo="/release/repos/snapshot-tree"
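# Typical invocation (the single argument is the snapshot version; the value
# shown is illustrative):
#   run_official 20120709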
send_email() {
subject="[Snapshot] ${1}"
echo -e "From: ${email_from}\r\nTo: ${email_to}\r\nSubject: ${subject}\r\n\r\nA new snapshot has been built from revision `svnlook history ${svn_repo} | head -n 3 | tail -n 1 | sed -e 's:^ *::' -e 's: .*$::'` of ${svn_repo}. You can find it at ${url}.\r\n\r\n$(cat /release/snapshots/portage-${1}.tar.bz2.DIGESTS)\r\n" | /usr/sbin/sendmail -f ${email_from} ${email_to}
}
if [ "${email_from}" == "auto" ]
then
username="$(whoami)"
if [ "${username}" == "root" ]
then
email_from="catalyst@poseidon.amd64.dev.gentoo.org"
else
email_from="${username}@gentoo.org"
fi
fi
sudo /release/bin/sudo_official "$@" && \
echo "Starting rsync from /release/snapshots/portage-${1}.tar.bz2* to ${snapshot_uri}" && \
rsync --archive --stats --progress /release/snapshots/portage-${1}.tar.bz2* \
${snapshot_uri}
ret=$?
if [ "${email_from}" == "none" ]
then
echo "Skipping email step as configured..."
else
[ $ret -eq 0 ] && send_email ${1}
fi
exit $ret

scripts/run_snapshot Executable file

@@ -0,0 +1,2 @@
#!/bin/bash
sudo /release/bin/sudo_snapshot "$@"

scripts/stage_build.sh Executable file

@@ -0,0 +1,162 @@
#!/bin/bash
PID=$$
profile=
version_stamp=
subarch=
stage1_seed=
snapshot=
config=/etc/catalyst/catalyst.conf
email_from="catalyst@localhost"
email_to="root@localhost"
verbose=0
usage() {
msg=$1
if [ -n "${msg}" ]; then
echo -e "${msg}\n";
fi
cat <<EOH
Usage:
stage_build [-p|--profile <profile>] [-v|--version-stamp <stamp>]
[-a|--arch <arch>] [-s|--stage1-seed <seed>] [-S|--snapshot <name>]
[-c|--config <file>] [--verbose] [-f|--email-from <from>]
[-t|--email-to <to>] [-h|--help]
Options:
-p|--profile Sets the portage profile (required)
-v|--version-stamp Sets the version stamp (required)
-a|--arch Sets the 'subarch' in the spec (required)
-s|--stage1-seed Sets the seed for the stage1 (required)
-S|--snapshot Sets the snapshot name (if not given defaults to today's
date)
-c|--config catalyst config to use, defaults to catalyst default
--verbose Send output of commands to console as well as log
-f|--email-from Sets the 'From' on emails sent from this script (defaults
to catalyst@localhost)
-t|--email-to Sets the 'To' on emails sent from this script (defaults
to root@localhost)
-h|--help Show this message and quit
Example:
stage_build -p default-linux/x86/2006.1 -v 2007.0_pre -a i686 -s default/stage3-i686-2006.1
EOH
}
send_email() {
subject="[${subarch}] $1"
body=$2
echo -e "From: ${email_from}\r\nTo: ${email_to}\r\nSubject: ${subject}\r\n\r\nArch: ${subarch}\r\nProfile: ${profile}\r\nVersion stamp: ${version_stamp}\r\nStage1 seed: ${stage1_seed}\r\nSnapshot: ${snapshot}\r\n\r\n${body}\r\n" | /usr/sbin/sendmail -f ${email_from} ${email_to}
}
run_cmd() {
cmd=$1
logfile=$2
if [ $verbose = 1 ]; then
${cmd} 2>&1 | tee ${logfile}
else
${cmd} &> ${logfile}
fi
}
# Parse args
params=${#}
while [ ${#} -gt 0 ]
do
a=${1}
shift
case "${a}" in
-h|--help)
usage
exit 0
;;
-p|--profile)
profile=$1
shift
;;
-v|--version-stamp)
version_stamp=$1
shift
;;
-a|--arch)
subarch=$1
shift
;;
-f|--email-from)
email_from=$1
shift
;;
-t|--email-to)
email_to=$1
shift
;;
-s|--stage1-seed)
stage1_seed=$1
shift
;;
-S|--snapshot)
snapshot=$1
shift
;;
-c|--config)
config=$1
shift
;;
--verbose)
verbose=1
;;
-*)
echo "You have specified an invalid option: ${a}"
usage
exit 1
;;
esac
done
# Make sure all required values were specified
if [ -z "${profile}" ]; then
usage "You must specify a profile."
exit 1
fi
if [ -z "${version_stamp}" ]; then
usage "You must specify a version stamp."
exit 1
fi
if [ -z "${subarch}" ]; then
usage "You must specify an arch."
exit 1
fi
if [ -z "${stage1_seed}" ]; then
usage "You must specify a stage1 seed."
exit 1
fi
cd /tmp
if [ -z "${snapshot}" ]; then
snapshot=`date +%Y%m%d`
run_cmd "catalyst -c ${config} -s '${snapshot}'" "/tmp/catalyst_build_snapshot.${PID}.log"
if [ $? != 0 ]; then
send_email "Catalyst build error - snapshot" "$(</tmp/catalyst_build_snapshot.${PID}.log)"
exit 1
fi
fi
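# Each stage is built from a minimal generated spec. For the values in the
# usage example above (with snapshot defaulting to today's date), stage1.spec
# would look roughly like:
#   subarch: i686
#   target: stage1
#   version_stamp: 2007.0_pre
#   rel_type: default
#   profile: default-linux/x86/2006.1
#   snapshot: 20120709
#   source_subpath: default/stage3-i686-2006.1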
for i in 1 2 3; do
echo -e "subarch: ${subarch}\ntarget: stage${i}\nversion_stamp: ${version_stamp}\nrel_type: default\nprofile: ${profile}\nsnapshot: ${snapshot}" > stage${i}.spec
if [ ${i} = 1 ]; then
echo "source_subpath: ${stage1_seed}" >> stage${i}.spec
else
echo "source_subpath: default/stage$(expr ${i} - 1)-${subarch}-${version_stamp}" >> stage${i}.spec
fi
run_cmd "catalyst -a -p -c ${config} -f stage${i}.spec" "/tmp/catalyst_build_stage${i}.${PID}.log"
if [ $? != 0 ]; then
send_email "Catalyst build error - stage${i}" "$(tail -n 200 /tmp/catalyst_build_stage${i}.${PID}.log)\r\n\r\nFull build log at /tmp/catalyst_build_stage${i}.${PID}.log"
exit 1
fi
done
send_email "Catalyst build success" "Everything finished successfully."

scripts/sudo_catalyst Executable file

@@ -0,0 +1,28 @@
#!/bin/bash
usage() {
echo "Usage: $(basename ${0}) <arch> <target> <spec>"
echo "Where arch is either amd64 or x86, target is default, dev, hardened,"
echo "or uclibc, and spec is your spec file."
echo
}
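# Example: "sudo_catalyst amd64 hardened /path/to/stage1.spec" would build
# with /etc/catalyst/amd64-hardened.conf (the spec path is illustrative).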
if [ -z "$1" -o -z "$2" -o -z "$3" ]
then
usage
else
target="$(grep target ${3} | cut -d' ' -f2)"
/usr/bin/catalyst -c /etc/catalyst/${1}-${2}.conf -f ${3} ${4} ${5}
# && \
# case ${target} in
# stage*|grp*|livecd-stage2)
# echo "Cleaning out ${target} temp files"
# rel_type="$(grep rel_type ${3} | cut -d' ' -f2)"
# subarch="$(grep subarch ${3} | cut -d' ' -f2)"
# version="$(grep version ${3} | cut -d' ' -f2)"
# storedir="$(grep storedir /etc/catalyst/${1}-${2}.conf | cut -d\" -f2)"
# echo "Removing ${storedir}/tmp/${rel_type}/${target}-${subarch}-${version}"
# rm -rf ${storedir}/tmp/${rel_type}/${target}-${subarch}-${version}
# ;;
# esac
fi

scripts/sudo_official Executable file

@@ -0,0 +1,46 @@
#!/bin/bash
tree="/release/trees/snapshot-tree"
portdir="${tree}/${1/_beta2/}/portage"
cache_args="--portdir=${portdir} --cachedir=/release/tmp/depcache"
usage() {
echo "Usage: $(basename ${0}) <version>"
}
if [ -z "${1}" ]
then
usage
else
cd ${tree}
echo "Clearing out old metadata cache"
rm -rf ${portdir}/metadata/cache
echo "Performing a svn up on ${tree}"
svn up || exit 1
mkdir -p ${portdir}/metadata/cache
echo "Recreating portage metadata cache"
cache-tools.py ${cache_args} --generate || exit 1
cache-tools.py ${cache_args} --transfer --reverse \
--cleanse-on-transfer-failure || exit 1
if [ ! -d ${portdir}/metadata/cache/sys-kernel ]
then
echo "Metadata update failed! Bailing out!"
exit 1
fi
catalyst -c /etc/catalyst/snapshot-official.conf -s ${1} \
-C portdir="${portdir}" || exit 1
for i in amd64 x86
do
for j in default dev hardened uclibc
do
cd /release/buildroot/${i}-${j}/snapshots
rm -f portage-official.tar.bz2 portage-${1}.tar.bz2*
ln -sf /release/snapshots/portage-${1}.tar.bz2 \
portage-official.tar.bz2
ln -sf /release/snapshots/portage-${1}.tar.bz2 \
portage-${1}.tar.bz2
ln -sf /release/snapshots/portage-${1}.tar.bz2.DIGESTS \
portage-${1}.tar.bz2.DIGESTS
done
done
fi

scripts/sudo_snapshot Executable file

@@ -0,0 +1,20 @@
#!/bin/bash
usage() {
echo "Usage: $(basename ${0}) <version>"
}
if [ -z "${1}" ]
then
usage
else
catalyst -c /etc/catalyst/snapshot.conf -s ${1}
for i in amd64 x86
do
for j in default dev hardened uclibc
do
cd /release/buildroot/${i}-${j}/snapshots
rm -f portage-${1}.tar.bz2
ln -sf /release/snapshots/portage-${1}.tar.bz2 \
portage-${1}.tar.bz2
done
done
fi

scripts/update_auto_tree Executable file

@@ -0,0 +1,2 @@
#!/bin/bash
PORTDIR="/release/trees/portage-auto/" FEATURES="$FEATURES -news" emerge --sync -q

scripts/update_official_tree Executable file

@@ -0,0 +1,2 @@
#!/bin/bash
PORTDIR="/release/trees/portage-official/" emerge --sync

scripts/update_snapshot_tree Executable file

@@ -0,0 +1,2 @@
#!/bin/bash
PORTDIR="/release/trees/portage-snapshot/" emerge --sync