+++ /dev/null
-#!/bin/bash
-
-current_slave="$1"
-current_builder="$2"
-current_mode="$3"
-
-running_builders="$(wget -qO- "http://builds.lede-project.org:8010/json/slaves/$current_slave?as_text=1" | sed -ne 's,^.*"builderName": "\(.*\)".*$,\1,p')"
-
-is_running() {
- local running_builder
- for running_builder in $running_builders; do
- if [ "${running_builder//\//_}" = "${1//\//_}" ]; then
- return 0
- fi
- done
- return 1
-}
-
-do_cleanup() {
- echo "Cleaning up '$current_builder' work directory..."
-
-	find . -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -vrf | while read entry; do
- printf "."
- done
-}
-
-#
-# Sanity check, current builder should be in running builders list
-#
-
-if ! is_running "$current_builder"; then
- echo "Current builder '$current_builder' not found in current builders list, aborting cleanup."
- exit 0
-fi
-
-
-#
-# Clean up leftovers
-#
-
-if [ "$current_mode" = full ]; then
-(
- if ! flock -x -w 2700 200; then
- echo "Unable to obtain exclusive lock, aborting cleanup."
- exit 1
- fi
-
- for build_dir in ../../*; do
-
- build_dir="$(readlink -f "$build_dir")"
-
- if [ -z "$build_dir" ] || [ ! -d "$build_dir/build/sdk" ]; then
- continue
- fi
-
- current_builder="${build_dir##*/}"
-
- if is_running "$current_builder"; then
- echo "Skipping currently active '$current_builder' work directory."
- continue
- fi
-
- (
- cd "$build_dir/build"
-
- #if [ -n "$(git status --porcelain | grep -v update_hostkey.sh | grep -v cleanup.sh)" ]; then
- if [ -d sdk ] || [ -f sdk.tar.bz2 ]; then
- do_cleanup
- else
- echo "Skipping clean '$current_builder' work directory."
- fi
- )
- done
-
-) 200>../../cleanup.lock
-
-#
-# Clean up current build
-#
-
-else
- do_cleanup
-fi
-
-exit 0
+++ /dev/null
-[general]
-title = LEDE Project
-title_url = http://lede-project.org/
-buildbot_url = http://phase1.builds.lede-project.org/
-homedir = /home/buildbot/lede
-
-[status]
-bind = tcp:8010:interface=127.0.0.1
-user = example
-password = example
-
-[repo]
-url = https://git.lede-project.org/source.git
-
-[rsync]
-url = user@example.org::upload
-password = example
-
-[slave 1]
-name = example-slave-1
-password = example
-builds = 3
-
-[slave 2]
-name = example-slave-2
-password = example2
-builds = 1
+++ /dev/null
-#!/usr/bin/env perl
-
-use strict;
-use warnings;
-use Cwd;
-
-my (%targets, %architectures);
-
-$ENV{'TOPDIR'} = Cwd::getcwd();
-
-
-sub parse_targetinfo {
- my ($target_dir, $subtarget) = @_;
-
- if (open M, "make -C '$target_dir' --no-print-directory DUMP=1 TARGET_BUILD=1 SUBTARGET='$subtarget' |") {
- my ($target_name, $target_arch, @target_features);
- while (defined(my $line = readline M)) {
- chomp $line;
-
- if ($line =~ /^Target: (.+)$/) {
- $target_name = $1;
- }
- elsif ($line =~ /^Target-Arch-Packages: (.+)$/) {
- $target_arch = $1;
- }
- elsif ($line =~ /^Target-Features: (.+)$/) {
- @target_features = split /\s+/, $1;
- }
- elsif ($line =~ /^@\@$/) {
- if ($target_name && $target_arch && $target_name ne 'uml/generic' &&
- !grep { $_ eq 'broken' } @target_features) {
- $targets{$target_name} = $target_arch;
- $architectures{$target_arch} ||= [];
- push @{$architectures{$target_arch}}, $target_name;
- }
-
- undef $target_name;
- undef $target_arch;
- @target_features = ();
- }
- }
- close M;
- }
-}
-
-sub get_targetinfo {
- foreach my $target_makefile (glob "target/linux/*/Makefile") {
- my ($target_dir) = $target_makefile =~ m!^(.+)/Makefile$!;
- my @subtargets;
-
- if (open M, "make -C '$target_dir' --no-print-directory DUMP=1 TARGET_BUILD=1 val.FEATURES V=s 2>/dev/null |") {
- if (defined(my $line = readline M)) {
- chomp $line;
- if (grep { $_ eq 'broken' } split /\s+/, $line) {
- next;
- }
- }
- }
-
- if (open M, "make -C '$target_dir' --no-print-directory DUMP=1 TARGET_BUILD=1 val.SUBTARGETS V=s 2>/dev/null |") {
- if (defined(my $line = readline M)) {
- chomp $line;
- @subtargets = split /\s+/, $line;
- }
- close M;
- }
-
- push @subtargets, 'generic' if @subtargets == 0;
-
- foreach my $subtarget (@subtargets) {
- parse_targetinfo($target_dir, $subtarget);
- }
- }
-}
-
-if (@ARGV == 1 && $ARGV[0] eq 'targets') {
- get_targetinfo();
- foreach my $target_name (sort keys %targets) {
- printf "%s %s\n", $target_name, $targets{$target_name};
- }
-}
-elsif (@ARGV == 1 && $ARGV[0] eq 'architectures') {
- get_targetinfo();
- foreach my $target_arch (sort keys %architectures) {
- printf "%s %s\n", $target_arch, join ' ', @{$architectures{$target_arch}};
- }
-}
-else {
- print "Usage: $0 targets\n";
- print "Usage: $0 architectures\n";
-}
+++ /dev/null
-# -*- python -*-
-# ex: set syntax=python:
-
-import os
-import re
-import subprocess
-import ConfigParser
-
-from buildbot import locks
-
-# This is a sample buildmaster config file. It must be installed as
-# 'master.cfg' in your buildmaster's base directory.
-
-ini = ConfigParser.ConfigParser()
-ini.read("./config.ini")
-
-# This is the dictionary that the buildmaster pays attention to. We also use
-# a shorter alias to save typing.
-c = BuildmasterConfig = {}
-
-####### BUILDSLAVES
-
-# The 'slaves' list defines the set of recognized buildslaves. Each element is
-# a BuildSlave object, specifying a unique slave name and password. The same
-# slave name and password must be configured on the slave.
-from buildbot.buildslave import BuildSlave
-
-c['slaves'] = []
-
-for section in ini.sections():
- if section.startswith("slave "):
- if ini.has_option(section, "name") and ini.has_option(section, "password"):
- name = ini.get(section, "name")
- password = ini.get(section, "password")
- max_builds = 1
- if ini.has_option(section, "builds"):
- max_builds = ini.getint(section, "builds")
- c['slaves'].append(BuildSlave(name, password, max_builds = max_builds))
-
-# 'slavePortnum' defines the TCP port to listen on for connections from slaves.
-# This must match the value configured into the buildslaves (with their
-# --master option)
-c['slavePortnum'] = 9989
-
-# coalesce builds
-c['mergeRequests'] = True
-
-####### CHANGESOURCES
-
-home_dir = ini.get("general", "homedir")
-
-repo_url = ini.get("repo", "url")
-
-rsync_url = ini.get("rsync", "url")
-rsync_key = ini.get("rsync", "password")
-
-# find targets
-targets = [ ]
-
-findtargets = subprocess.Popen([home_dir+'/dumpinfo.pl', 'targets'],
- stdout = subprocess.PIPE, cwd = home_dir+'/source.git')
-
-while True:
- line = findtargets.stdout.readline()
- if not line:
- break
- ta = line.strip().split(' ')
- targets.append(ta[0])
-
-
-# the 'change_source' setting tells the buildmaster how it should find out
-# about source code changes. Here we poll the LEDE source repository.
-
-from buildbot.changes.gitpoller import GitPoller
-c['change_source'] = []
-c['change_source'].append(GitPoller(
- repo_url,
- workdir=home_dir+'/source.git', branch='master',
- pollinterval=300))
-
-####### SCHEDULERS
-
-# Configure the Schedulers, which decide how to react to incoming changes. In this
-# case, a change on the master branch triggers a build on every target builder.
-
-from buildbot.schedulers.basic import SingleBranchScheduler
-from buildbot.schedulers.forcesched import ForceScheduler
-from buildbot.changes import filter
-c['schedulers'] = []
-c['schedulers'].append(SingleBranchScheduler(
- name="all",
- change_filter=filter.ChangeFilter(branch='master'),
- treeStableTimer=60,
- builderNames=targets))
-
-c['schedulers'].append(ForceScheduler(
- name="force",
- builderNames=targets))
-
-####### BUILDERS
-
-# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
-# what steps, and which slaves can execute them. Note that any particular build will
-# only take place on one slave.
-
-from buildbot.process.factory import BuildFactory
-from buildbot.steps.source import Git
-from buildbot.steps.shell import ShellCommand
-from buildbot.steps.transfer import FileDownload
-
-
-MakeTargetMap = {
- "^tools/": "tools/clean",
- "^toolchain/": "toolchain/clean",
- "^target/linux/": "target/linux/clean",
- "^(config|include)/": "dirclean"
-}
-
-def IsAffected(pattern):
- def CheckAffected(change):
- for request in change.build.requests:
- for source in request.sources:
- for change in source.changes:
- for file in change.files:
- if re.match(pattern, file):
- return True
- return False
- return CheckAffected
-
-def isPathBuiltin(path):
- incl = {}
- pkgs = {}
- conf = open(".config", "r")
-
- while True:
- line = conf.readline()
- if line == '':
- break
- m = re.match("^(CONFIG_PACKAGE_.+?)=y", line)
- if m:
- incl[m.group(1)] = True
-
- conf.close()
-
- deps = open("tmp/.packagedeps", "r")
-
- while True:
- line = deps.readline()
- if line == '':
- break
- m = re.match("^package-\$\((CONFIG_PACKAGE_.+?)\) \+= (\S+)", line)
- if m and incl.get(m.group(1)) == True:
- pkgs["package/%s" % m.group(2)] = True
-
- deps.close()
-
- while path != '':
- if pkgs.get(path) == True:
- return True
- path = os.path.dirname(path)
-
- return False
-
-def isChangeBuiltin(change):
- return True
-# for request in change.build.requests:
-# for source in request.sources:
-# for change in source.changes:
-# for file in change.files:
-# if isPathBuiltin(file):
-# return True
-# return False
-
-
-c['builders'] = []
-
-dlLock = locks.SlaveLock("slave_dl")
-
-checkBuiltin = re.sub('[\t\n ]+', ' ', """
- checkBuiltin() {
- local symbol op path file;
- for file in $CHANGED_FILES; do
- case "$file" in
- package/*/*) : ;;
- *) return 0 ;;
- esac;
- done;
- while read symbol op path; do
- case "$symbol" in package-*)
- symbol="${symbol##*(}";
- symbol="${symbol%)}";
- for file in $CHANGED_FILES; do
- case "$file" in "package/$path/"*)
- grep -qsx "$symbol=y" .config && return 0
- ;; esac;
- done;
- esac;
- done < tmp/.packagedeps;
- return 1;
- }
-""").strip()
-
-
-class IfBuiltinShellCommand(ShellCommand):
- def _quote(self, str):
- if re.search("[^a-zA-Z0-9/_.-]", str):
- return "'%s'" %(re.sub("'", "'\"'\"'", str))
- return str
-
- def setCommand(self, command):
- if not isinstance(command, (str, unicode)):
- command = ' '.join(map(self._quote, command))
- self.command = [
- '/bin/sh', '-c',
- '%s; if checkBuiltin; then %s; else exit 0; fi' %(checkBuiltin, command)
- ]
-
- def setupEnvironment(self, cmd):
- slaveEnv = self.slaveEnvironment
- if slaveEnv is None:
- slaveEnv = { }
- changedFiles = { }
- for request in self.build.requests:
- for source in request.sources:
- for change in source.changes:
- for file in change.files:
- changedFiles[file] = True
- fullSlaveEnv = slaveEnv.copy()
- fullSlaveEnv['CHANGED_FILES'] = ' '.join(changedFiles.keys())
- cmd.args['env'] = fullSlaveEnv
-
-slaveNames = [ ]
-
-for slave in c['slaves']:
- slaveNames.append(slave.slavename)
-
-for target in targets:
- ts = target.split('/')
-
- factory = BuildFactory()
-
- # check out the source
- factory.addStep(Git(repourl=repo_url, mode='update'))
-
- factory.addStep(ShellCommand(
- name = "rmtmp",
- description = "Remove tmp folder",
- command=["rm", "-rf", "tmp/"]))
-
- # feed
-# factory.addStep(ShellCommand(
-# name = "feedsconf",
-# description = "Copy the feeds.conf",
-# command='''cp ~/feeds.conf ./feeds.conf''' ))
-
- # feed
- factory.addStep(ShellCommand(
- name = "updatefeeds",
- description = "Updating feeds",
- command=["./scripts/feeds", "update"]))
-
- # feed
- factory.addStep(ShellCommand(
- name = "installfeeds",
- description = "Installing feeds",
- command=["./scripts/feeds", "install", "-a"]))
-
- # configure
- factory.addStep(ShellCommand(
- name = "newconfig",
- description = "Seeding .config",
- command='''cat <<EOT > .config
-CONFIG_TARGET_%s=y
-CONFIG_TARGET_%s_%s=y
-CONFIG_ALL_NONSHARED=y
-CONFIG_SDK=y
-CONFIG_IB=y
-# CONFIG_IB_STANDALONE is not set
-CONFIG_DEVEL=y
-CONFIG_CCACHE=y
-CONFIG_SIGNED_PACKAGES=y
-# CONFIG_PER_FEED_REPO_ADD_COMMENTED is not set
-CONFIG_KERNEL_KALLSYMS=y
-CONFIG_COLLECT_KERNEL_DEBUG=y
-EOT''' %(ts[0], ts[0], ts[1]) ))
-
- factory.addStep(ShellCommand(
- name = "delbin",
- description = "Removing output directory",
- command = ["rm", "-rf", "bin/"]
- ))
-
- factory.addStep(ShellCommand(
- name = "defconfig",
- description = "Populating .config",
- command = ["make", "defconfig"]
- ))
-
- # check arch
- factory.addStep(ShellCommand(
- name = "checkarch",
- description = "Checking architecture",
- command = ["grep", "-sq", "CONFIG_TARGET_%s=y" %(ts[0]), ".config"],
- logEnviron = False,
- want_stdout = False,
- want_stderr = False,
- haltOnFailure = True
- ))
-
- # install build key
- factory.addStep(FileDownload(mastersrc=home_dir+'/key-build', slavedest="key-build", mode=0600))
-
- # prepare dl
- factory.addStep(ShellCommand(
- name = "dldir",
- description = "Preparing dl/",
- command = "mkdir -p $HOME/dl && ln -sf $HOME/dl ./dl",
- logEnviron = False,
- want_stdout = False
- ))
-
- # populate dl
- factory.addStep(ShellCommand(
- name = "dlrun",
- description = "Populating dl/",
- command = ["make", "-j4", "download", "V=s"],
- logEnviron = False,
- locks = [dlLock.access('exclusive')]
- ))
-
- factory.addStep(ShellCommand(
- name = "cleanbase",
- description = "Cleaning base-files",
- command=["make", "package/base-files/clean", "V=s"]
- ))
-
- # optional clean steps
- for pattern, maketarget in MakeTargetMap.items():
- factory.addStep(ShellCommand(
- name = maketarget,
- description = maketarget,
- command=["make", maketarget, "V=s"], doStepIf=IsAffected(pattern)
- ))
-
- # build
- factory.addStep(ShellCommand(
- name = "tools",
- description = "Building tools",
- command = ["make", "-j4", "tools/install", "V=s"],
- haltOnFailure = True
- ))
-
- factory.addStep(ShellCommand(
- name = "toolchain",
- description = "Building toolchain",
- command=["make", "-j4", "toolchain/install", "V=s"],
- haltOnFailure = True
- ))
-
- factory.addStep(ShellCommand(
- name = "kmods",
- description = "Building kmods",
- command=["make", "-j4", "target/compile", "V=s", "IGNORE_ERRORS=n m", "BUILD_LOG=1"],
- #env={'BUILD_LOG_DIR': 'bin/%s' %(ts[0])},
- haltOnFailure = True
- ))
-
- factory.addStep(ShellCommand(
- name = "pkgbuild",
- description = "Building packages",
- command=["make", "-j4", "package/compile", "V=s", "IGNORE_ERRORS=n m", "BUILD_LOG=1"],
- #env={'BUILD_LOG_DIR': 'bin/%s' %(ts[0])},
- haltOnFailure = True
- ))
-
- # factory.addStep(IfBuiltinShellCommand(
- factory.addStep(ShellCommand(
- name = "pkginstall",
- description = "Installing packages",
- command=["make", "-j4", "package/install", "V=s"],
- doStepIf = isChangeBuiltin,
- haltOnFailure = True
- ))
-
- factory.addStep(ShellCommand(
- name = "pkgindex",
- description = "Indexing packages",
- command=["make", "-j4", "package/index", "V=s"],
- haltOnFailure = True
- ))
-
- #factory.addStep(IfBuiltinShellCommand(
- factory.addStep(ShellCommand(
- name = "images",
- description = "Building images",
- command=["make", "-j1", "target/install", "V=s"],
- doStepIf = isChangeBuiltin,
- haltOnFailure = True
- ))
-
- # upload
- factory.addStep(ShellCommand(
- name = "uploadprepare",
- description = "Preparing target directory",
- command=["rsync", "-av", "--include", "/%s/" %(ts[0]), "--include", "/%s/%s/" %(ts[0], ts[1]), "--exclude", "/*", "--exclude", "/*/*", "--exclude", "/%s/%s/*" %(ts[0], ts[1]), "bin/targets/", "%s/targets/" %(rsync_url)],
- env={'RSYNC_PASSWORD': rsync_key},
- haltOnFailure = True,
- logEnviron = False
- ))
-
- factory.addStep(ShellCommand(
- name = "targetupload",
- description = "Uploading target files",
- command=["rsync", "--delete", "-avz", "bin/targets/%s/%s/" %(ts[0], ts[1]), "%s/targets/%s/%s/" %(rsync_url, ts[0], ts[1])],
- env={'RSYNC_PASSWORD': rsync_key},
- haltOnFailure = True,
- logEnviron = False
- ))
-
- if False:
- factory.addStep(ShellCommand(
- name = "packageupload",
- description = "Uploading package files",
- command=["rsync", "--delete", "-avz", "bin/packages/", "%s/packages/" %(rsync_url)],
- env={'RSYNC_PASSWORD': rsync_key},
- haltOnFailure = False,
- logEnviron = False
- ))
-
- # logs
- if False:
- factory.addStep(ShellCommand(
- name = "upload",
- description = "Uploading logs",
- command=["rsync", "--delete", "-avz", "logs/", "%s/logs/%s/%s/" %(rsync_url, ts[0], ts[1])],
- env={'RSYNC_PASSWORD': rsync_key},
- haltOnFailure = False,
- alwaysRun = True,
- logEnviron = False
- ))
-
- from buildbot.config import BuilderConfig
-
- c['builders'].append(BuilderConfig(name=target, slavenames=slaveNames, factory=factory))
-
-
-####### STATUS TARGETS
-
-# 'status' is a list of Status Targets. The results of each build will be
-# pushed to these targets. buildbot/status/*.py has a variety to choose from,
-# including web pages, email senders, and IRC bots.
-
-c['status'] = []
-
-from buildbot.status import html
-from buildbot.status.web import authz, auth
-
-if ini.has_option("status", "bind"):
- if ini.has_option("status", "user") and ini.has_option("status", "password"):
- authz_cfg=authz.Authz(
- # change any of these to True to enable; see the manual for more
- # options
- auth=auth.BasicAuth([(ini.get("status", "user"), ini.get("status", "password"))]),
- gracefulShutdown = False,
- forceBuild = 'auth', # use this to test your slave once it is set up
- forceAllBuilds = 'auth',
- pingBuilder = False,
- stopBuild = 'auth',
- stopAllBuilds = 'auth',
- cancelPendingBuild = 'auth',
- )
- c['status'].append(html.WebStatus(http_port=ini.get("status", "bind"), authz=authz_cfg))
- else:
- c['status'].append(html.WebStatus(http_port=ini.get("status", "bind")))
-
-####### PROJECT IDENTITY
-
-# the 'title' string will appear at the top of this buildbot
-# installation's html.WebStatus home page (linked to the
-# 'titleURL') and is embedded in the title of the waterfall HTML page.
-
-c['title'] = ini.get("general", "title")
-c['titleURL'] = ini.get("general", "title_url")
-
-# the 'buildbotURL' string should point to the location where the buildbot's
-# internal web server (usually the html.WebStatus page) is visible. This
-# typically uses the port number set in the Waterfall 'status' entry, but
-# with an externally-visible host name which the buildbot cannot figure out
-# without some help.
-
-c['buildbotURL'] = ini.get("general", "buildbot_url")
-
-####### DB URL
-
-c['db'] = {
- # This specifies what database buildbot uses to store its state. You can leave
- # this at its default for all but the largest installations.
- 'db_url' : "sqlite:///state.sqlite",
-}
-
+++ /dev/null
-#!/bin/bash
-
-export LC_ALL=C
-
-current_slave="$1"
-current_builder="$2"
-current_mode="$3"
-
-running_builders="$(wget -qO- "http://phase2.builds.lede-project.org/json/slaves/$current_slave?as_text=1" | sed -ne 's,^.*"builderName": "\(.*\)".*$,\1,p')"
-
-is_running() {
- local running_builder
- for running_builder in $running_builders; do
- if [ "${running_builder//\//_}" = "${1//\//_}" ]; then
- return 0
- fi
- done
- return 1
-}
-
-do_cleanup() {
- printf "Cleaning up '$current_builder' work directory"
-
- rm -f cleanup.sh
- rm -vrf sdk/ | while read entry; do
- case "$entry" in *directory:*)
- printf "."
- esac
- done
-
- echo ""
-}
-
-#
-# Sanity check, current builder should be in running builders list
-#
-
-if ! is_running "$current_builder"; then
- echo "Current builder '$current_builder' not found in current builders list, aborting cleanup."
- exit 1
-fi
-
-
-#
-# Clean up leftovers
-#
-
-if [ "$current_mode" = full ]; then
-(
- if ! flock -x -w 2700 200; then
- echo "Unable to obtain exclusive lock, aborting cleanup."
- exit 1
- fi
-
- for build_dir in ../../*; do
-
- build_dir="$(readlink -f "$build_dir")"
-
- if [ -z "$build_dir" ] || [ ! -d "$build_dir/build/sdk" ]; then
- continue
- fi
-
- current_builder="${build_dir##*/}"
-
- if is_running "$current_builder"; then
- echo "Skipping currently active '$current_builder' work directory."
- continue
- fi
-
- (
- cd "$build_dir/build"
-
- #if [ -n "$(git status --porcelain | grep -v update_hostkey.sh | grep -v cleanup.sh)" ]; then
- if [ -d sdk ]; then
- do_cleanup
- else
- echo "Skipping clean '$current_builder' work directory."
- fi
- )
- done
-
-) 200>../../cleanup.lock
-
-#
-# Clean up current build
-#
-
-else
- do_cleanup
-fi
-
-exit 0
+++ /dev/null
-[general]
-title = LEDE Project
-title_url = http://lede-project.org/
-buildbot_url = http://phase2.builds.lede-project.org/
-homedir = /home/buildbot/lede
-
-[status]
-bind = tcp:8011:interface=127.0.0.1
-user = example
-password = example
-
-[repo]
-url = https://git.lede-project.org/source.git
-
-[rsync]
-url = user@example.org::upload
-password = example
-
-[slave 1]
-name = slave-example-1
-password = example
-builds = 1
-
-[slave 2]
-name = slave-example-2
-password = example2
-builds = 3
-
+++ /dev/null
-# -*- python -*-
-# ex: set syntax=python:
-
-import os
-import re
-import subprocess
-import ConfigParser
-
-from buildbot import locks
-
-ini = ConfigParser.ConfigParser()
-ini.read("./config.ini")
-
-# This is a sample buildmaster config file. It must be installed as
-# 'master.cfg' in your buildmaster's base directory.
-
-# This is the dictionary that the buildmaster pays attention to. We also use
-# a shorter alias to save typing.
-c = BuildmasterConfig = {}
-
-####### BUILDSLAVES
-
-# The 'slaves' list defines the set of recognized buildslaves. Each element is
-# a BuildSlave object, specifying a unique slave name and password. The same
-# slave name and password must be configured on the slave.
-from buildbot.buildslave import BuildSlave
-
-c['slaves'] = []
-
-for section in ini.sections():
- if section.startswith("slave "):
- if ini.has_option(section, "name") and ini.has_option(section, "password"):
- name = ini.get(section, "name")
- password = ini.get(section, "password")
- max_builds = 1
- if ini.has_option(section, "builds"):
- max_builds = ini.getint(section, "builds")
- c['slaves'].append(BuildSlave(name, password, max_builds = max_builds))
-
-# 'slavePortnum' defines the TCP port to listen on for connections from slaves.
-# This must match the value configured into the buildslaves (with their
-# --master option)
-c['slavePortnum'] = 9990
-
-# coalesce builds
-c['mergeRequests'] = True
-
-####### CHANGESOURCES
-
-home_dir = ini.get("general", "homedir")
-
-repo_url = ini.get("repo", "url")
-
-rsync_url = ini.get("rsync", "url")
-rsync_key = ini.get("rsync", "password")
-
-# find arches
-arches = [ ]
-archnames = [ ]
-
-findarches = subprocess.Popen([home_dir+'/dumpinfo.pl', 'architectures'],
- stdout = subprocess.PIPE, cwd = home_dir+'/source.git')
-
-while True:
- line = findarches.stdout.readline()
- if not line:
- break
- at = line.strip().split()
- arches.append(at)
- archnames.append(at[0])
-
-
-# find feeds
-feeds = []
-
-from buildbot.changes.gitpoller import GitPoller
-c['change_source'] = []
-
-with open(home_dir+'/source.git/feeds.conf.default', 'r') as f:
- for line in f:
- parts = line.strip().split()
- if parts[0] == "src-git":
- feeds.append(parts)
- c['change_source'].append(GitPoller(parts[2], workdir='%s/%s.git' %(os.getcwd(), parts[1]), branch='master', pollinterval=300))
-
-
-####### SCHEDULERS
-
-# Configure the Schedulers, which decide how to react to incoming changes. In this
-# case, a change on the master branch triggers a build on every architecture builder.
-
-from buildbot.schedulers.basic import SingleBranchScheduler
-from buildbot.schedulers.forcesched import ForceScheduler
-from buildbot.changes import filter
-c['schedulers'] = []
-c['schedulers'].append(SingleBranchScheduler(
- name="all",
- change_filter=filter.ChangeFilter(branch='master'),
- treeStableTimer=60,
- builderNames=archnames))
-
-c['schedulers'].append(ForceScheduler(
- name="force",
- builderNames=archnames))
-
-####### BUILDERS
-
-# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
-# what steps, and which slaves can execute them. Note that any particular build will
-# only take place on one slave.
-
-from buildbot.process.factory import BuildFactory
-from buildbot.steps.source import Git
-from buildbot.steps.shell import ShellCommand
-from buildbot.steps.transfer import FileDownload
-from buildbot.process.properties import WithProperties
-
-c['builders'] = []
-
-dlLock = locks.SlaveLock("slave_dl")
-
-slaveNames = [ ]
-
-for slave in c['slaves']:
- slaveNames.append(slave.slavename)
-
-for arch in arches:
- ts = arch[1].split('/')
-
- factory = BuildFactory()
-
- # prepare workspace
- factory.addStep(FileDownload(mastersrc="cleanup.sh", slavedest="cleanup.sh", mode=0755))
-
- factory.addStep(ShellCommand(
- name = "cleanold",
- description = "Cleaning previous builds",
- command = ["./cleanup.sh", WithProperties("%(slavename)s"), WithProperties("%(buildername)s"), "full"],
- haltOnFailure = True,
- timeout = 2400))
-
- factory.addStep(ShellCommand(
- name = "cleanup",
- description = "Cleaning work area",
- command = ["./cleanup.sh", WithProperties("%(slavename)s"), WithProperties("%(buildername)s"), "single"],
- haltOnFailure = True,
- timeout = 2400))
-
- factory.addStep(ShellCommand(
- name = "mksdkdir",
- description = "Preparing SDK directory",
- command = ["mkdir", "sdk"],
- haltOnFailure = True))
-
- factory.addStep(ShellCommand(
- name = "downloadsdk",
- description = "Downloading SDK archive",
- command = ["rsync", "-va", "downloads.lede-project.org::downloads/snapshots/targets/%s/%s/LEDE-SDK-*.tar.bz2" %(ts[0], ts[1]), "sdk.tar.bz2"],
- haltOnFailure = True))
-
- factory.addStep(ShellCommand(
- name = "unpacksdk",
- description = "Unpacking SDK archive",
- command = ["tar", "--strip-components=1", "-C", "sdk/", "-vxjf", "sdk.tar.bz2"],
- haltOnFailure = True))
-
- factory.addStep(FileDownload(mastersrc=home_dir+'/key-build', slavedest="sdk/key-build", mode=0400))
-
- factory.addStep(ShellCommand(
- name = "mkdldir",
- description = "Preparing download directory",
- command = ["sh", "-c", "mkdir -p $HOME/dl && rmdir ./sdk/dl && ln -sf $HOME/dl ./sdk/dl"]))
-
- factory.addStep(ShellCommand(
- name = "updatefeeds",
- description = "Updating feeds",
- workdir = "build/sdk",
- command = ["./scripts/feeds", "update"]))
-
- factory.addStep(ShellCommand(
- name = "installfeeds",
- description = "Installing feeds",
- workdir = "build/sdk",
- command = ["./scripts/feeds", "install", "-a"]))
-
- factory.addStep(ShellCommand(
- name = "compile",
- description = "Building packages",
- workdir = "build/sdk",
- command = ["make", "-j4", "V=s", "IGNORE_ERRORS=n m y", "BUILD_LOG=1", "CONFIG_SIGNED_PACKAGES=y"]))
-
- factory.addStep(ShellCommand(
- name = "uploadprepare",
- description = "Preparing package directory",
- workdir = "build/sdk",
- command = ["rsync", "-av", "--include", "/%s/" %(arch[0]), "--exclude", "/*", "--exclude", "/%s/*" %(arch[0]), "bin/packages/", "%s/packages/" %(rsync_url)],
- env={'RSYNC_PASSWORD': rsync_key},
- haltOnFailure = True,
- logEnviron = False
- ))
-
- factory.addStep(ShellCommand(
- name = "packageupload",
- description = "Uploading package files",
- workdir = "build/sdk",
- command = ["rsync", "--delete", "-avz", "bin/packages/%s/" %(arch[0]), "%s/packages/%s/" %(rsync_url, arch[0])],
- env={'RSYNC_PASSWORD': rsync_key},
- haltOnFailure = True,
- logEnviron = False
- ))
-
- factory.addStep(ShellCommand(
- name = "logprepare",
- description = "Preparing log directory",
- workdir = "build/sdk",
- command = ["rsync", "-av", "--include", "/%s/" %(arch[0]), "--exclude", "/*", "--exclude", "/%s/*" %(arch[0]), "bin/packages/", "%s/faillogs/" %(rsync_url)],
- env={'RSYNC_PASSWORD': rsync_key},
- haltOnFailure = True,
- logEnviron = False
- ))
-
- factory.addStep(ShellCommand(
- name = "logfind",
- description = "Finding failure logs",
- workdir = "build/sdk/logs/package/feeds",
- command = ["sh", "-c", "sed -ne 's!^ *ERROR: package/feeds/\\([^ ]*\\) .*$!\\1!p' ../error.txt | sort -u | xargs -r find > ../../../logs.txt"],
- haltOnFailure = False
- ))
-
- factory.addStep(ShellCommand(
- name = "logcollect",
- description = "Collecting failure logs",
- workdir = "build/sdk",
- command = ["rsync", "-av", "--files-from=logs.txt", "logs/package/feeds/", "faillogs/"],
- haltOnFailure = False
- ))
-
- factory.addStep(ShellCommand(
- name = "logupload",
- description = "Uploading failure logs",
- workdir = "build/sdk",
- command = ["rsync", "--delete", "-avz", "faillogs/", "%s/faillogs/%s/" %(rsync_url, arch[0])],
- env={'RSYNC_PASSWORD': rsync_key},
- haltOnFailure = False,
- logEnviron = False
- ))
-
- from buildbot.config import BuilderConfig
-
- c['builders'].append(BuilderConfig(name=arch[0], slavenames=slaveNames, factory=factory))
-
-
-####### STATUS TARGETS
-
-# 'status' is a list of Status Targets. The results of each build will be
-# pushed to these targets. buildbot/status/*.py has a variety to choose from,
-# including web pages, email senders, and IRC bots.
-
-c['status'] = []
-
-from buildbot.status import html
-from buildbot.status.web import authz, auth
-
-if ini.has_option("status", "bind"):
- if ini.has_option("status", "user") and ini.has_option("status", "password"):
- authz_cfg=authz.Authz(
- # change any of these to True to enable; see the manual for more
- # options
- auth=auth.BasicAuth([(ini.get("status", "user"), ini.get("status", "password"))]),
- gracefulShutdown = False,
- forceBuild = 'auth', # use this to test your slave once it is set up
- forceAllBuilds = 'auth',
- pingBuilder = False,
- stopBuild = 'auth',
- stopAllBuilds = 'auth',
- cancelPendingBuild = 'auth',
- )
- c['status'].append(html.WebStatus(http_port=ini.get("status", "bind"), authz=authz_cfg))
- else:
- c['status'].append(html.WebStatus(http_port=ini.get("status", "bind")))
-
-####### PROJECT IDENTITY
-
-# the 'title' string will appear at the top of this buildbot
-# installation's html.WebStatus home page (linked to the
-# 'titleURL') and is embedded in the title of the waterfall HTML page.
-
-c['title'] = ini.get("general", "title")
-c['titleURL'] = ini.get("general", "title_url")
-
-# the 'buildbotURL' string should point to the location where the buildbot's
-# internal web server (usually the html.WebStatus page) is visible. This
-# typically uses the port number set in the Waterfall 'status' entry, but
-# with an externally-visible host name which the buildbot cannot figure out
-# without some help.
-
-c['buildbotURL'] = ini.get("general", "buildbot_url")
-
-####### DB URL
-
-c['db'] = {
- # This specifies what database buildbot uses to store its state. You can leave
- # this at its default for all but the largest installations.
- 'db_url' : "sqlite:///state.sqlite",
-}
-
--- /dev/null
+#!/bin/bash
+
+current_slave="$1"
+current_builder="$2"
+current_mode="$3"
+
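+# Ask the buildmaster's JSON status API which builders are currently assigned
+# to this slave; used below to avoid wiping a work directory that is in use.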
+running_builders="$(wget -qO- "http://builds.lede-project.org:8010/json/slaves/$current_slave?as_text=1" | sed -ne 's,^.*"builderName": "\(.*\)".*$,\1,p')"
+
+is_running() {
+ local running_builder
+ for running_builder in $running_builders; do
+ if [ "${running_builder//\//_}" = "${1//\//_}" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+do_cleanup() {
+ echo "Cleaning up '$current_builder' work directory..."
+
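+	# Delete everything in the work directory, printing a dot for each path
+	# reported by rm -v to keep the buildbot log compact while showing progress.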
+	find . -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -vrf | while read entry; do
+ printf "."
+ done
+}
+
+#
+# Sanity check, current builder should be in running builders list
+#
+
+if ! is_running "$current_builder"; then
+ echo "Current builder '$current_builder' not found in current builders list, aborting cleanup."
+ exit 0
+fi
+
+
+#
+# Clean up leftovers
+#
+
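+# In full mode, sweep all sibling build directories as well. File descriptor
+# 200 is redirected to ../../cleanup.lock for the subshell below, and flock
+# ensures only one cleanup sweep runs at a time on this slave.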
+if [ "$current_mode" = full ]; then
+(
+ if ! flock -x -w 2700 200; then
+ echo "Unable to obtain exclusive lock, aborting cleanup."
+ exit 1
+ fi
+
+ for build_dir in ../../*; do
+
+ build_dir="$(readlink -f "$build_dir")"
+
+ if [ -z "$build_dir" ] || [ ! -d "$build_dir/build/sdk" ]; then
+ continue
+ fi
+
+ current_builder="${build_dir##*/}"
+
+ if is_running "$current_builder"; then
+ echo "Skipping currently active '$current_builder' work directory."
+ continue
+ fi
+
+ (
+ cd "$build_dir/build"
+
+ #if [ -n "$(git status --porcelain | grep -v update_hostkey.sh | grep -v cleanup.sh)" ]; then
+ if [ -d sdk ] || [ -f sdk.tar.bz2 ]; then
+ do_cleanup
+ else
+ echo "Skipping clean '$current_builder' work directory."
+ fi
+ )
+ done
+
+) 200>../../cleanup.lock
+
+#
+# Clean up current build
+#
+
+else
+ do_cleanup
+fi
+
+exit 0
--- /dev/null
+[general]
+title = LEDE Project
+title_url = http://lede-project.org/
+buildbot_url = http://phase1.builds.lede-project.org/
+homedir = /home/buildbot/lede
+
+[status]
+bind = tcp:8010:interface=127.0.0.1
+user = example
+password = example
+
+[repo]
+url = https://git.lede-project.org/source.git
+
+[rsync]
+url = user@example.org::upload
+password = example
+
+[slave 1]
+name = example-slave-1
+password = example
+builds = 3
+
+[slave 2]
+name = example-slave-2
+password = example2
+builds = 1
--- /dev/null
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+use Cwd;
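+
+# Dump target information from the source tree in the current directory:
+#   dumpinfo.pl targets        ->  "<target/subtarget> <package arch>" per line
+#   dumpinfo.pl architectures  ->  "<package arch> <target> [<target> ...]" per line
+# The phase1 and phase2 master.cfg files parse this output to create builders.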
+
+my (%targets, %architectures);
+
+$ENV{'TOPDIR'} = Cwd::getcwd();
+
+
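+# Run "make ... DUMP=1" for one subtarget and record every non-broken target
+# (except uml/generic) together with its Target-Arch-Packages architecture.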
+sub parse_targetinfo {
+ my ($target_dir, $subtarget) = @_;
+
+ if (open M, "make -C '$target_dir' --no-print-directory DUMP=1 TARGET_BUILD=1 SUBTARGET='$subtarget' |") {
+ my ($target_name, $target_arch, @target_features);
+ while (defined(my $line = readline M)) {
+ chomp $line;
+
+ if ($line =~ /^Target: (.+)$/) {
+ $target_name = $1;
+ }
+ elsif ($line =~ /^Target-Arch-Packages: (.+)$/) {
+ $target_arch = $1;
+ }
+ elsif ($line =~ /^Target-Features: (.+)$/) {
+ @target_features = split /\s+/, $1;
+ }
+ elsif ($line =~ /^@\@$/) {
+ if ($target_name && $target_arch && $target_name ne 'uml/generic' &&
+ !grep { $_ eq 'broken' } @target_features) {
+ $targets{$target_name} = $target_arch;
+ $architectures{$target_arch} ||= [];
+ push @{$architectures{$target_arch}}, $target_name;
+ }
+
+ undef $target_name;
+ undef $target_arch;
+ @target_features = ();
+ }
+ }
+ close M;
+ }
+}
+
+sub get_targetinfo {
+ foreach my $target_makefile (glob "target/linux/*/Makefile") {
+ my ($target_dir) = $target_makefile =~ m!^(.+)/Makefile$!;
+ my @subtargets;
+
+ if (open M, "make -C '$target_dir' --no-print-directory DUMP=1 TARGET_BUILD=1 val.FEATURES V=s 2>/dev/null |") {
+ if (defined(my $line = readline M)) {
+ chomp $line;
+ if (grep { $_ eq 'broken' } split /\s+/, $line) {
+ next;
+ }
+ }
+ }
+
+ if (open M, "make -C '$target_dir' --no-print-directory DUMP=1 TARGET_BUILD=1 val.SUBTARGETS V=s 2>/dev/null |") {
+ if (defined(my $line = readline M)) {
+ chomp $line;
+ @subtargets = split /\s+/, $line;
+ }
+ close M;
+ }
+
+ push @subtargets, 'generic' if @subtargets == 0;
+
+ foreach my $subtarget (@subtargets) {
+ parse_targetinfo($target_dir, $subtarget);
+ }
+ }
+}
+
+if (@ARGV == 1 && $ARGV[0] eq 'targets') {
+ get_targetinfo();
+ foreach my $target_name (sort keys %targets) {
+ printf "%s %s\n", $target_name, $targets{$target_name};
+ }
+}
+elsif (@ARGV == 1 && $ARGV[0] eq 'architectures') {
+ get_targetinfo();
+ foreach my $target_arch (sort keys %architectures) {
+ printf "%s %s\n", $target_arch, join ' ', @{$architectures{$target_arch}};
+ }
+}
+else {
+ print "Usage: $0 targets\n";
+ print "Usage: $0 architectures\n";
+}
--- /dev/null
+# -*- python -*-
+# ex: set syntax=python:
+
+import os
+import re
+import subprocess
+import ConfigParser
+
+from buildbot import locks
+
+# This is a sample buildmaster config file. It must be installed as
+# 'master.cfg' in your buildmaster's base directory.
+
+ini = ConfigParser.ConfigParser()
+ini.read("./config.ini")
+
+# This is the dictionary that the buildmaster pays attention to. We also use
+# a shorter alias to save typing.
+c = BuildmasterConfig = {}
+
+####### BUILDSLAVES
+
+# The 'slaves' list defines the set of recognized buildslaves. Each element is
+# a BuildSlave object, specifying a unique slave name and password. The same
+# slave name and password must be configured on the slave.
+from buildbot.buildslave import BuildSlave
+
+c['slaves'] = []
+
+for section in ini.sections():
+ if section.startswith("slave "):
+ if ini.has_option(section, "name") and ini.has_option(section, "password"):
+ name = ini.get(section, "name")
+ password = ini.get(section, "password")
+ max_builds = 1
+ if ini.has_option(section, "builds"):
+ max_builds = ini.getint(section, "builds")
+ c['slaves'].append(BuildSlave(name, password, max_builds = max_builds))
+
+# 'slavePortnum' defines the TCP port to listen on for connections from slaves.
+# This must match the value configured into the buildslaves (with their
+# --master option)
+c['slavePortnum'] = 9989
+
+# coalesce builds
+c['mergeRequests'] = True
+
+####### CHANGESOURCES
+
+home_dir = ini.get("general", "homedir")
+
+repo_url = ini.get("repo", "url")
+
+rsync_url = ini.get("rsync", "url")
+rsync_key = ini.get("rsync", "password")
+
+# find targets
+targets = [ ]
+
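+# "dumpinfo.pl targets" prints one "<target/subtarget> <package arch>" line per
+# target; only the target name is kept here and used as the builder name.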
+findtargets = subprocess.Popen([home_dir+'/dumpinfo.pl', 'targets'],
+ stdout = subprocess.PIPE, cwd = home_dir+'/source.git')
+
+while True:
+ line = findtargets.stdout.readline()
+ if not line:
+ break
+ ta = line.strip().split(' ')
+ targets.append(ta[0])
+
+
+# the 'change_source' setting tells the buildmaster how it should find out
+# about source code changes. Here we poll the LEDE source repository.
+
+from buildbot.changes.gitpoller import GitPoller
+c['change_source'] = []
+c['change_source'].append(GitPoller(
+ repo_url,
+ workdir=home_dir+'/source.git', branch='master',
+ pollinterval=300))
+
+####### SCHEDULERS
+
+# Configure the Schedulers, which decide how to react to incoming changes. In this
+# case, a change on the master branch triggers a build on every target builder.
+
+from buildbot.schedulers.basic import SingleBranchScheduler
+from buildbot.schedulers.forcesched import ForceScheduler
+from buildbot.changes import filter
+c['schedulers'] = []
+c['schedulers'].append(SingleBranchScheduler(
+ name="all",
+ change_filter=filter.ChangeFilter(branch='master'),
+ treeStableTimer=60,
+ builderNames=targets))
+
+c['schedulers'].append(ForceScheduler(
+ name="force",
+ builderNames=targets))
+
+####### BUILDERS
+
+# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
+# what steps, and which slaves can execute them. Note that any particular build will
+# only take place on one slave.
+
+from buildbot.process.factory import BuildFactory
+from buildbot.steps.source import Git
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.transfer import FileDownload
+
+
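+# Changed-path prefixes mapped to the "make <target>" clean step that is
+# scheduled (via doStepIf=IsAffected below) when a change touches that part
+# of the tree.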
+MakeTargetMap = {
+ "^tools/": "tools/clean",
+ "^toolchain/": "toolchain/clean",
+ "^target/linux/": "target/linux/clean",
+ "^(config|include)/": "dirclean"
+}
+
+def IsAffected(pattern):
+ def CheckAffected(change):
+ for request in change.build.requests:
+ for source in request.sources:
+ for change in source.changes:
+ for file in change.files:
+ if re.match(pattern, file):
+ return True
+ return False
+ return CheckAffected
+
+def isPathBuiltin(path):
+ incl = {}
+ pkgs = {}
+ conf = open(".config", "r")
+
+ while True:
+ line = conf.readline()
+ if line == '':
+ break
+ m = re.match("^(CONFIG_PACKAGE_.+?)=y", line)
+ if m:
+ incl[m.group(1)] = True
+
+ conf.close()
+
+ deps = open("tmp/.packagedeps", "r")
+
+ while True:
+ line = deps.readline()
+ if line == '':
+ break
+ m = re.match("^package-\$\((CONFIG_PACKAGE_.+?)\) \+= (\S+)", line)
+ if m and incl.get(m.group(1)) == True:
+ pkgs["package/%s" % m.group(2)] = True
+
+ deps.close()
+
+ while path != '':
+ if pkgs.get(path) == True:
+ return True
+ path = os.path.dirname(path)
+
+ return False
+
+def isChangeBuiltin(change):
+ return True
+# for request in change.build.requests:
+# for source in request.sources:
+# for change in source.changes:
+# for file in change.files:
+# if isPathBuiltin(file):
+# return True
+# return False
+
+
+c['builders'] = []
+
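+# One exclusive download slot per slave: the "dlrun" step takes this lock so
+# concurrent builds do not race on the shared $HOME/dl directory.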
+dlLock = locks.SlaveLock("slave_dl")
+
+checkBuiltin = re.sub('[\t\n ]+', ' ', """
+ checkBuiltin() {
+ local symbol op path file;
+ for file in $CHANGED_FILES; do
+ case "$file" in
+ package/*/*) : ;;
+ *) return 0 ;;
+ esac;
+ done;
+ while read symbol op path; do
+ case "$symbol" in package-*)
+ symbol="${symbol##*(}";
+ symbol="${symbol%)}";
+ for file in $CHANGED_FILES; do
+ case "$file" in "package/$path/"*)
+ grep -qsx "$symbol=y" .config && return 0
+ ;; esac;
+ done;
+ esac;
+ done < tmp/.packagedeps;
+ return 1;
+ }
+""").strip()
+
+
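+# ShellCommand variant that exports the changed files as $CHANGED_FILES and
+# wraps its command in the checkBuiltin helper above, so the command only runs
+# when a change touches something outside package/ or when a changed package
+# is enabled (=y) in .config. Currently unused; see the commented-out
+# IfBuiltinShellCommand steps below.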
+class IfBuiltinShellCommand(ShellCommand):
+ def _quote(self, str):
+ if re.search("[^a-zA-Z0-9/_.-]", str):
+ return "'%s'" %(re.sub("'", "'\"'\"'", str))
+ return str
+
+ def setCommand(self, command):
+ if not isinstance(command, (str, unicode)):
+ command = ' '.join(map(self._quote, command))
+ self.command = [
+ '/bin/sh', '-c',
+ '%s; if checkBuiltin; then %s; else exit 0; fi' %(checkBuiltin, command)
+ ]
+
+ def setupEnvironment(self, cmd):
+ slaveEnv = self.slaveEnvironment
+ if slaveEnv is None:
+ slaveEnv = { }
+ changedFiles = { }
+ for request in self.build.requests:
+ for source in request.sources:
+ for change in source.changes:
+ for file in change.files:
+ changedFiles[file] = True
+ fullSlaveEnv = slaveEnv.copy()
+ fullSlaveEnv['CHANGED_FILES'] = ' '.join(changedFiles.keys())
+ cmd.args['env'] = fullSlaveEnv
+
+slaveNames = [ ]
+
+for slave in c['slaves']:
+ slaveNames.append(slave.slavename)
+
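+# One builder per target/subtarget: check out the source, update and install
+# the feeds, seed and expand .config, build tools, toolchain, kernel and
+# packages, then upload the resulting images to the download server.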
+for target in targets:
+ ts = target.split('/')
+
+ factory = BuildFactory()
+
+ # check out the source
+ factory.addStep(Git(repourl=repo_url, mode='update'))
+
+ factory.addStep(ShellCommand(
+ name = "rmtmp",
+ description = "Remove tmp folder",
+ command=["rm", "-rf", "tmp/"]))
+
+ # feed
+# factory.addStep(ShellCommand(
+# name = "feedsconf",
+# description = "Copy the feeds.conf",
+# command='''cp ~/feeds.conf ./feeds.conf''' ))
+
+ # feed
+ factory.addStep(ShellCommand(
+ name = "updatefeeds",
+ description = "Updating feeds",
+ command=["./scripts/feeds", "update"]))
+
+ # feed
+ factory.addStep(ShellCommand(
+ name = "installfeeds",
+ description = "Installing feeds",
+ command=["./scripts/feeds", "install", "-a"]))
+
+ # configure
+ factory.addStep(ShellCommand(
+ name = "newconfig",
+ description = "Seeding .config",
+ command='''cat <<EOT > .config
+CONFIG_TARGET_%s=y
+CONFIG_TARGET_%s_%s=y
+CONFIG_ALL_NONSHARED=y
+CONFIG_SDK=y
+CONFIG_IB=y
+# CONFIG_IB_STANDALONE is not set
+CONFIG_DEVEL=y
+CONFIG_CCACHE=y
+CONFIG_SIGNED_PACKAGES=y
+# CONFIG_PER_FEED_REPO_ADD_COMMENTED is not set
+CONFIG_KERNEL_KALLSYMS=y
+CONFIG_COLLECT_KERNEL_DEBUG=y
+EOT''' %(ts[0], ts[0], ts[1]) ))
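+	# The seeded configuration enables the SDK and the ImageBuilder for every
+	# target so the phase2 package builders can reuse the published SDK.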
+
+ factory.addStep(ShellCommand(
+ name = "delbin",
+ description = "Removing output directory",
+ command = ["rm", "-rf", "bin/"]
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "defconfig",
+ description = "Populating .config",
+ command = ["make", "defconfig"]
+ ))
+
+ # check arch
+ factory.addStep(ShellCommand(
+ name = "checkarch",
+ description = "Checking architecture",
+ command = ["grep", "-sq", "CONFIG_TARGET_%s=y" %(ts[0]), ".config"],
+ logEnviron = False,
+ want_stdout = False,
+ want_stderr = False,
+ haltOnFailure = True
+ ))
+
+ # install build key
+ factory.addStep(FileDownload(mastersrc=home_dir+'/key-build', slavedest="key-build", mode=0600))
+
+ # prepare dl
+ factory.addStep(ShellCommand(
+ name = "dldir",
+ description = "Preparing dl/",
+ command = "mkdir -p $HOME/dl && ln -sf $HOME/dl ./dl",
+ logEnviron = False,
+ want_stdout = False
+ ))
+
+ # populate dl
+ factory.addStep(ShellCommand(
+ name = "dlrun",
+ description = "Populating dl/",
+ command = ["make", "-j4", "download", "V=s"],
+ logEnviron = False,
+ locks = [dlLock.access('exclusive')]
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "cleanbase",
+ description = "Cleaning base-files",
+ command=["make", "package/base-files/clean", "V=s"]
+ ))
+
+ # optional clean steps
+ for pattern, maketarget in MakeTargetMap.items():
+ factory.addStep(ShellCommand(
+ name = maketarget,
+ description = maketarget,
+ command=["make", maketarget, "V=s"], doStepIf=IsAffected(pattern)
+ ))
+
+ # build
+ factory.addStep(ShellCommand(
+ name = "tools",
+ description = "Building tools",
+ command = ["make", "-j4", "tools/install", "V=s"],
+ haltOnFailure = True
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "toolchain",
+ description = "Building toolchain",
+ command=["make", "-j4", "toolchain/install", "V=s"],
+ haltOnFailure = True
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "kmods",
+ description = "Building kmods",
+ command=["make", "-j4", "target/compile", "V=s", "IGNORE_ERRORS=n m", "BUILD_LOG=1"],
+ #env={'BUILD_LOG_DIR': 'bin/%s' %(ts[0])},
+ haltOnFailure = True
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "pkgbuild",
+ description = "Building packages",
+ command=["make", "-j4", "package/compile", "V=s", "IGNORE_ERRORS=n m", "BUILD_LOG=1"],
+ #env={'BUILD_LOG_DIR': 'bin/%s' %(ts[0])},
+ haltOnFailure = True
+ ))
+
+ # factory.addStep(IfBuiltinShellCommand(
+ factory.addStep(ShellCommand(
+ name = "pkginstall",
+ description = "Installing packages",
+ command=["make", "-j4", "package/install", "V=s"],
+ doStepIf = isChangeBuiltin,
+ haltOnFailure = True
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "pkgindex",
+ description = "Indexing packages",
+ command=["make", "-j4", "package/index", "V=s"],
+ haltOnFailure = True
+ ))
+
+ #factory.addStep(IfBuiltinShellCommand(
+ factory.addStep(ShellCommand(
+ name = "images",
+ description = "Building images",
+ command=["make", "-j1", "target/install", "V=s"],
+ doStepIf = isChangeBuiltin,
+ haltOnFailure = True
+ ))
+
+ # upload
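+	# "uploadprepare" only creates the bin/targets/<target>/<subtarget>/
+	# directory structure on the server; "targetupload" then syncs the image
+	# files into it with --delete.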
+ factory.addStep(ShellCommand(
+ name = "uploadprepare",
+ description = "Preparing target directory",
+ command=["rsync", "-av", "--include", "/%s/" %(ts[0]), "--include", "/%s/%s/" %(ts[0], ts[1]), "--exclude", "/*", "--exclude", "/*/*", "--exclude", "/%s/%s/*" %(ts[0], ts[1]), "bin/targets/", "%s/targets/" %(rsync_url)],
+ env={'RSYNC_PASSWORD': rsync_key},
+ haltOnFailure = True,
+ logEnviron = False
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "targetupload",
+ description = "Uploading target files",
+ command=["rsync", "--delete", "-avz", "bin/targets/%s/%s/" %(ts[0], ts[1]), "%s/targets/%s/%s/" %(rsync_url, ts[0], ts[1])],
+ env={'RSYNC_PASSWORD': rsync_key},
+ haltOnFailure = True,
+ logEnviron = False
+ ))
+
+ if False:
+ factory.addStep(ShellCommand(
+ name = "packageupload",
+ description = "Uploading package files",
+ command=["rsync", "--delete", "-avz", "bin/packages/", "%s/packages/" %(rsync_url)],
+ env={'RSYNC_PASSWORD': rsync_key},
+ haltOnFailure = False,
+ logEnviron = False
+ ))
+
+ # logs
+ if False:
+ factory.addStep(ShellCommand(
+ name = "upload",
+ description = "Uploading logs",
+ command=["rsync", "--delete", "-avz", "logs/", "%s/logs/%s/%s/" %(rsync_url, ts[0], ts[1])],
+ env={'RSYNC_PASSWORD': rsync_key},
+ haltOnFailure = False,
+ alwaysRun = True,
+ logEnviron = False
+ ))
+
+ from buildbot.config import BuilderConfig
+
+ c['builders'].append(BuilderConfig(name=target, slavenames=slaveNames, factory=factory))
+
+
+####### STATUS TARGETS
+
+# 'status' is a list of Status Targets. The results of each build will be
+# pushed to these targets. buildbot/status/*.py has a variety to choose from,
+# including web pages, email senders, and IRC bots.
+
+c['status'] = []
+
+from buildbot.status import html
+from buildbot.status.web import authz, auth
+
+if ini.has_option("status", "bind"):
+ if ini.has_option("status", "user") and ini.has_option("status", "password"):
+ authz_cfg=authz.Authz(
+ # change any of these to True to enable; see the manual for more
+ # options
+ auth=auth.BasicAuth([(ini.get("status", "user"), ini.get("status", "password"))]),
+ gracefulShutdown = False,
+ forceBuild = 'auth', # use this to test your slave once it is set up
+ forceAllBuilds = 'auth',
+ pingBuilder = False,
+ stopBuild = 'auth',
+ stopAllBuilds = 'auth',
+ cancelPendingBuild = 'auth',
+ )
+ c['status'].append(html.WebStatus(http_port=ini.get("status", "bind"), authz=authz_cfg))
+ else:
+ c['status'].append(html.WebStatus(http_port=ini.get("status", "bind")))
+
+####### PROJECT IDENTITY
+
+# the 'title' string will appear at the top of this buildbot
+# installation's html.WebStatus home page (linked to the
+# 'titleURL') and is embedded in the title of the waterfall HTML page.
+
+c['title'] = ini.get("general", "title")
+c['titleURL'] = ini.get("general", "title_url")
+
+# the 'buildbotURL' string should point to the location where the buildbot's
+# internal web server (usually the html.WebStatus page) is visible. This
+# typically uses the port number set in the Waterfall 'status' entry, but
+# with an externally-visible host name which the buildbot cannot figure out
+# without some help.
+
+c['buildbotURL'] = ini.get("general", "buildbot_url")
+
+####### DB URL
+
+c['db'] = {
+ # This specifies what database buildbot uses to store its state. You can leave
+ # this at its default for all but the largest installations.
+ 'db_url' : "sqlite:///state.sqlite",
+}
+
--- /dev/null
+#!/bin/bash
+
+export LC_ALL=C
+
+current_slave="$1"
+current_builder="$2"
+current_mode="$3"
+
+running_builders="$(wget -qO- "http://phase2.builds.lede-project.org/json/slaves/$current_slave?as_text=1" | sed -ne 's,^.*"builderName": "\(.*\)".*$,\1,p')"
+
+is_running() {
+ local running_builder
+ for running_builder in $running_builders; do
+ if [ "${running_builder//\//_}" = "${1//\//_}" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
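+# Remove the fetched cleanup script and the unpacked sdk/ tree, printing a dot
+# for each directory reported by rm -v so the buildbot log stays compact.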
+do_cleanup() {
+ printf "Cleaning up '$current_builder' work directory"
+
+ rm -f cleanup.sh
+ rm -vrf sdk/ | while read entry; do
+ case "$entry" in *directory:*)
+ printf "."
+ esac
+ done
+
+ echo ""
+}
+
+#
+# Sanity check, current builder should be in running builders list
+#
+
+if ! is_running "$current_builder"; then
+ echo "Current builder '$current_builder' not found in current builders list, aborting cleanup."
+ exit 1
+fi
+
+
+#
+# Clean up leftovers
+#
+
+if [ "$current_mode" = full ]; then
+(
+ if ! flock -x -w 2700 200; then
+ echo "Unable to obtain exclusive lock, aborting cleanup."
+ exit 1
+ fi
+
+ for build_dir in ../../*; do
+
+ build_dir="$(readlink -f "$build_dir")"
+
+ if [ -z "$build_dir" ] || [ ! -d "$build_dir/build/sdk" ]; then
+ continue
+ fi
+
+ current_builder="${build_dir##*/}"
+
+ if is_running "$current_builder"; then
+ echo "Skipping currently active '$current_builder' work directory."
+ continue
+ fi
+
+ (
+ cd "$build_dir/build"
+
+ #if [ -n "$(git status --porcelain | grep -v update_hostkey.sh | grep -v cleanup.sh)" ]; then
+ if [ -d sdk ]; then
+ do_cleanup
+ else
+ echo "Skipping clean '$current_builder' work directory."
+ fi
+ )
+ done
+
+) 200>../../cleanup.lock
+
+#
+# Clean up current build
+#
+
+else
+ do_cleanup
+fi
+
+exit 0
--- /dev/null
+[general]
+title = LEDE Project
+title_url = http://lede-project.org/
+buildbot_url = http://phase2.builds.lede-project.org/
+homedir = /home/buildbot/lede
+
+[status]
+bind = tcp:8011:interface=127.0.0.1
+user = example
+password = example
+
+[repo]
+url = https://git.lede-project.org/source.git
+
+[rsync]
+url = user@example.org::upload
+password = example
+
+[slave 1]
+name = slave-example-1
+password = example
+builds = 1
+
+[slave 2]
+name = slave-example-2
+password = example2
+builds = 3
+
--- /dev/null
+# -*- python -*-
+# ex: set syntax=python:
+
+import os
+import re
+import subprocess
+import ConfigParser
+
+from buildbot import locks
+
+ini = ConfigParser.ConfigParser()
+ini.read("./config.ini")
+
+# This is a sample buildmaster config file. It must be installed as
+# 'master.cfg' in your buildmaster's base directory.
+
+# This is the dictionary that the buildmaster pays attention to. We also use
+# a shorter alias to save typing.
+c = BuildmasterConfig = {}
+
+####### BUILDSLAVES
+
+# The 'slaves' list defines the set of recognized buildslaves. Each element is
+# a BuildSlave object, specifying a unique slave name and password. The same
+# slave name and password must be configured on the slave.
+from buildbot.buildslave import BuildSlave
+
+c['slaves'] = []
+
+for section in ini.sections():
+ if section.startswith("slave "):
+ if ini.has_option(section, "name") and ini.has_option(section, "password"):
+ name = ini.get(section, "name")
+ password = ini.get(section, "password")
+ max_builds = 1
+ if ini.has_option(section, "builds"):
+ max_builds = ini.getint(section, "builds")
+ c['slaves'].append(BuildSlave(name, password, max_builds = max_builds))
+
+# 'slavePortnum' defines the TCP port to listen on for connections from slaves.
+# This must match the value configured into the buildslaves (with their
+# --master option)
+c['slavePortnum'] = 9990
+
+# coalesce builds
+c['mergeRequests'] = True
+
+####### CHANGESOURCES
+
+home_dir = ini.get("general", "homedir")
+
+repo_url = ini.get("repo", "url")
+
+rsync_url = ini.get("rsync", "url")
+rsync_key = ini.get("rsync", "password")
+
+# find arches
+arches = [ ]
+archnames = [ ]
+
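+# "dumpinfo.pl architectures" prints one line per package architecture:
+# "<package arch> <target/subtarget> [...]". The architecture name becomes the
+# builder name; its first target decides which SDK archive is downloaded.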
+findarches = subprocess.Popen([home_dir+'/dumpinfo.pl', 'architectures'],
+ stdout = subprocess.PIPE, cwd = home_dir+'/source.git')
+
+while True:
+ line = findarches.stdout.readline()
+ if not line:
+ break
+ at = line.strip().split()
+ arches.append(at)
+ archnames.append(at[0])
+
+
+# find feeds
+feeds = []
+
+from buildbot.changes.gitpoller import GitPoller
+c['change_source'] = []
+
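+# Register one GitPoller per src-git feed from feeds.conf.default so commits
+# to any package feed trigger the architecture builders.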
+with open(home_dir+'/source.git/feeds.conf.default', 'r') as f:
+ for line in f:
+ parts = line.strip().split()
+ if parts[0] == "src-git":
+ feeds.append(parts)
+ c['change_source'].append(GitPoller(parts[2], workdir='%s/%s.git' %(os.getcwd(), parts[1]), branch='master', pollinterval=300))
+
+
+####### SCHEDULERS
+
+# Configure the Schedulers, which decide how to react to incoming changes. In this
+# case, a change on the master branch triggers a build on every architecture builder.
+
+from buildbot.schedulers.basic import SingleBranchScheduler
+from buildbot.schedulers.forcesched import ForceScheduler
+from buildbot.changes import filter
+c['schedulers'] = []
+c['schedulers'].append(SingleBranchScheduler(
+ name="all",
+ change_filter=filter.ChangeFilter(branch='master'),
+ treeStableTimer=60,
+ builderNames=archnames))
+
+c['schedulers'].append(ForceScheduler(
+ name="force",
+ builderNames=archnames))
+
+####### BUILDERS
+
+# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
+# what steps, and which slaves can execute them. Note that any particular build will
+# only take place on one slave.
+
+from buildbot.process.factory import BuildFactory
+from buildbot.steps.source import Git
+from buildbot.steps.shell import ShellCommand
+from buildbot.steps.transfer import FileDownload
+from buildbot.process.properties import WithProperties
+
+c['builders'] = []
+
+dlLock = locks.SlaveLock("slave_dl")
+
+slaveNames = [ ]
+
+for slave in c['slaves']:
+ slaveNames.append(slave.slavename)
+
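+# Each architecture builder: clean the workspace, download and unpack the
+# matching SDK, install the feeds, build all feed packages inside the SDK,
+# then rsync the packages and any failure logs to the download server.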
+for arch in arches:
+ ts = arch[1].split('/')
+
+ factory = BuildFactory()
+
+ # prepare workspace
+ factory.addStep(FileDownload(mastersrc="cleanup.sh", slavedest="cleanup.sh", mode=0755))
+
+ factory.addStep(ShellCommand(
+ name = "cleanold",
+ description = "Cleaning previous builds",
+ command = ["./cleanup.sh", WithProperties("%(slavename)s"), WithProperties("%(buildername)s"), "full"],
+ haltOnFailure = True,
+ timeout = 2400))
+
+ factory.addStep(ShellCommand(
+ name = "cleanup",
+ description = "Cleaning work area",
+ command = ["./cleanup.sh", WithProperties("%(slavename)s"), WithProperties("%(buildername)s"), "single"],
+ haltOnFailure = True,
+ timeout = 2400))
+
+ factory.addStep(ShellCommand(
+ name = "mksdkdir",
+ description = "Preparing SDK directory",
+ command = ["mkdir", "sdk"],
+ haltOnFailure = True))
+
+ factory.addStep(ShellCommand(
+ name = "downloadsdk",
+ description = "Downloading SDK archive",
+ command = ["rsync", "-va", "downloads.lede-project.org::downloads/snapshots/targets/%s/%s/LEDE-SDK-*.tar.bz2" %(ts[0], ts[1]), "sdk.tar.bz2"],
+ haltOnFailure = True))
+
+ factory.addStep(ShellCommand(
+ name = "unpacksdk",
+ description = "Unpacking SDK archive",
+ command = ["tar", "--strip-components=1", "-C", "sdk/", "-vxjf", "sdk.tar.bz2"],
+ haltOnFailure = True))
+
+ factory.addStep(FileDownload(mastersrc=home_dir+'/key-build', slavedest="sdk/key-build", mode=0400))
+
+ factory.addStep(ShellCommand(
+ name = "mkdldir",
+ description = "Preparing download directory",
+ command = ["sh", "-c", "mkdir -p $HOME/dl && rmdir ./sdk/dl && ln -sf $HOME/dl ./sdk/dl"]))
+
+ factory.addStep(ShellCommand(
+ name = "updatefeeds",
+ description = "Updating feeds",
+ workdir = "build/sdk",
+ command = ["./scripts/feeds", "update"]))
+
+ factory.addStep(ShellCommand(
+ name = "installfeeds",
+ description = "Installing feeds",
+ workdir = "build/sdk",
+ command = ["./scripts/feeds", "install", "-a"]))
+
+ factory.addStep(ShellCommand(
+ name = "compile",
+ description = "Building packages",
+ workdir = "build/sdk",
+ command = ["make", "-j4", "V=s", "IGNORE_ERRORS=n m y", "BUILD_LOG=1", "CONFIG_SIGNED_PACKAGES=y"]))
+
+ factory.addStep(ShellCommand(
+ name = "uploadprepare",
+ description = "Preparing package directory",
+ workdir = "build/sdk",
+ command = ["rsync", "-av", "--include", "/%s/" %(arch[0]), "--exclude", "/*", "--exclude", "/%s/*" %(arch[0]), "bin/packages/", "%s/packages/" %(rsync_url)],
+ env={'RSYNC_PASSWORD': rsync_key},
+ haltOnFailure = True,
+ logEnviron = False
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "packageupload",
+ description = "Uploading package files",
+ workdir = "build/sdk",
+ command = ["rsync", "--delete", "-avz", "bin/packages/%s/" %(arch[0]), "%s/packages/%s/" %(rsync_url, arch[0])],
+ env={'RSYNC_PASSWORD': rsync_key},
+ haltOnFailure = True,
+ logEnviron = False
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "logprepare",
+ description = "Preparing log directory",
+ workdir = "build/sdk",
+ command = ["rsync", "-av", "--include", "/%s/" %(arch[0]), "--exclude", "/*", "--exclude", "/%s/*" %(arch[0]), "bin/packages/", "%s/faillogs/" %(rsync_url)],
+ env={'RSYNC_PASSWORD': rsync_key},
+ haltOnFailure = True,
+ logEnviron = False
+ ))
+
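+	# Failure logs: "logfind" extracts the failing package directories from
+	# logs/package/error.txt, "logcollect" copies their build logs into
+	# faillogs/, and "logupload" rsyncs faillogs/ to the server.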
+ factory.addStep(ShellCommand(
+ name = "logfind",
+ description = "Finding failure logs",
+ workdir = "build/sdk/logs/package/feeds",
+ command = ["sh", "-c", "sed -ne 's!^ *ERROR: package/feeds/\\([^ ]*\\) .*$!\\1!p' ../error.txt | sort -u | xargs -r find > ../../../logs.txt"],
+ haltOnFailure = False
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "logcollect",
+ description = "Collecting failure logs",
+ workdir = "build/sdk",
+ command = ["rsync", "-av", "--files-from=logs.txt", "logs/package/feeds/", "faillogs/"],
+ haltOnFailure = False
+ ))
+
+ factory.addStep(ShellCommand(
+ name = "logupload",
+ description = "Uploading failure logs",
+ workdir = "build/sdk",
+ command = ["rsync", "--delete", "-avz", "faillogs/", "%s/faillogs/%s/" %(rsync_url, arch[0])],
+ env={'RSYNC_PASSWORD': rsync_key},
+ haltOnFailure = False,
+ logEnviron = False
+ ))
+
+ from buildbot.config import BuilderConfig
+
+ c['builders'].append(BuilderConfig(name=arch[0], slavenames=slaveNames, factory=factory))
+
+
+####### STATUS TARGETS
+
+# 'status' is a list of Status Targets. The results of each build will be
+# pushed to these targets. buildbot/status/*.py has a variety to choose from,
+# including web pages, email senders, and IRC bots.
+
+c['status'] = []
+
+from buildbot.status import html
+from buildbot.status.web import authz, auth
+
+if ini.has_option("status", "bind"):
+ if ini.has_option("status", "user") and ini.has_option("status", "password"):
+ authz_cfg=authz.Authz(
+ # change any of these to True to enable; see the manual for more
+ # options
+ auth=auth.BasicAuth([(ini.get("status", "user"), ini.get("status", "password"))]),
+ gracefulShutdown = False,
+ forceBuild = 'auth', # use this to test your slave once it is set up
+ forceAllBuilds = 'auth',
+ pingBuilder = False,
+ stopBuild = 'auth',
+ stopAllBuilds = 'auth',
+ cancelPendingBuild = 'auth',
+ )
+ c['status'].append(html.WebStatus(http_port=ini.get("status", "bind"), authz=authz_cfg))
+ else:
+ c['status'].append(html.WebStatus(http_port=ini.get("status", "bind")))
+
+####### PROJECT IDENTITY
+
+# the 'title' string will appear at the top of this buildbot
+# installation's html.WebStatus home page (linked to the
+# 'titleURL') and is embedded in the title of the waterfall HTML page.
+
+c['title'] = ini.get("general", "title")
+c['titleURL'] = ini.get("general", "title_url")
+
+# the 'buildbotURL' string should point to the location where the buildbot's
+# internal web server (usually the html.WebStatus page) is visible. This
+# typically uses the port number set in the Waterfall 'status' entry, but
+# with an externally-visible host name which the buildbot cannot figure out
+# without some help.
+
+c['buildbotURL'] = ini.get("general", "buildbot_url")
+
+####### DB URL
+
+c['db'] = {
+ # This specifies what database buildbot uses to store its state. You can leave
+ # this at its default for all but the largest installations.
+ 'db_url' : "sqlite:///state.sqlite",
+}
+