gnunet-svn
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[taler-deployment] branch master updated (1782162 -> 863ee79)


From: gnunet
Subject: [taler-deployment] branch master updated (1782162 -> 863ee79)
Date: Wed, 12 Jul 2023 04:42:46 +0200

This is an automated email from the git hooks/post-receive script.

devan-carpenter pushed a change to branch master
in repository deployment.

    from 1782162  -glob fix for codespell
     new 878bf95  buildbot: add support for container job configs
     new 3c6698f  buildbot: do not assign pseudo-tty to podman cmd
     new 863ee79  buildbot: switch from podman to docker

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 buildbot/master.cfg | 206 ++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 169 insertions(+), 37 deletions(-)

diff --git a/buildbot/master.cfg b/buildbot/master.cfg
index 1fddae2..5762e1c 100644
--- a/buildbot/master.cfg
+++ b/buildbot/master.cfg
@@ -24,9 +24,13 @@
 # @author ng0
 # @author Christian Grothoff
 # @author Devan Carpenter
+import ast
+import configparser
+import glob
 import os
 import pathlib
 import re
+import subprocess
 
 from buildbot.changes.pb import PBChangeSource
 from buildbot.steps.source.git import Git
@@ -35,8 +39,10 @@ from buildbot.plugins import reporters
 from buildbot.plugins import schedulers
 from buildbot.plugins import steps
 from buildbot.plugins import util
+from buildbot.process import buildstep, logobserver
 from buildbot.reporters.generators.build import BuildStatusGenerator
 from buildbot.worker import Worker
+from twisted.internet import defer
 
 # This is a sample buildmaster config file. It must be
 # installed as 'master.cfg' in your buildmaster's base
@@ -174,24 +180,40 @@ def update_deployment(factory):
 
 
 # Convenience function that builds and runs a container.
-def container_run_step(stepName, factory, WORK_DIR, containerName,
+def container_add_step(HALT_ON_FAILURE,
+                       WARN_ON_FAILURE,
+                       CONTAINER_BUILD,
+                       CONTAINER_NAME,
+                       factory,
+                       WORK_DIR,
+                       stepName,
                        jobCmd="/workdir/ci/ci.sh",
                        containerFile="ci/Containerfile"):
-    factory.addStep(steps.ShellSequence(
-        name=stepName,
-        commands=[
-            util.ShellArg(command=["podman", "build", "-t", containerName,
-                                   "-f", containerFile, "."],
-                          logname='build container', haltOnFailure=True),
-            util.ShellArg(command=["podman", "run", "-ti", "--rm",
-                                   "--volume", f"{WORK_DIR}:/workdir",
-                                   "--workdir", "/workdir",
-                                   containerName, jobCmd],
-                          logname='run inside container', haltOnFailure=True),
-        ],
-        haltOnFailure=True,
-        workdir=WORK_DIR
-    ))
+    print(f"HALT_ON_FAILURE: {HALT_ON_FAILURE}, WARN_ON_FAILURE: {WARN_ON_FAILURE}, CONTAINER_BUILD: {CONTAINER_BUILD}, CONTAINER_NAME: {CONTAINER_NAME}")
+    if not CONTAINER_BUILD:
+        return steps.ShellSequence(
+                name=stepName,
+                commands=[
+                    util.ShellArg(command=["sg", "docker", "-c", f"docker run --rm --user $(id -u):$(id -g) --volume {WORK_DIR}:/workdir --workdir /workdir {CONTAINER_NAME} {jobCmd}"],
+                                  logname='run inside container',
+                                  haltOnFailure=HALT_ON_FAILURE),
+                    ],
+                haltOnFailure=HALT_ON_FAILURE,
+                workdir=WORK_DIR
+                )
+    else:
+        return steps.ShellSequence(
+                name=stepName,
+                commands=[
+                    util.ShellArg(command=["sg", "docker", "-c", f"docker build -t {CONTAINER_NAME} -f {containerFile} ."],
+                                  logname='build container', haltOnFailure=True),
+                    util.ShellArg(command=["sg", "docker", "-c", f"docker run --rm --user $(id -u):$(id -g) --volume {WORK_DIR}:/workdir --workdir /workdir {CONTAINER_NAME} {jobCmd}"],
+                                  logname='run inside container',
+                                  haltOnFailure=HALT_ON_FAILURE),
+                    ],
+                haltOnFailure=HALT_ON_FAILURE,
+                workdir=WORK_DIR
+                )
 
 ##################################################################
 ######################## JOBS ####################################
@@ -1200,7 +1222,7 @@ EMAIL_ALERTS.append("packaging-ubuntu-builder")
 # 19: CONTAINER FACTORY #####################
 #############################################
 ##
-# These factories uses the standard podman worker.
+# These factories use the standard container worker.
 WORKERS.append(Worker("container-worker", "container-pass"))
 
 #
@@ -1208,13 +1230,125 @@ WORKERS.append(Worker("container-worker", "container-pass"))
 #                    "merchant", "deployment", "twister", "sync",
 #                           "help", "taler-merchant-demos", "challenger"]
 #
-container_repos = ["wallet-core"]
+
+
+# Container Job Generator Functions
+# Parse config file and save values in a dict
+def ingest_job_config(configPath, jobName):
+    configDict = {jobName: {}}
+    print(configDict)
+    ini.read_string(configPath)
+    for key in ini["build"]:
+        value = ini['build'][key]
+        configDict[jobName][key] = value
+    print(configDict)
+    configDict.update(configDict)
+    print(configDict)
+    return configDict
+
+
+# Search for configs, and ingest
+def handle_job_config(jobDirPath, jobName, repoName, configPath, configExist):
+    print(configPath)
+    if configExist == 0:
+        print(f"Ingesting Job Config: {configPath}")
+        configDict = ingest_job_config(configPath, jobName)
+        print(configDict)
+        return configDict
+    else:
+        print("No job config; Using default params")
+        # Set default job config parameters
+        configDict = {jobName: {"HALT_ON_FAILURE": True,
+                                "WARN_ON_FAILURE": False,
+                                "CONTAINER_BUILD": True,
+                                "CONTAINER_NAME": repoName}}
+        return configDict
+
+
+class GenerateStagesCommand(buildstep.ShellMixin, steps.BuildStep):
+
+    def __init__(self, reponame, **kwargs):
+        self.reponame = reponame
+        kwargs = self.setupShellMixin(kwargs)
+        super().__init__(**kwargs)
+        self.observer = logobserver.BufferLogObserver()
+        self.addLogObserver('stdio', self.observer)
+
+    def extract_stages(self, stdout):
+        stages = []
+        for line in stdout.split('\n'):
+            stage = str(line.strip())
+            if stage:
+                stages.append(stage)
+        return stages
+
+    @defer.inlineCallbacks
+    def run(self):
+        CONTAINER_WORKDIR = f"/home/container-worker/workspace/{self.reponame}"
+        CI_JOBS_PATH = f"{CONTAINER_WORKDIR}/ci/jobs"
+        # run 'ls <project_root>/ci/jobs/' to get the list of stages
+        cmd = yield self.makeRemoteShellCommand()
+        yield self.runCommand(cmd)
+        jobDirs = []
+
+        # if the command passes extract the list of stages
+        result = cmd.results()
+        if result == util.SUCCESS:
+            jobDirs = self.extract_stages(self.observer.getStdout())
+            print(f"this is jobDirs list: {jobDirs}")
+            self.configDict = {}
+            print(f"Remote cmd stdout: {self.observer.getStdout()}")
+            print(f"cmd.results: {cmd.results()}")
+            for stage in jobDirs:
+                jobDirPath = f"{CI_JOBS_PATH}/{stage}"
+                observer = logobserver.BufferLogObserver()
+                self.addLogObserver('stdio', observer)
+                cmd1 = yield self.makeRemoteShellCommand(
+                        command=["cat", f"{jobDirPath}/config.ini"])
+                yield self.runCommand(cmd1)
+                print(f"cmd1.results: {cmd1.results()}")
+                print(f"Second command stdout: {observer.getStdout()}")
+                print(f"Current stage: {stage}")
+                print(jobDirPath)
+                self.configDict.update(
+                        handle_job_config(
+                            jobDirPath, stage, self.reponame,
+                            observer.getStdout(), cmd1.results()))
+                print(self.configDict)
+            # create a container step for each stage and
+            # add them to the build
+            convstr2bool = ast.literal_eval
+            self.build.addStepsAfterCurrentStep([
+                container_add_step(
+                    convstr2bool(
+                        str(self.configDict[stage]["HALT_ON_FAILURE"])),
+                    convstr2bool(
+                        str(self.configDict[stage]["WARN_ON_FAILURE"])),
+                    convstr2bool(
+                        str(self.configDict[stage]["CONTAINER_BUILD"])),
+                    self.configDict[stage]["CONTAINER_NAME"],
+                    container_factory,
+                    CONTAINER_WORKDIR,
+                    stage,
+                    f"ci/jobs/{stage}/job.sh")
+                for stage in jobDirs
+            ])
+
+        return result
+
+
+container_repos = ["wallet-core", "merchant"]
 
 for reponame in container_repos:
-    ##
+
+    # Prepare to read job configs
+    ini = configparser.ConfigParser()
+    ini.optionxform = str
+
     # Factory-wide variables
     REPO_URL = "https://git.taler.net/" + reponame + ".git"
     CONTAINER_WORKDIR = f"/home/container-worker/workspace/{reponame}"
+    CI_JOBS_PATH = f"{CONTAINER_WORKDIR}/ci/jobs"
 
     # Create a factory
     container_factory = util.BuildFactory()
@@ -1237,37 +1371,35 @@ for reponame in container_repos:
     container_factory.addStep(Git(
         name="git",
         repourl=REPO_URL,
+        branch="dev/devan-carpenter/container-ci",
         mode='full',
         method='fresh',
         haltOnFailure=True,
     ))
 
-    # Run container step with default commands
-    CI_JOBS_PATH = f"{CONTAINER_WORKDIR}/ci/jobs"
-    if os.path.exists(CI_JOBS_PATH):
-        for parentDir, dirNames, fileNames in os.walk(CI_JOBS_PATH):
-            dirNames.sort()
-            fileNames.sort()
-            for filename in fileNames:
-                if filename.endswith('.sh'):
-                    basedir = pathlib.PurePath(parentDir).name
-                    container_run_step(basedir,
-                                       container_factory,
-                                       CONTAINER_WORKDIR, reponame,
-                    				   f"ci/jobs/{basedir}/{filename}")
-            else:
-                print("No jobs found")
-    else:
-        print("Cannot find jobs directory")
+    container_factory.addStep(GenerateStagesCommand(
+        reponame,
+        name="Generate build stages",
+        command=["ls", CI_JOBS_PATH],
+        haltOnFailure=True))
 
     BUILDERS.append(util.BuilderConfig(
         name=f"{reponame}-builder",
         workernames=["container-worker"],
         factory=container_factory
     ))
+
+    # Only enable this scheduler for debugging!
+    # Will run builders with 1 minute of waiting in between builds
+    # SCHEDULERS.append(schedulers.Periodic(
+    #     name=f"{reponame}-minutely",
+    #     builderNames=[f"{reponame}-builder"],
+    #     periodicBuildTimer=60
+    #     ))
+
     # Buildmaster is notified whenever deployment.git changes
     SCHEDULERS.append(schedulers.SingleBranchScheduler(
-        name="container-scheduler",
+        name=f"{reponame}-container-scheduler",
         change_filter=util.ChangeFilter(
             branch="dev/devan-carpenter/container-ci",
             project_re=f"({reponame})"

-- 
To stop receiving notification emails like this one, please contact
gnunet@gnunet.org.



reply via email to

[Prev in Thread] Current Thread [Next in Thread]