switched to weighted build-queue

Giovanni Harting 2022-02-19 18:03:55 +01:00
parent 0f98b9d18f
commit ba159e0a72
6 changed files with 434 additions and 490 deletions


@@ -58,10 +58,12 @@ status:
unknown: "dark"
build:
# number of worker per march (e.g. x86-64-v3)
worker: 2
# number of workers total
worker: 4
makej: 8
checks: true
# builds over this threshold are considered slow (in minutes)
slow_queue_threshold: 60
logging:
level: INFO
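
The two new keys change the scheduling model: `worker` is now a global cap on concurrent builds across all marchs (enforced by a weighted semaphore, see utils.go below), and `slow_queue_threshold` routes any package whose last build took at least that many minutes into a separate slow queue. A minimal sketch of how this section maps onto the Conf struct via gopkg.in/yaml.v2 (the struct below is a trimmed-down stand-in for the real one in utils.go):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Trimmed-down stand-in for the Conf struct in utils.go. yaml.v2 matches
// untagged fields by lowercasing the field name, so only the snake_case
// key needs an explicit tag.
type conf struct {
	Build struct {
		Worker             int
		Makej              int
		Checks             bool
		SlowQueueThreshold int `yaml:"slow_queue_threshold"`
	}
}

func main() {
	raw := []byte("build:\n  worker: 4\n  makej: 8\n  checks: true\n  slow_queue_threshold: 60\n")
	var c conf
	if err := yaml.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Printf("total workers: %d, slow threshold: %d min\n", c.Build.Worker, c.Build.SlowQueueThreshold)
}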

go.mod (5 changed lines)

@@ -7,9 +7,11 @@ require (
github.com/Jguer/go-alpm/v2 v2.0.6
github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5
github.com/Morganamilo/go-srcinfo v1.0.0
github.com/google/uuid v1.3.0
github.com/jackc/pgx/v4 v4.15.0
github.com/sirupsen/logrus v1.8.1
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
gopkg.in/yaml.v2 v2.4.0
lukechampine.com/blake3 v1.1.7
)
@@ -21,7 +23,6 @@ require (
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
github.com/go-openapi/inflect v0.19.0 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/hashicorp/hcl/v2 v2.11.1 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.11.0 // indirect
@@ -33,7 +34,7 @@ require (
github.com/klauspost/cpuid/v2 v2.0.11 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/zclconf/go-cty v1.10.0 // indirect
golang.org/x/crypto v0.0.0-20220213190939-1e6e3497d506 // indirect
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
golang.org/x/mod v0.5.1 // indirect
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
golang.org/x/text v0.3.7 // indirect

go.sum (11 changed lines)

@@ -120,11 +120,9 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -143,9 +141,7 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -187,8 +183,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220213190939-1e6e3497d506 h1:EuGTJDfeg/PGZJp3gq1K+14eSLFTsrj1eg8KQuiUyKg=
golang.org/x/crypto v0.0.0-20220213190939-1e6e3497d506/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@@ -204,6 +200,8 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -237,7 +235,6 @@ golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.9-0.20211216111533-8d383106f7e7 h1:M1gcVrIb2lSn2FIL19DG0+/b8nNVKJ7W7b4WcAGZAYM=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

main.go (497 changed lines)

@@ -1,7 +1,6 @@
package main
import (
"bytes"
"context"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
@@ -14,15 +13,13 @@ import (
_ "github.com/jackc/pgx/v4/stdlib"
log "github.com/sirupsen/logrus"
"github.com/wercker/journalhook"
"golang.org/x/sync/semaphore"
"gopkg.in/yaml.v2"
"html/template"
"math/rand"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
@@ -39,378 +36,7 @@ var (
checkInterval = flag.Int("interval", 5, "How often svn2git should be checked in minutes (default: 5)")
)
func (b *BuildManager) buildWorker(id int, march string) {
err := syscall.Setpriority(syscall.PRIO_PROCESS, 0, 18)
if err != nil {
log.Warningf("[%s/worker-%d] Failed to drop priority: %v", march, id, err)
}
for {
select {
case pkg := <-b.build[march]:
if b.exit {
log.Infof("Worker %s/%d exited...", march, id)
return
}
b.buildWG.Add(1)
b.parseWG.Done()
rand.Seed(time.Now().UnixNano())
time.Sleep(time.Duration(rand.Float32()*60) * time.Second)
start := time.Now().UTC()
log.Infof("[%s/%s/%s] Build starting", pkg.FullRepo, pkg.Pkgbase, pkg.Version)
pkg.toDbPackage(true)
pkg.DbPackage = pkg.DbPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(context.Background())
err := pkg.importKeys()
if err != nil {
log.Warningf("[%s/%s/%s] Failed to import pgp keys: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
buildDir, err := pkg.setupBuildDir()
if err != nil {
log.Errorf("[%s/%s/%s] Error setting up builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
b.buildWG.Done()
continue
}
buildNo := 1
versionSlice := strings.Split(pkg.DbPackage.LastVersionBuild, ".")
if strings.Join(versionSlice[:len(versionSlice)-1], ".") == pkg.Version {
buildNo, err = strconv.Atoi(versionSlice[len(versionSlice)-1])
if err != nil {
log.Errorf("[%s/%s/%s] Failed to read build from pkgrel: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
continue
}
buildNo++
}
err = pkg.increasePkgRel(buildNo)
if err != nil {
log.Errorf("[%s/%s/%s] Failed to increase pkgrel: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
continue
}
if contains(conf.KernelToPatch, pkg.Pkgbase) {
err = pkg.prepareKernelPatches()
if err != nil {
log.Warningf("[%s/%s/%s] Failed to modify PKGBUILD for kernel patch: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
pkg.DbPackage.Update().SetStatus(dbpackage.StatusFailed).SetSkipReason("failed to apply patch").SetHash(pkg.Hash).ExecX(context.Background())
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
continue
}
}
pkg.PkgFiles = []string{}
// default to LTO
makepkgFile := makepkg
if pkg.DbPackage.Lto == dbpackage.LtoDisabled || pkg.DbPackage.Lto == dbpackage.LtoAutoDisabled {
// use non-lto makepkg.conf if LTO is blacklisted for this package
makepkgFile = makepkgLTO
}
cmd := exec.Command("sh", "-c",
"cd "+filepath.Dir(pkg.Pkgbuild)+"&&makechrootpkg -c -D "+filepath.Join(conf.Basedir.Work, makepkgDir)+" -l worker-"+march+"-"+strconv.Itoa(id)+" -r "+filepath.Join(conf.Basedir.Work, chrootDir)+" -- "+
"-m --noprogressbar --config "+filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, pkg.March)))
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err = cmd.Start()
if err != nil {
log.Errorf("[%s/%s/%s] Error starting build: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildProcMutex.Lock()
b.buildProcesses = append(b.buildProcesses, cmd.Process)
b.buildProcMutex.Unlock()
err = cmd.Wait()
b.buildProcMutex.Lock()
for i := range b.buildProcesses {
if b.buildProcesses[i].Pid == cmd.Process.Pid {
b.buildProcesses = append(b.buildProcesses[:i], b.buildProcesses[i+1:]...)
break
}
}
b.buildProcMutex.Unlock()
if err != nil {
if b.exit {
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
continue
}
if pkg.DbPackage.Lto != dbpackage.LtoAutoDisabled && pkg.DbPackage.Lto != dbpackage.LtoDisabled && reLdError.Match(out.Bytes()) {
log.Infof("[%s/%s/%s] ld error detected, disabling LTO", pkg.FullRepo, pkg.Pkgbase, pkg.Version)
pkg.DbPackage.Update().SetStatus(dbpackage.StatusQueued).SetSkipReason("non-LTO rebuild").SetLto(dbpackage.LtoAutoDisabled).ExecX(context.Background())
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
continue
}
if reDownloadError.Match(out.Bytes()) || rePortError.Match(out.Bytes()) || reSigError.Match(out.Bytes()) {
log.Infof("[%s/%s/%s] detected fixable error, rebuilding later", pkg.FullRepo, pkg.Pkgbase, pkg.Version)
pkg.DbPackage.Update().SetStatus(dbpackage.StatusQueued).ExecX(context.Background())
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
continue
}
log.Warningf("[%s/%s/%s] Build failed (%d)", pkg.FullRepo, pkg.Pkgbase, pkg.Version, cmd.ProcessState.ExitCode())
err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, march), 0755)
if err != nil {
log.Warningf("[%s/%s/%s] Error creating logdir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, march, pkg.Pkgbase+".log"), out.Bytes(), 0644)
if err != nil {
log.Warningf("[%s/%s/%s] Error writing to logdir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
pkg.DbPackage.Update().SetStatus(dbpackage.StatusFailed).ClearSkipReason().SetBuildTimeStart(start).SetBuildTimeEnd(time.Now().UTC()).SetHash(pkg.Hash).ExecX(context.Background())
// purge failed package from repo
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
continue
}
pkgFiles, err := filepath.Glob(filepath.Join(filepath.Dir(pkg.Pkgbuild), "*.pkg.tar.zst"))
if err != nil {
log.Warningf("[%s/%s/%s] Error scanning builddir for artifacts: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
if len(pkgFiles) == 0 {
log.Warningf("No packages found after building %s. Abort build.", pkg.Pkgbase)
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
continue
}
for _, file := range pkgFiles {
cmd = exec.Command("gpg", "--batch", "--detach-sign", file)
res, err := cmd.CombinedOutput()
if err != nil {
log.Warningf("Failed to sign %s (%s): %s", pkg.Pkgbase, err, string(res))
b.buildWG.Done()
continue
}
}
copyFiles, err := filepath.Glob(filepath.Join(filepath.Dir(pkg.Pkgbuild), "*.pkg.tar.zst*"))
if err != nil {
log.Warningf("[%s/%s/%s] Error scanning builddir for artifacts: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
for _, file := range copyFiles {
err = os.MkdirAll(filepath.Join(conf.Basedir.Work, waitingDir, pkg.FullRepo), 0755)
if err != nil {
log.Warningf("[%s/%s/%s] Error creating holding dir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
_, err = copyFile(file, filepath.Join(conf.Basedir.Work, waitingDir, pkg.FullRepo, filepath.Base(file)))
if err != nil {
log.Warningf("[%s/%s/%s] Error coping file to holding dir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
b.buildWG.Done()
continue
}
if filepath.Ext(file) != ".sig" {
pkg.PkgFiles = append(pkg.PkgFiles, filepath.Join(conf.Basedir.Work, waitingDir, pkg.FullRepo, filepath.Base(file)))
}
}
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, logDir, march, pkg.Pkgbase+".log")); err == nil {
err = os.Remove(filepath.Join(conf.Basedir.Repo, logDir, march, pkg.Pkgbase+".log"))
if err != nil {
log.Warningf("[%s/%s/%s] Error removing log: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
}
if pkg.DbPackage.Lto != dbpackage.LtoDisabled && pkg.DbPackage.Lto != dbpackage.LtoAutoDisabled {
pkg.DbPackage.Update().
SetStatus(dbpackage.StatusBuild).
SetLto(dbpackage.LtoEnabled).
SetBuildTimeStart(start).
SetLastVersionBuild(pkg.Version).
SetBuildTimeEnd(time.Now().UTC()).
SetHash(pkg.Hash).
ExecX(context.Background())
} else {
pkg.DbPackage.Update().
SetStatus(dbpackage.StatusBuild).
SetBuildTimeStart(start).
SetBuildTimeEnd(time.Now().UTC()).
SetLastVersionBuild(pkg.Version).
SetHash(pkg.Hash).ExecX(context.Background())
}
log.Infof("[%s/%s/%s] Build successful (%s)", pkg.FullRepo, pkg.Pkgbase, pkg.Version, time.Since(start))
err = cleanBuildDir(buildDir)
if err != nil {
log.Warningf("[%s/%s/%s] Error removing builddir: %v", pkg.FullRepo, pkg.Pkgbase, pkg.Version, err)
}
b.buildWG.Done()
}
}
}
func (b *BuildManager) parseWorker() {
for {
if b.exit {
return
}
select {
case pkg := <-b.parse:
if err := pkg.genSrcinfo(); err != nil {
log.Warningf("Failed to generate SRCINFO for %s: %v", pkg.Pkgbase, err)
b.parseWG.Done()
continue
}
pkg.Version = constructVersion(pkg.Srcinfo.Pkgver, pkg.Srcinfo.Pkgrel, pkg.Srcinfo.Epoch)
if !pkg.isAvailable(alpmHandle) {
log.Debugf("[%s/%s] Not available on mirror, skipping build", pkg.FullRepo, pkg.Pkgbase)
b.parseWG.Done()
continue
}
pkg.toDbPackage(true)
skipping := false
if contains(pkg.Srcinfo.Arch, "any") {
log.Debugf("Skipped %s: any-Package", pkg.Srcinfo.Pkgbase)
pkg.DbPackage.SkipReason = "arch = any"
pkg.DbPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if contains(conf.Blacklist.Packages, pkg.Srcinfo.Pkgbase) {
log.Debugf("Skipped %s: blacklisted package", pkg.Srcinfo.Pkgbase)
pkg.DbPackage.SkipReason = "blacklisted"
pkg.DbPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if contains(pkg.Srcinfo.MakeDepends, "ghc") || contains(pkg.Srcinfo.MakeDepends, "haskell-ghc") || contains(pkg.Srcinfo.Depends, "ghc") || contains(pkg.Srcinfo.Depends, "haskell-ghc") {
// Skip Haskell packages for now, as we are facing linking problems with them,
// most likely caused by not having a dependency check implemented yet and building at random.
// https://git.harting.dev/anonfunc/ALHP.GO/issues/11
log.Debugf("Skipped %s: haskell package", pkg.Srcinfo.Pkgbase)
pkg.DbPackage.SkipReason = "blacklisted (haskell)"
pkg.DbPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if pkg.isPkgFailed() {
log.Debugf("Skipped %s: failed build", pkg.Srcinfo.Pkgbase)
skipping = true
}
if skipping {
pkg.DbPackage = pkg.DbPackage.Update().SetUpdated(time.Now()).SetVersion(pkg.Version).
SetPackages(packages2slice(pkg.Srcinfo.Packages)).SetStatus(pkg.DbPackage.Status).
SetSkipReason(pkg.DbPackage.SkipReason).SetHash(pkg.Hash).SaveX(context.Background())
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
b.parseWG.Done()
continue
} else {
pkg.DbPackage = pkg.DbPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(pkg.Srcinfo.Packages)).SetVersion(pkg.Version).SaveX(context.Background())
}
if contains(conf.Blacklist.LTO, pkg.Pkgbase) {
pkg.DbPackage = pkg.DbPackage.Update().SetLto(dbpackage.LtoDisabled).SaveX(context.Background())
}
repoVer, err := pkg.repoVersion()
if err != nil {
pkg.DbPackage = pkg.DbPackage.Update().ClearRepoVersion().SaveX(context.Background())
} else if err == nil && alpm.VerCmp(repoVer, pkg.Version) > 0 {
log.Debugf("Skipped %s: Version in repo higher than in PKGBUILD (%s < %s)", pkg.Srcinfo.Pkgbase, pkg.Version, repoVer)
pkg.DbPackage = pkg.DbPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetHash(pkg.Hash).SaveX(context.Background())
b.parseWG.Done()
continue
}
isLatest, local, syncVersion, err := pkg.isMirrorLatest(alpmHandle)
if err != nil {
switch err.(type) {
default:
log.Warningf("[%s/%s] Problem solving dependencies: %v", pkg.FullRepo, pkg.Srcinfo.Pkgbase, err)
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
b.parseWG.Done()
continue
case MultiplePKGBUILDError:
log.Infof("Skipped %s: Multiple PKGBUILDs for dependency found: %v", pkg.Srcinfo.Pkgbase, err)
pkg.DbPackage = pkg.DbPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(context.Background())
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
b.parseWG.Done()
continue
case UnableToSatisfyError:
log.Infof("Skipped %s: unable to resolve dependencies: %v", pkg.Srcinfo.Pkgbase, err)
pkg.DbPackage = pkg.DbPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(context.Background())
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
b.parseWG.Done()
continue
}
}
pkg.DbPackage = pkg.DbPackage.Update().SetStatus(dbpackage.StatusQueued).SaveX(context.Background())
if !isLatest {
if local != nil {
log.Infof("Delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)", pkg.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
pkg.DbPackage.Update().SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(context.Background())
} else {
log.Infof("Delayed %s: not all dependencies are up to date or resolvable", pkg.Srcinfo.Pkgbase)
pkg.DbPackage.Update().SetSkipReason("waiting for mirror").ExecX(context.Background())
}
// Purge delayed packages in case delay is caused by inconsistencies in svn2git.
// Worst case would be clients downloading a package update twice, once from their official mirror,
// and then after build from ALHP. Best case we prevent a not buildable package from staying in the repos
// in an outdated version.
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
b.parseWG.Done()
continue
}
b.build[pkg.March] <- pkg
}
}
}
func (b *BuildManager) htmlWorker() {
func (b *BuildManager) htmlWorker(ctx context.Context) {
type Pkg struct {
Pkgbase string
Status string
@@ -466,7 +92,7 @@ func (b *BuildManager) htmlWorker() {
Name: repo,
}
pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(context.Background())
pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx)
for _, pkg := range pkgs {
@@ -533,7 +159,7 @@ func (b *BuildManager) htmlWorker() {
Count int `json:"count"`
}
db.DbPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(context.Background(), &v)
db.DbPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v)
for _, c := range v {
switch c.Status {
@@ -553,7 +179,7 @@ func (b *BuildManager) htmlWorker() {
Count int `json:"count"`
}
db.DbPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(context.Background(), &v2)
db.DbPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2)
for _, c := range v2 {
switch c.Status {
@@ -683,16 +309,12 @@ func (b *BuildManager) repoWorker(repo string) {
}
}
func (b *BuildManager) syncWorker() {
func (b *BuildManager) syncWorker(ctx context.Context) error {
err := os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0755)
if err != nil {
log.Fatalf("Error creating upstream dir: %v", err)
}
for i := 0; i < runtime.NumCPU(); i++ {
go b.parseWorker()
}
for {
for gitDir, gitURL := range conf.Svn2git {
gitPath := filepath.Join(conf.Basedir.Work, upstreamDir, gitDir)
@@ -760,75 +382,50 @@
}
b.alpmMutex.Unlock()
pkgBuilds, err := Glob(filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
queue, err := b.queue(filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
if err != nil {
log.Fatalf("Error scanning for PKGBUILDs: %v", err)
}
log.Warningf("Error building buildQueue: %v", err)
} else {
var fastQueue []*ProtoPackage
var slowQueue []*ProtoPackage
// Shuffle pkgbuilds to spread out long-running builds, otherwise pkgBuilds is alphabetically-sorted
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(pkgBuilds), func(i, j int) { pkgBuilds[i], pkgBuilds[j] = pkgBuilds[j], pkgBuilds[i] })
for _, pkgbuild := range pkgBuilds {
if b.exit {
return
for _, pkg := range queue {
if pkg.Priority() >= float64(conf.Build.SlowQueueThreshold) {
slowQueue = append(slowQueue, pkg)
} else {
fastQueue = append(fastQueue, pkg)
}
}
mPkgbuild := PKGBUILD(pkgbuild)
if mPkgbuild.FullRepo() == "trunk" || !contains(conf.Repos, mPkgbuild.Repo()) || containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
continue
if len(fastQueue) > 0 {
log.Infof("Skipping slowQueue=%d in favor of fastQueue=%d", len(slowQueue), len(fastQueue))
slowQueue = []*ProtoPackage{}
}
for _, march := range conf.March {
dbPkg, dbErr := db.DbPackage.Query().Where(
dbpackage.And(
dbpackage.Pkgbase(mPkgbuild.PkgBase()),
dbpackage.RepositoryEQ(dbpackage.Repository(mPkgbuild.Repo())),
dbpackage.March(march),
),
).Only(context.Background())
err = b.buildQueue(fastQueue, ctx)
if err != nil {
return err
}
if ent.IsNotFound(dbErr) {
log.Debugf("[%s/%s] Package not found in database", mPkgbuild.Repo(), mPkgbuild.PkgBase())
} else if err != nil {
log.Errorf("[%s/%s] Problem querying db for package: %v", mPkgbuild.Repo(), mPkgbuild.PkgBase(), dbErr)
}
err = b.buildQueue(slowQueue, ctx)
if err != nil {
return err
}
// compare b3sum of PKGBUILD file to hash in database, only proceed if hash differs
// reduces the amount of PKGBUILDs that need to be parsed with makepkg, which is _really_ slow, significantly
b3s, err := b3sum(pkgbuild)
if err != nil {
log.Fatalf("Error hashing PKGBUILD: %v", err)
}
if dbPkg != nil && b3s == dbPkg.Hash {
log.Debugf("[%s/%s] Skipped: PKGBUILD hash matches db (%s)", mPkgbuild.Repo(), mPkgbuild.PkgBase(), b3s)
continue
}
// send to parse
b.parseWG.Add(1)
b.parse <- &ProtoPackage{
Pkgbuild: pkgbuild,
Pkgbase: mPkgbuild.PkgBase(),
Repo: dbpackage.Repository(mPkgbuild.Repo()),
March: march,
FullRepo: mPkgbuild.Repo() + "-" + march,
Hash: b3s,
}
if err := b.sem.Acquire(ctx, int64(conf.Build.Worker)); err != nil {
return err
}
b.sem.Release(int64(conf.Build.Worker)) // free the drained slots again so the next iteration can build
}
b.parseWG.Wait()
b.buildWG.Wait()
if !b.exit {
if ctx.Err() == nil {
for _, repo := range repos {
err = movePackagesLive(repo)
if err != nil {
log.Errorf("[%s] Error moving packages live: %v", repo, err)
}
}
} else {
return ctx.Err()
}
time.Sleep(time.Duration(*checkInterval) * time.Minute)
@@ -896,11 +493,9 @@ func main() {
}
buildManager = &BuildManager{
build: make(map[string]chan *ProtoPackage),
parse: make(chan *ProtoPackage, 10000),
repoPurge: make(map[string]chan []*ProtoPackage),
repoAdd: make(map[string]chan []*ProtoPackage),
exit: false,
sem: semaphore.NewWeighted(int64(conf.Build.Worker)),
}
err = setupChroot()
@@ -917,8 +512,12 @@
log.Fatalf("Error while ALPM-init: %v", err)
}
go buildManager.syncWorker()
go buildManager.htmlWorker()
ctx, cancel := context.WithCancel(context.Background())
go func() {
_ = buildManager.syncWorker(ctx)
}()
go buildManager.htmlWorker(ctx)
killLoop:
for {
@@ -945,21 +544,7 @@
}
}
buildManager.exit = true
buildManager.buildProcMutex.RLock()
for _, p := range buildManager.buildProcesses {
pgid, err := syscall.Getpgid(p.Pid)
if err != nil {
log.Warningf("Error getting pgid: %v", err)
}
err = syscall.Kill(-pgid, syscall.SIGTERM)
if err != nil {
log.Warningf("Error killing %d: %v", pgid, err)
}
}
buildManager.buildProcMutex.RUnlock()
buildManager.buildWG.Wait()
cancel()
buildManager.repoWG.Wait()
_ = alpmHandle.Release()
}


@@ -1,6 +1,7 @@
package main
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
@@ -9,14 +10,17 @@ import (
"git.harting.dev/ALHP/ALHP.GO/ent/dbpackage"
"github.com/Jguer/go-alpm/v2"
"github.com/Morganamilo/go-srcinfo"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
"io"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
)
type ProtoPackage struct {
@@ -33,10 +37,281 @@
DbPackage *ent.DbPackage
}
func (p ProtoPackage) isEligible(ctx context.Context) (bool, error) {
if err := p.genSrcinfo(); err != nil {
return false, fmt.Errorf("error generating SRCINFO: %w", err)
}
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
if !p.isAvailable(alpmHandle) {
log.Debugf("[%s/%s] Not available on mirror, skipping build", p.FullRepo, p.Pkgbase)
return false, nil
}
p.toDbPackage(true)
skipping := false
if contains(p.Srcinfo.Arch, "any") {
log.Debugf("Skipped %s: any-Package", p.Srcinfo.Pkgbase)
p.DbPackage.SkipReason = "arch = any"
p.DbPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if contains(conf.Blacklist.Packages, p.Srcinfo.Pkgbase) {
log.Debugf("Skipped %s: blacklisted package", p.Srcinfo.Pkgbase)
p.DbPackage.SkipReason = "blacklisted"
p.DbPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if contains(p.Srcinfo.MakeDepends, "ghc") || contains(p.Srcinfo.MakeDepends, "haskell-ghc") || contains(p.Srcinfo.Depends, "ghc") || contains(p.Srcinfo.Depends, "haskell-ghc") {
// Skip Haskell packages for now, as we are facing linking problems with them,
// most likely caused by not having a dependency check implemented yet and building at random.
// https://git.harting.dev/anonfunc/ALHP.GO/issues/11
log.Debugf("Skipped %s: haskell package", p.Srcinfo.Pkgbase)
p.DbPackage.SkipReason = "blacklisted (haskell)"
p.DbPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if p.isPkgFailed() {
log.Debugf("Skipped %s: failed build", p.Srcinfo.Pkgbase)
skipping = true
}
if skipping {
p.DbPackage = p.DbPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).
SetPackages(packages2slice(p.Srcinfo.Packages)).SetStatus(p.DbPackage.Status).
SetSkipReason(p.DbPackage.SkipReason).SetHash(p.Hash).SaveX(ctx)
return false, nil
} else {
p.DbPackage = p.DbPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(p.Srcinfo.Packages)).SetVersion(p.Version).SaveX(ctx)
}
if contains(conf.Blacklist.LTO, p.Pkgbase) {
p.DbPackage = p.DbPackage.Update().SetLto(dbpackage.LtoDisabled).SaveX(ctx)
}
repoVer, err := p.repoVersion()
if err != nil {
p.DbPackage = p.DbPackage.Update().ClearRepoVersion().SaveX(ctx)
} else if err == nil && alpm.VerCmp(repoVer, p.Version) > 0 {
log.Debugf("Skipped %s: Version in repo higher than in PKGBUILD (%s < %s)", p.Srcinfo.Pkgbase, p.Version, repoVer)
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetHash(p.Hash).SaveX(ctx)
return false, nil
}
isLatest, local, syncVersion, err := p.isMirrorLatest(alpmHandle)
if err != nil {
switch err.(type) {
default:
return false, fmt.Errorf("error solving deps: %w", err)
case MultiplePKGBUILDError:
log.Infof("Skipped %s: Multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
return false, err
case UnableToSatisfyError:
log.Infof("Skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
return false, err
}
}
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusQueued).SaveX(ctx)
if !isLatest {
if local != nil {
log.Infof("Delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)", p.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
p.DbPackage.Update().SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(ctx)
} else {
log.Infof("Delayed %s: not all dependencies are up to date or resolvable", p.Srcinfo.Pkgbase)
p.DbPackage.Update().SetSkipReason("waiting for mirror").ExecX(ctx)
}
// Purge delayed packages in case delay is caused by inconsistencies in svn2git.
// Worst case would be clients downloading a package update twice, once from their official mirror,
// and then after build from ALHP. Best case we prevent a not buildable package from staying in the repos
// in an outdated version.
return false, nil
}
return true, nil
}
func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
rand.Seed(time.Now().UnixNano())
time.Sleep(time.Duration(rand.Float32()*60) * time.Second)
start := time.Now().UTC()
workerId := uuid.New()
chroot := "build_" + workerId.String()
log.Infof("[%s/%s/%s] Build starting", p.FullRepo, p.Pkgbase, p.Version)
p.toDbPackage(true)
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)
err := p.importKeys()
if err != nil {
log.Warningf("[%s/%s/%s] Failed to import pgp keys: %v", p.FullRepo, p.Pkgbase, p.Version, err)
}
buildFolder, err := p.setupBuildDir()
if err != nil {
return time.Since(start), fmt.Errorf("error setting up build folder: %w", err)
}
defer func() {
err := cleanBuildDir(buildFolder, filepath.Join(conf.Basedir.Work, chrootDir, chroot))
if err != nil {
log.Errorf("error removing builddir/chroot %s/%s: %v", buildDir, chroot, err)
}
}()
buildNo := 1
versionSlice := strings.Split(p.DbPackage.LastVersionBuild, ".")
if strings.Join(versionSlice[:len(versionSlice)-1], ".") == p.Version {
buildNo, err = strconv.Atoi(versionSlice[len(versionSlice)-1])
if err != nil {
return time.Since(start), fmt.Errorf("error while reading buildNo from pkgrel: %w", err)
}
buildNo++
}
err = p.increasePkgRel(buildNo)
if err != nil {
return time.Since(start), fmt.Errorf("error while increasing pkgrel: %w", err)
}
if contains(conf.KernelToPatch, p.Pkgbase) {
err = p.prepareKernelPatches()
if err != nil {
p.DbPackage.Update().SetStatus(dbpackage.StatusFailed).SetSkipReason("failed to apply patch").SetHash(p.Hash).ExecX(ctx)
return time.Since(start), fmt.Errorf("error modifying PKGBUILD for kernel patch: %w", err)
}
}
p.PkgFiles = []string{}
// default to LTO
makepkgFile := makepkg
if p.DbPackage.Lto == dbpackage.LtoDisabled || p.DbPackage.Lto == dbpackage.LtoAutoDisabled {
// use non-lto makepkg.conf if LTO is blacklisted for this package
makepkgFile = makepkgLTO
}
cmd := exec.CommandContext(ctx, "sh", "-c",
"cd "+filepath.Dir(p.Pkgbuild)+"&&makechrootpkg -c -D "+filepath.Join(conf.Basedir.Work, makepkgDir)+" -l "+chroot+" -r "+filepath.Join(conf.Basedir.Work, chrootDir)+" -- "+
"-m --noprogressbar --config "+filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March)))
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err = cmd.Start()
if err != nil {
return time.Since(start), fmt.Errorf("error starting build: %w", err)
}
err = cmd.Wait()
if err != nil {
if ctx.Err() != nil {
return time.Since(start), ctx.Err()
}
if p.DbPackage.Lto != dbpackage.LtoAutoDisabled && p.DbPackage.Lto != dbpackage.LtoDisabled && reLdError.Match(out.Bytes()) {
p.DbPackage.Update().SetStatus(dbpackage.StatusQueued).SetSkipReason("non-LTO rebuild").SetLto(dbpackage.LtoAutoDisabled).ExecX(ctx)
return time.Since(start), fmt.Errorf("ld error detected, LTO disabled")
}
if reDownloadError.Match(out.Bytes()) || rePortError.Match(out.Bytes()) || reSigError.Match(out.Bytes()) {
p.DbPackage.Update().SetStatus(dbpackage.StatusQueued).ExecX(ctx)
return time.Since(start), fmt.Errorf("known builderror detected")
}
err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, p.March), 0755)
if err != nil {
return time.Since(start), fmt.Errorf("error creating logdir: %w", err)
}
err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"), out.Bytes(), 0644)
if err != nil {
return time.Since(start), fmt.Errorf("error warting to logdir: %w", err)
}
p.DbPackage.Update().SetStatus(dbpackage.StatusFailed).ClearSkipReason().SetBuildTimeStart(start).SetBuildTimeEnd(time.Now().UTC()).SetHash(p.Hash).ExecX(ctx)
return time.Since(start), fmt.Errorf("build failed: exit code %d", cmd.ProcessState.ExitCode())
}
pkgFiles, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*.pkg.tar.zst"))
if err != nil {
return time.Since(start), fmt.Errorf("error scanning builddir for artifacts: %w", err)
}
if len(pkgFiles) == 0 {
return time.Since(start), fmt.Errorf("no build-artifacts found")
}
for _, file := range pkgFiles {
cmd = exec.Command("gpg", "--batch", "--detach-sign", file)
res, err := cmd.CombinedOutput()
if err != nil {
return time.Since(start), fmt.Errorf("error while signing artifact: %w (%s)", err, string(res))
}
}
copyFiles, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*.pkg.tar.zst*"))
if err != nil {
return time.Since(start), fmt.Errorf("error scanning builddir for artifacts: %w", err)
}
holdingDir := filepath.Join(conf.Basedir.Work, waitingDir, p.FullRepo)
for _, file := range copyFiles {
err = os.MkdirAll(holdingDir, 0755)
if err != nil {
return time.Since(start), fmt.Errorf("error creating %s: %w", holdingDir, err)
}
_, err = copyFile(file, filepath.Join(holdingDir, filepath.Base(file)))
if err != nil {
return time.Since(start), fmt.Errorf("error while copying file to %s: %w", filepath.Join(holdingDir, filepath.Base(file)), err)
}
if filepath.Ext(file) != ".sig" {
p.PkgFiles = append(p.PkgFiles, filepath.Join(holdingDir, filepath.Base(file)))
}
}
if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log")); err == nil {
err := os.Remove(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"))
if err != nil {
return time.Since(start), fmt.Errorf("error removing log: %w", err)
}
}
if p.DbPackage.Lto != dbpackage.LtoDisabled && p.DbPackage.Lto != dbpackage.LtoAutoDisabled {
p.DbPackage.Update().
SetStatus(dbpackage.StatusBuild).
SetLto(dbpackage.LtoEnabled).
SetBuildTimeStart(start).
SetLastVersionBuild(p.Version).
SetBuildTimeEnd(time.Now().UTC()).
SetHash(p.Hash).
ExecX(ctx)
} else {
p.DbPackage.Update().
SetStatus(dbpackage.StatusBuild).
SetBuildTimeStart(start).
SetBuildTimeEnd(time.Now().UTC()).
SetLastVersionBuild(p.Version).
SetHash(p.Hash).ExecX(ctx)
}
return time.Since(start), nil
}
func (p *ProtoPackage) Priority() float64 {
if p.DbPackage.BuildTimeEnd.IsZero() {
return 0
} else {
time := p.DbPackage.BuildTimeEnd.Sub(p.DbPackage.BuildTimeStart)
return time.Minutes()
}
}
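
Priority() is what both the sort in queue() and the fast/slow split in syncWorker key on: a never-built package reports 0, so it sorts first and always lands in the fast queue. For example, with slow_queue_threshold: 60, a package whose last build took 1h12m has priority 72.0 and is routed to the slow queue, while one that built in 15m (priority 15.0) stays in the fast queue.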
func (p *ProtoPackage) setupBuildDir() (string, error) {
buildDir := filepath.Join(conf.Basedir.Work, buildDir, p.March, p.Pkgbase+"-"+p.Version)
err := cleanBuildDir(buildDir)
err := cleanBuildDir(buildDir, "")
if err != nil {
return "", fmt.Errorf("removing old builddir failed: %w", err)
}
@@ -274,7 +549,6 @@ func (p *ProtoPackage) SVN2GITVersion(h *alpm.Handle) (string, error) {
return "", fmt.Errorf("invalid arguments")
}
// upstream/upstream-core-extra/extra-cmake-modules/repos/extra-any/PKGBUILD
pkgBuilds, _ := Glob(filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"))
var fPkgbuilds []string

utils.go (127 changed lines)

@@ -10,6 +10,7 @@ import (
paconf "github.com/Morganamilo/go-pacmanconf"
"github.com/Morganamilo/go-srcinfo"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
"io"
"io/fs"
"lukechampine.com/blake3"
@@ -17,6 +18,7 @@ import (
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
@@ -51,17 +53,11 @@
)
type BuildManager struct {
build map[string]chan *ProtoPackage
parse chan *ProtoPackage
repoPurge map[string]chan []*ProtoPackage
repoAdd map[string]chan []*ProtoPackage
exit bool
buildWG sync.WaitGroup
parseWG sync.WaitGroup
repoWG sync.WaitGroup
buildProcesses []*os.Process
buildProcMutex sync.RWMutex
alpmMutex sync.RWMutex
repoPurge map[string]chan []*ProtoPackage
repoAdd map[string]chan []*ProtoPackage
repoWG sync.WaitGroup
alpmMutex sync.RWMutex
sem *semaphore.Weighted
}
type Conf struct {
@@ -76,9 +72,10 @@
ConnectTo string `yaml:"connect_to"`
}
Build struct {
Worker int
Makej int
Checks bool
Worker int
Makej int
Checks bool
SlowQueueThreshold int `yaml:"slow_queue_threshold"`
}
Logging struct {
Level string
@@ -161,17 +158,110 @@ func containsSubStr(str string, subList []string) bool {
return false
}
func cleanBuildDir(dir string) error {
if _, err := os.Stat(dir); err == nil {
func cleanBuildDir(dir string, chrootDir string) error {
if stat, err := os.Stat(dir); err == nil && stat.IsDir() {
err = os.RemoveAll(dir)
if err != nil {
return err
}
}
if chrootDir != "" {
if stat, err := os.Stat(chrootDir); err == nil && stat.IsDir() {
err = os.RemoveAll(chrootDir)
if err != nil {
return err
}
}
}
return nil
}
func (b *BuildManager) queue(path string) ([]*ProtoPackage, error) {
unsortedQueue, err := genQueue(path)
if err != nil {
return nil, fmt.Errorf("error building queue: %w", err)
}
sort.Slice(unsortedQueue, func(i, j int) bool {
return unsortedQueue[i].Priority() < unsortedQueue[j].Priority()
})
return unsortedQueue, nil
}
func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) error {
for _, pkg := range queue {
if err := b.sem.Acquire(ctx, 1); err != nil {
return err
}
go func(pkg *ProtoPackage) {
defer b.sem.Release(1)
dur, err := pkg.build(ctx)
if err != nil {
log.Warningf("error building package %s->%s->%s in %s: %s", pkg.March, pkg.FullRepo, pkg.Pkgbase, dur, err)
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
}
}(pkg)
}
return nil
}
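
buildQueue replaces the former fixed per-march worker pools and WaitGroups: each package gets its own goroutine, bounded by the weighted semaphore, and syncWorker later drains the queue by acquiring all conf.Build.Worker slots at once, which only succeeds after every Release(1) has run. A minimal sketch of the pattern, independent of the surrounding types:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	const workers = 4
	ctx := context.Background()
	sem := semaphore.NewWeighted(workers)

	for i := 0; i < 10; i++ {
		// One slot per job caps concurrency at `workers`.
		if err := sem.Acquire(ctx, 1); err != nil {
			return // context cancelled
		}
		go func(n int) {
			defer sem.Release(1)
			time.Sleep(50 * time.Millisecond) // stand-in for pkg.build(ctx)
			fmt.Println("built", n)
		}(i)
	}

	// Drain barrier: acquiring the full weight succeeds only once all
	// running jobs have released their slot.
	if err := sem.Acquire(ctx, workers); err == nil {
		sem.Release(workers)
	}
	fmt.Println("queue drained")
}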
func genQueue(path string) ([]*ProtoPackage, error) {
pkgBuilds, err := Glob(path)
if err != nil {
return nil, fmt.Errorf("error scanning for PKGBUILDs: %w", err)
}
var pkgbuilds []*ProtoPackage
for _, pkgbuild := range pkgBuilds {
mPkgbuild := PKGBUILD(pkgbuild)
if mPkgbuild.FullRepo() == "trunk" || !contains(conf.Repos, mPkgbuild.Repo()) || containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
continue
}
for _, march := range conf.March {
dbPkg, dbErr := db.DbPackage.Query().Where(
dbpackage.And(
dbpackage.Pkgbase(mPkgbuild.PkgBase()),
dbpackage.RepositoryEQ(dbpackage.Repository(mPkgbuild.Repo())),
dbpackage.March(march),
),
).Only(context.Background())
if ent.IsNotFound(dbErr) {
log.Debugf("[%s/%s] Package not found in database", mPkgbuild.Repo(), mPkgbuild.PkgBase())
} else if dbErr != nil {
log.Errorf("[%s/%s] Problem querying db for package: %v", mPkgbuild.Repo(), mPkgbuild.PkgBase(), dbErr)
}
// compare the b3sum of the PKGBUILD file to the hash in the database; only proceed if the hashes differ.
// This significantly reduces the number of PKGBUILDs that need to be parsed with makepkg, which is _really_ slow.
b3s, err := b3sum(pkgbuild)
if err != nil {
log.Fatalf("Error hashing PKGBUILD: %v", err)
}
if dbPkg != nil && b3s == dbPkg.Hash {
log.Debugf("[%s/%s] Skipped: PKGBUILD hash matches db (%s)", mPkgbuild.Repo(), mPkgbuild.PkgBase(), b3s)
continue
}
pkgbuilds = append(pkgbuilds, &ProtoPackage{
Pkgbuild: pkgbuild,
Pkgbase: mPkgbuild.PkgBase(),
Repo: dbpackage.Repository(mPkgbuild.Repo()),
March: march,
FullRepo: mPkgbuild.Repo() + "-" + march,
Hash: b3s,
})
}
}
return pkgbuilds, nil
}
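
genQueue calls a b3sum helper that is not part of this diff; given the lukechampine.com/blake3 module in go.mod, a plausible shape for it (the hex encoding and the 32-byte digest length are assumptions) would be:

package main

import (
	"encoding/hex"
	"io"
	"os"

	"lukechampine.com/blake3"
)

// Hypothetical b3sum: stream a file through BLAKE3 and return the digest
// as a hex string, for comparison against the hash column in the db.
func b3sum(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := blake3.New(32, nil) // 32-byte output, unkeyed
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}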
func movePackagesLive(fullRepo string) error {
if _, err := os.Stat(filepath.Join(conf.Basedir.Work, waitingDir, fullRepo)); os.IsNotExist(err) {
return nil
@@ -576,11 +666,6 @@ func syncMarchs() error {
log.Fatalf("Can't generate makepkg for %s: %v", march, err)
}
buildManager.build[march] = make(chan *ProtoPackage, 10000)
for i := 0; i < conf.Build.Worker; i++ {
go buildManager.buildWorker(i, march)
}
for _, repo := range conf.Repos {
fRepo := fmt.Sprintf("%s-%s", repo, march)
repos = append(repos, fRepo)