2023-03-14 00:39:15 +01:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2023-05-21 20:28:23 +02:00
|
|
|
"errors"
|
2023-03-14 00:39:15 +01:00
|
|
|
"fmt"
|
2023-05-22 13:39:21 +02:00
|
|
|
"github.com/Jguer/go-alpm/v2"
|
2023-03-14 00:39:15 +01:00
|
|
|
"github.com/c2h5oh/datasize"
|
|
|
|
"github.com/sethvargo/go-retry"
|
|
|
|
log "github.com/sirupsen/logrus"
|
|
|
|
"html/template"
|
|
|
|
"os"
|
|
|
|
"os/exec"
|
|
|
|
"path/filepath"
|
|
|
|
"somegit.dev/ALHP/ALHP.GO/ent"
|
|
|
|
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
)
|
|
|
|
|
2023-05-07 15:12:59 +02:00
|
|
|
// MaxUnknownBuilder is the maximum number of packages with unknown memory
// usage (no recorded max RSS in the database) that buildQueue will build
// concurrently once only such packages remain in the queue.
const MaxUnknownBuilder = 2
|
|
|
|
|
2023-03-14 00:39:15 +01:00
|
|
|
// BuildManager coordinates concurrent package builds and the repository
// maintenance workers (repo-add / repo-remove) that publish their results.
type BuildManager struct {
	repoPurge    map[string]chan []*ProtoPackage // per-repo channel of packages to remove from the repo database
	repoAdd      map[string]chan []*ProtoPackage // per-repo channel of freshly built packages to add to the repo database
	repoWG       *sync.WaitGroup                 // tracks in-flight repo-add/repo-remove operations
	alpmMutex    *sync.RWMutex                   // serializes access to the shared ALPM handle (released/re-initialized by syncWorker)
	building     []*ProtoPackage                 // packages currently building; guarded by buildingLock
	buildingLock *sync.RWMutex                   // guards building
	queueSignal  chan struct{}                   // signaled by a finished build so buildQueue re-evaluates the queue
}
|
|
|
|
|
|
|
|
func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) error {
|
2023-03-15 08:20:22 +01:00
|
|
|
var (
|
|
|
|
doneQ []*ProtoPackage
|
|
|
|
doneQLock = new(sync.RWMutex)
|
|
|
|
unknownBuilds bool
|
|
|
|
queueNoMatch bool
|
|
|
|
)
|
2023-03-14 00:39:15 +01:00
|
|
|
|
|
|
|
for len(doneQ) != len(queue) {
|
|
|
|
up := 0
|
|
|
|
b.buildingLock.RLock()
|
2023-05-07 15:12:59 +02:00
|
|
|
if (pkgList2MaxMem(b.building) < conf.Build.MemoryLimit && !unknownBuilds && !queueNoMatch) || (unknownBuilds && len(b.building) < MaxUnknownBuilder) {
|
2023-03-15 08:20:22 +01:00
|
|
|
queueNoMatch = true
|
2023-03-14 00:39:15 +01:00
|
|
|
b.buildingLock.RUnlock()
|
|
|
|
for _, pkg := range queue {
|
|
|
|
// check if package is already build
|
|
|
|
doneQLock.RLock()
|
|
|
|
if ContainsPkg(doneQ, pkg, true) {
|
|
|
|
doneQLock.RUnlock()
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
doneQLock.RUnlock()
|
|
|
|
|
|
|
|
// check if package is already building (we do not build packages from different marchs simultaneously)
|
|
|
|
b.buildingLock.RLock()
|
|
|
|
if ContainsPkg(b.building, pkg, false) {
|
|
|
|
log.Debugf("[Q] skipped already building package %s->%s", pkg.FullRepo, pkg.Pkgbase)
|
|
|
|
b.buildingLock.RUnlock()
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
b.buildingLock.RUnlock()
|
|
|
|
|
|
|
|
// only check for memory on known-memory-builds
|
|
|
|
// otherwise build them one-at-a-time
|
|
|
|
// TODO: add initial compile mode for new repos
|
|
|
|
if !unknownBuilds {
|
|
|
|
// check if package has unknown memory usage
|
|
|
|
if pkg.DBPackage.MaxRss == nil {
|
|
|
|
log.Debugf("[Q] skipped unknown package %s->%s", pkg.FullRepo, pkg.Pkgbase)
|
|
|
|
up++
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-05-07 15:12:59 +02:00
|
|
|
// check if package can be built with current memory limit
|
2023-03-14 00:39:15 +01:00
|
|
|
if datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit {
|
|
|
|
log.Warningf("[Q] %s->%s exeeds memory limit: %s->%s", pkg.FullRepo, pkg.Pkgbase,
|
|
|
|
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, conf.Build.MemoryLimit)
|
|
|
|
doneQLock.Lock()
|
|
|
|
doneQ = append(doneQ, pkg)
|
|
|
|
doneQLock.Unlock()
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
b.buildingLock.RLock()
|
|
|
|
currentMemLoad := pkgList2MaxMem(b.building)
|
|
|
|
b.buildingLock.RUnlock()
|
|
|
|
|
|
|
|
// check if package can be build right now
|
|
|
|
if !unknownBuilds && currentMemLoad+(datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB) > conf.Build.MemoryLimit {
|
|
|
|
log.Debugf("[Q] skipped package with max_rss %s while load %s: %s->%s",
|
|
|
|
datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, currentMemLoad, pkg.Pkgbase, pkg.March)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
b.buildingLock.RLock()
|
2023-05-07 15:12:59 +02:00
|
|
|
if len(b.building) >= MaxUnknownBuilder {
|
2023-03-14 00:39:15 +01:00
|
|
|
b.buildingLock.RUnlock()
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
b.buildingLock.RUnlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
b.buildingLock.Lock()
|
|
|
|
b.building = append(b.building, pkg)
|
|
|
|
b.buildingLock.Unlock()
|
2023-03-15 08:20:22 +01:00
|
|
|
queueNoMatch = false
|
2023-03-14 00:39:15 +01:00
|
|
|
|
|
|
|
go func(pkg *ProtoPackage) {
|
|
|
|
dur, err := pkg.build(ctx)
|
2023-05-21 20:28:23 +02:00
|
|
|
if err != nil && !errors.Is(err, ErrorNotEligible) {
|
2023-03-14 00:39:15 +01:00
|
|
|
log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err)
|
|
|
|
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
2023-05-21 20:28:23 +02:00
|
|
|
} else if err == nil {
|
2023-03-14 00:39:15 +01:00
|
|
|
log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur)
|
|
|
|
}
|
|
|
|
doneQLock.Lock()
|
|
|
|
b.buildingLock.Lock()
|
|
|
|
doneQ = append(doneQ, pkg)
|
|
|
|
|
|
|
|
for i := 0; i < len(b.building); i++ {
|
|
|
|
if b.building[i].PkgbaseEquals(pkg, true) {
|
|
|
|
b.building = append(b.building[:i], b.building[i+1:]...)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
doneQLock.Unlock()
|
|
|
|
b.buildingLock.Unlock()
|
|
|
|
b.queueSignal <- struct{}{}
|
|
|
|
}(pkg)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
log.Debugf("[Q] memory/build limit reached, waiting for package to finish...")
|
|
|
|
b.buildingLock.RUnlock()
|
|
|
|
<-b.queueSignal
|
2023-03-15 08:20:22 +01:00
|
|
|
queueNoMatch = false
|
2023-03-14 00:39:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// if only unknown packages are left, enable unknown buildmode
|
|
|
|
b.buildingLock.RLock()
|
|
|
|
if up == len(queue)-(len(doneQ)+len(b.building)) {
|
|
|
|
unknownBuilds = true
|
|
|
|
}
|
|
|
|
b.buildingLock.RUnlock()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *BuildManager) htmlWorker(ctx context.Context) {
|
|
|
|
type Pkg struct {
|
|
|
|
Pkgbase string
|
|
|
|
Status string
|
|
|
|
Class string
|
|
|
|
Skip string
|
|
|
|
Version string
|
|
|
|
Svn2GitVersion string
|
|
|
|
BuildDate string
|
|
|
|
BuildDuration time.Duration
|
2023-03-15 15:22:36 +01:00
|
|
|
BuildMemory *string
|
2023-03-14 00:39:15 +01:00
|
|
|
Checked string
|
|
|
|
Log string
|
|
|
|
LTO bool
|
|
|
|
LTOUnknown bool
|
|
|
|
LTODisabled bool
|
|
|
|
LTOAutoDisabled bool
|
|
|
|
DebugSym bool
|
|
|
|
DebugSymNotAvailable bool
|
|
|
|
DebugSymUnknown bool
|
|
|
|
}
|
|
|
|
|
|
|
|
type Repo struct {
|
|
|
|
Name string
|
|
|
|
Packages []Pkg
|
|
|
|
}
|
|
|
|
|
|
|
|
type March struct {
|
|
|
|
Name string
|
|
|
|
Repos []Repo
|
|
|
|
}
|
|
|
|
|
|
|
|
type tpl struct {
|
|
|
|
March []March
|
|
|
|
Generated string
|
|
|
|
Latest int
|
|
|
|
Failed int
|
|
|
|
Skipped int
|
|
|
|
Queued int
|
|
|
|
LTOEnabled int
|
|
|
|
LTOUnknown int
|
|
|
|
LTODisabled int
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
gen := &tpl{}
|
|
|
|
|
|
|
|
for _, march := range conf.March {
|
|
|
|
addMarch := March{
|
|
|
|
Name: march,
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, repo := range conf.Repos {
|
|
|
|
addRepo := Repo{
|
|
|
|
Name: repo,
|
|
|
|
}
|
|
|
|
|
2023-05-21 20:28:23 +02:00
|
|
|
pkgs := db.DBPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).
|
2023-03-14 00:39:15 +01:00
|
|
|
Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx)
|
|
|
|
|
|
|
|
for _, pkg := range pkgs {
|
|
|
|
addPkg := Pkg{
|
|
|
|
Pkgbase: pkg.Pkgbase,
|
|
|
|
Status: strings.ToUpper(pkg.Status.String()),
|
|
|
|
Class: statusID2string(pkg.Status),
|
|
|
|
Skip: pkg.SkipReason,
|
|
|
|
Version: pkg.RepoVersion,
|
|
|
|
Svn2GitVersion: pkg.Version,
|
|
|
|
}
|
|
|
|
|
|
|
|
if pkg.STime != nil && pkg.UTime != nil {
|
|
|
|
addPkg.BuildDuration = time.Duration(*pkg.STime+*pkg.UTime) * time.Second
|
|
|
|
}
|
|
|
|
|
|
|
|
if !pkg.BuildTimeStart.IsZero() {
|
|
|
|
addPkg.BuildDate = pkg.BuildTimeStart.UTC().Format(time.RFC1123)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !pkg.Updated.IsZero() {
|
|
|
|
addPkg.Checked = pkg.Updated.UTC().Format(time.RFC1123)
|
|
|
|
}
|
|
|
|
|
|
|
|
if pkg.Status == dbpackage.StatusFailed {
|
|
|
|
addPkg.Log = fmt.Sprintf("%s/%s/%s.log", logDir, pkg.March, pkg.Pkgbase)
|
|
|
|
}
|
|
|
|
|
2023-03-14 21:05:43 +01:00
|
|
|
if pkg.MaxRss != nil {
|
2023-03-15 15:22:36 +01:00
|
|
|
hrSize := (datasize.ByteSize(*pkg.MaxRss) * datasize.KB).HumanReadable()
|
|
|
|
addPkg.BuildMemory = &hrSize
|
2023-03-14 21:05:43 +01:00
|
|
|
}
|
|
|
|
|
2023-03-14 00:39:15 +01:00
|
|
|
switch pkg.Lto {
|
|
|
|
case dbpackage.LtoUnknown:
|
|
|
|
if pkg.Status != dbpackage.StatusSkipped && pkg.Status != dbpackage.StatusFailed {
|
|
|
|
addPkg.LTOUnknown = true
|
|
|
|
}
|
|
|
|
case dbpackage.LtoEnabled:
|
|
|
|
addPkg.LTO = true
|
|
|
|
case dbpackage.LtoDisabled:
|
|
|
|
addPkg.LTODisabled = true
|
|
|
|
case dbpackage.LtoAutoDisabled:
|
|
|
|
addPkg.LTOAutoDisabled = true
|
|
|
|
}
|
|
|
|
|
|
|
|
switch pkg.DebugSymbols {
|
|
|
|
case dbpackage.DebugSymbolsUnknown:
|
|
|
|
if pkg.Status != dbpackage.StatusSkipped && pkg.Status != dbpackage.StatusFailed {
|
|
|
|
addPkg.DebugSymUnknown = true
|
|
|
|
}
|
|
|
|
case dbpackage.DebugSymbolsAvailable:
|
|
|
|
addPkg.DebugSym = true
|
|
|
|
case dbpackage.DebugSymbolsNotAvailable:
|
|
|
|
addPkg.DebugSymNotAvailable = true
|
|
|
|
}
|
|
|
|
|
|
|
|
addRepo.Packages = append(addRepo.Packages, addPkg)
|
|
|
|
}
|
|
|
|
addMarch.Repos = append(addMarch.Repos, addRepo)
|
|
|
|
}
|
|
|
|
gen.March = append(gen.March, addMarch)
|
|
|
|
}
|
|
|
|
|
|
|
|
gen.Generated = time.Now().UTC().Format(time.RFC1123)
|
|
|
|
|
|
|
|
var v []struct {
|
|
|
|
Status dbpackage.Status `json:"status"`
|
|
|
|
Count int `json:"count"`
|
|
|
|
}
|
|
|
|
|
2023-05-21 20:28:23 +02:00
|
|
|
db.DBPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v)
|
2023-03-14 00:39:15 +01:00
|
|
|
|
|
|
|
for _, c := range v {
|
|
|
|
switch c.Status {
|
|
|
|
case dbpackage.StatusFailed:
|
|
|
|
gen.Failed = c.Count
|
|
|
|
case dbpackage.StatusSkipped:
|
|
|
|
gen.Skipped = c.Count
|
|
|
|
case dbpackage.StatusLatest:
|
|
|
|
gen.Latest = c.Count
|
|
|
|
case dbpackage.StatusQueued:
|
|
|
|
gen.Queued = c.Count
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var v2 []struct {
|
|
|
|
Status dbpackage.Lto `json:"lto"`
|
|
|
|
Count int `json:"count"`
|
|
|
|
}
|
|
|
|
|
2023-05-21 20:28:23 +02:00
|
|
|
db.DBPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).
|
2023-03-14 00:39:15 +01:00
|
|
|
GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2)
|
|
|
|
|
|
|
|
for _, c := range v2 {
|
|
|
|
switch c.Status {
|
|
|
|
case dbpackage.LtoUnknown:
|
|
|
|
gen.LTOUnknown = c.Count
|
|
|
|
case dbpackage.LtoDisabled, dbpackage.LtoAutoDisabled:
|
|
|
|
gen.LTODisabled += c.Count
|
|
|
|
case dbpackage.LtoEnabled:
|
|
|
|
gen.LTOEnabled = c.Count
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
statusTpl, err := template.ParseFiles("tpl/packages.html")
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("[HTML] Error parsing template file: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, "packages.html"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("[HTML] Erro ropening output file: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
err = statusTpl.Execute(f, gen)
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("[HTML] Error filling template: %v", err)
|
|
|
|
}
|
|
|
|
_ = f.Close()
|
|
|
|
|
|
|
|
time.Sleep(time.Minute * 5)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// repoWorker is the long-running maintenance loop for a single repository.
// It services two channels: b.repoAdd (publish freshly built packages via
// repo-add and mark them latest in the database) and b.repoPurge (remove
// packages via repo-remove, clear their repo metadata, and delete the files).
// Each batch is bracketed by b.repoWG so shutdown can wait for in-flight work.
func (b *BuildManager) repoWorker(repo string) {
	for {
		select {
		case pkgL := <-b.repoAdd[repo]:
			b.repoWG.Add(1)
			// collect all package files from the batch for a single repo-add call
			toAdd := make([]string, 0)
			for _, pkg := range pkgL {
				toAdd = append(toAdd, pkg.PkgFiles...)
			}

			// -s sign, -v verify, -p prevent downgrade, -n only add new packages
			args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"}
			args = append(args, toAdd...)
			cmd := exec.Command("repo-add", args...)
			res, err := cmd.CombinedOutput()
			log.Debug(string(res))
			// exit code 1 is tolerated (repo-add returns it for already-present packages);
			// anything else is fatal for the worker
			if err != nil && cmd.ProcessState.ExitCode() != 1 {
				log.Panicf("%s while repo-add: %v", string(res), err)
			}

			// mark each added package latest in the database and record debug-symbol availability
			for _, pkg := range pkgL {
				err = pkg.toDBPackage(true)
				if err != nil {
					log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err)
					continue
				}

				pkgUpd := pkg.DBPackage.Update().
					SetStatus(dbpackage.StatusLatest).
					ClearSkipReason().
					SetRepoVersion(pkg.Version).
					SetTagRev(pkg.State.TagRev)

				// debug symbols are available iff the -debug split package exists on disk
				if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March,
					pkg.DBPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil {
					pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsAvailable)
				} else {
					pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable)
				}
				pkg.DBPackage = pkgUpd.SaveX(context.Background())
			}

			// drop old package versions from the pool, keeping one per package
			cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1")
			res, err = cmd.CombinedOutput()
			log.Debug(string(res))
			if err != nil {
				log.Warningf("error running paccache: %v", err)
			}

			err = updateLastUpdated()
			if err != nil {
				log.Warningf("error updating lastupdate: %v", err)
			}
			b.repoWG.Done()
		case pkgL := <-b.repoPurge[repo]:
			for _, pkg := range pkgL {
				// nothing to purge if the repo database doesn't exist yet
				if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"); err != nil {
					continue
				}
				// locate the package's files on disk if the caller didn't provide them
				if len(pkg.PkgFiles) == 0 {
					if err := pkg.findPkgFiles(); err != nil {
						log.Warningf("[%s/%s] unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
						continue
					} else if len(pkg.PkgFiles) == 0 {
						continue
					}
				}

				// only pass names of files that actually exist to repo-remove
				var realPkgs []string
				for _, filePath := range pkg.PkgFiles {
					if _, err := os.Stat(filePath); err == nil {
						realPkgs = append(realPkgs, Package(filePath).Name())
					}
				}

				if len(realPkgs) == 0 {
					continue
				}

				b.repoWG.Add(1)
				args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"}
				args = append(args, realPkgs...)
				cmd := exec.Command("repo-remove", args...)
				res, err := cmd.CombinedOutput()
				log.Debug(string(res))
				// exit code 1 means repo-remove failed for (some of) the packages
				if err != nil && cmd.ProcessState.ExitCode() == 1 {
					log.Warningf("error while deleting package %s: %s", pkg.Pkgbase, string(res))
				}

				// clear repo metadata in the database; best-effort
				if pkg.DBPackage != nil {
					_ = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(context.Background())
				}

				// remove the package files and their signatures; best-effort
				for _, file := range pkg.PkgFiles {
					_ = os.Remove(file)
					_ = os.Remove(file + ".sig")
				}
				err = updateLastUpdated()
				if err != nil {
					log.Warningf("error updating lastupdate: %v", err)
				}
				b.repoWG.Done()
			}
		}
	}
}
|
|
|
|
|
|
|
|
func (b *BuildManager) syncWorker(ctx context.Context) error {
|
2023-05-21 20:28:23 +02:00
|
|
|
err := os.MkdirAll(filepath.Join(conf.Basedir.Work), 0o755)
|
2023-03-14 00:39:15 +01:00
|
|
|
if err != nil {
|
2023-05-21 20:28:23 +02:00
|
|
|
log.Fatalf("error creating work dir %s: %v", conf.Basedir.Work, err)
|
2023-03-14 00:39:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
2023-05-21 20:28:23 +02:00
|
|
|
gitPath := filepath.Join(conf.Basedir.Work, stateDir)
|
2023-03-14 00:39:15 +01:00
|
|
|
|
2023-05-21 20:28:23 +02:00
|
|
|
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
|
|
|
cmd := exec.Command("git", "clone", "--depth=1", conf.StateRepo, gitPath)
|
|
|
|
res, err := cmd.CombinedOutput()
|
|
|
|
log.Debug(string(res))
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("error cloning state repo: %v", err)
|
|
|
|
}
|
|
|
|
} else if err == nil {
|
|
|
|
cmd := exec.Command("git", "reset", "--hard")
|
|
|
|
cmd.Dir = gitPath
|
|
|
|
res, err := cmd.CombinedOutput()
|
|
|
|
log.Debug(string(res))
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("error reseting state repo: %v", err)
|
|
|
|
}
|
2023-03-14 00:39:15 +01:00
|
|
|
|
2023-05-21 20:28:23 +02:00
|
|
|
cmd = exec.Command("git", "pull")
|
|
|
|
cmd.Dir = gitPath
|
|
|
|
res, err = cmd.CombinedOutput()
|
|
|
|
log.Debug(string(res))
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("failed to update state repo: %v", err)
|
2023-03-14 00:39:15 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// housekeeping
|
|
|
|
wg := new(sync.WaitGroup)
|
|
|
|
for _, repo := range repos {
|
|
|
|
wg.Add(1)
|
|
|
|
splitRepo := strings.Split(repo, "-")
|
|
|
|
repo := repo
|
|
|
|
go func() {
|
|
|
|
err := housekeeping(splitRepo[0], strings.Join(splitRepo[1:], "-"), wg)
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("[%s] housekeeping failed: %v", repo, err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
err := logHK()
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("log-housekeeping failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// fetch updates between sync runs
|
|
|
|
b.alpmMutex.Lock()
|
|
|
|
err = alpmHandle.Release()
|
|
|
|
if err != nil {
|
2023-05-21 20:28:23 +02:00
|
|
|
log.Fatalf("error releasing ALPM handle: %v", err)
|
2023-03-14 00:39:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := retry.Fibonacci(ctx, 1*time.Second, func(ctx context.Context) error {
|
|
|
|
if err := setupChroot(); err != nil {
|
2023-05-21 20:28:23 +02:00
|
|
|
log.Warningf("unable to upgrade chroot, trying again later")
|
2023-03-14 00:39:15 +01:00
|
|
|
return retry.RetryableError(err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
log.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
|
|
|
|
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
|
|
|
|
if err != nil {
|
2023-05-21 20:28:23 +02:00
|
|
|
log.Warningf("error while alpm-init: %v", err)
|
2023-03-14 00:39:15 +01:00
|
|
|
}
|
|
|
|
b.alpmMutex.Unlock()
|
|
|
|
|
2023-05-21 20:28:23 +02:00
|
|
|
queue, err := b.genQueue()
|
2023-03-14 00:39:15 +01:00
|
|
|
if err != nil {
|
2023-05-21 20:28:23 +02:00
|
|
|
log.Errorf("error building queue: %v", err)
|
2023-03-14 00:39:15 +01:00
|
|
|
} else {
|
2023-05-21 20:28:23 +02:00
|
|
|
log.Debugf("build-queue with %d items", len(queue))
|
2023-03-14 00:39:15 +01:00
|
|
|
err = b.buildQueue(queue, ctx)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if ctx.Err() == nil {
|
|
|
|
for _, repo := range repos {
|
|
|
|
err = movePackagesLive(repo)
|
|
|
|
if err != nil {
|
2023-05-21 20:28:23 +02:00
|
|
|
log.Errorf("[%s] error moving packages live: %v", repo, err)
|
2023-03-14 00:39:15 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Debugf("build-cycle finished")
|
|
|
|
time.Sleep(time.Duration(*checkInterval) * time.Minute)
|
|
|
|
}
|
|
|
|
}
|
2023-05-21 20:28:23 +02:00
|
|
|
|
|
|
|
func (b *BuildManager) genQueue() ([]*ProtoPackage, error) {
|
|
|
|
stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*"))
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("error scanning for state-files: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var pkgbuilds []*ProtoPackage
|
|
|
|
for _, stateFile := range stateFiles {
|
|
|
|
stat, err := os.Stat(stateFile)
|
|
|
|
if err != nil || stat.IsDir() || strings.Contains(stateFile, ".git") {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
repo, subRepo, arch, err := stateFileMeta(stateFile)
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("[QG] error generating statefile metadata %s: %v", stateFile, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-05-22 13:39:21 +02:00
|
|
|
if !Contains(conf.Repos, repo) || (subRepo != nil && Contains(conf.Blacklist.Repo, *subRepo)) {
|
2023-05-21 20:28:23 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
rawState, err := os.ReadFile(stateFile)
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("[QG] cannot read statefile %s: %v", stateFile, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
state, err := parseState(string(rawState))
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("[QG] cannot parse statefile %s: %v", stateFile, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, march := range conf.March {
|
|
|
|
pkg := &ProtoPackage{
|
|
|
|
Pkgbase: state.Pkgbase,
|
|
|
|
Repo: dbpackage.Repository(repo),
|
|
|
|
March: march,
|
|
|
|
FullRepo: repo + "-" + march,
|
|
|
|
State: state,
|
|
|
|
Version: state.PkgVer,
|
|
|
|
Arch: arch,
|
|
|
|
}
|
|
|
|
|
|
|
|
err = pkg.toDBPackage(true)
|
|
|
|
if err != nil {
|
|
|
|
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-05-22 13:39:21 +02:00
|
|
|
if !pkg.isAvailable(alpmHandle) {
|
|
|
|
log.Debugf("[%s/%s] not available on mirror, skipping build", pkg.FullRepo, pkg.Pkgbase)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
skipping := false
|
|
|
|
switch {
|
|
|
|
case arch == "any":
|
|
|
|
log.Debugf("skipped %s: any-package", pkg.Pkgbase)
|
|
|
|
pkg.DBPackage.SkipReason = "arch = any"
|
|
|
|
pkg.DBPackage.Status = dbpackage.StatusSkipped
|
|
|
|
skipping = true
|
|
|
|
case Contains(conf.Blacklist.Packages, pkg.Pkgbase):
|
|
|
|
log.Debugf("skipped %s: blacklisted package", pkg.Pkgbase)
|
|
|
|
pkg.DBPackage.SkipReason = "blacklisted"
|
|
|
|
pkg.DBPackage.Status = dbpackage.StatusSkipped
|
|
|
|
skipping = true
|
|
|
|
case pkg.DBPackage.MaxRss != nil && datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit:
|
|
|
|
log.Debugf("skipped %s: memory limit exceeded (%s)", pkg.Pkgbase, datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB)
|
|
|
|
pkg.DBPackage.SkipReason = "memory limit exceeded"
|
|
|
|
pkg.DBPackage.Status = dbpackage.StatusSkipped
|
|
|
|
skipping = true
|
|
|
|
case pkg.isPkgFailed():
|
|
|
|
log.Debugf("skipped %s: failed build", pkg.Pkgbase)
|
|
|
|
skipping = true
|
|
|
|
}
|
|
|
|
|
|
|
|
if skipping {
|
|
|
|
pkg.DBPackage = pkg.DBPackage.Update().SetUpdated(time.Now()).SetVersion(pkg.Version).SetStatus(pkg.DBPackage.Status).
|
|
|
|
SetSkipReason(pkg.DBPackage.SkipReason).SetTagRev(pkg.State.TagRev).SaveX(context.Background())
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
pkg.DBPackage = pkg.DBPackage.Update().SetUpdated(time.Now()).SetVersion(pkg.Version).SaveX(context.Background())
|
|
|
|
}
|
|
|
|
|
|
|
|
repoVer, err := pkg.repoVersion()
|
|
|
|
if err != nil {
|
|
|
|
pkg.DBPackage = pkg.DBPackage.Update().ClearRepoVersion().SaveX(context.Background())
|
|
|
|
} else if err == nil && alpm.VerCmp(repoVer, pkg.Version) > 0 {
|
|
|
|
log.Debugf("skipped %s: version in repo higher than in PKGBUILD (%s < %s)", pkg.Pkgbase, pkg.Version, repoVer)
|
|
|
|
pkg.DBPackage = pkg.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetTagRev(pkg.State.TagRev).SaveX(context.Background())
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-05-21 20:28:23 +02:00
|
|
|
pkgbuilds = append(pkgbuilds, pkg)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return pkgbuilds, nil
|
|
|
|
}
|