From ece8c4c7d91e1a9e167bbadc634e3b5bfccbaa37 Mon Sep 17 00:00:00 2001 From: Giovanni Harting <539@idlegandalf.com> Date: Tue, 14 Mar 2023 00:39:15 +0100 Subject: [PATCH] faster SRCINFO parsing + memory-based building Switched to parsing srcinfo with channels on all available cpus, speeding up srcinfo-parsing and queue generation by a lot. New memory-limit based building will max out the available memory while not building the same packages at the same time for different marchs, fixing some long-standing bugs like firefox not building at the same time because the same ports are used for profile-based optimization. This also drops the artificial delay on build-start, speeding up things even more. This also means there is no hard-coded limit on how many packages can be built at once anymore. As long as there is RAM available, builds will be started. --- buildmanager.go | 631 ++++++++++++++++++++++++++++++++++++++++ config_dist.yaml | 6 +- ent/client.go | 67 ++++- ent/config.go | 66 ----- ent/context.go | 33 --- ent/dbpackage.go | 8 +- ent/dbpackage/where.go | 2 +- ent/dbpackage_create.go | 10 +- ent/dbpackage_delete.go | 24 +- ent/dbpackage_query.go | 28 +- ent/dbpackage_update.go | 32 +- ent/ent.go | 28 +- ent/enttest/enttest.go | 6 +- ent/hook/hook.go | 2 +- ent/mutation.go | 5 +- ent/runtime.go | 4 +- ent/runtime/runtime.go | 6 +- ent/schema/dbpackage.go | 3 +- flags.yaml | 6 +- go.mod | 34 +-- go.sum | 83 +++--- housekeeping.go | 265 +++++++++++++++++ main.go | 464 +---------------------- package.go | 4 +- proto_package.go | 32 +- utils.go | 422 ++------------------- 26 files changed, 1157 insertions(+), 1114 deletions(-) create mode 100644 buildmanager.go delete mode 100644 ent/config.go delete mode 100644 ent/context.go create mode 100644 housekeeping.go diff --git a/buildmanager.go b/buildmanager.go new file mode 100644 index 0000000..fe198ee --- /dev/null +++ b/buildmanager.go @@ -0,0 +1,631 @@ +package main + +import ( + "context" + "fmt" + 
"github.com/c2h5oh/datasize" + "github.com/sethvargo/go-retry" + log "github.com/sirupsen/logrus" + "html/template" + "os" + "os/exec" + "path/filepath" + "runtime" + "somegit.dev/ALHP/ALHP.GO/ent" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" + "strings" + "sync" + "time" +) + +type BuildManager struct { + repoPurge map[string]chan []*ProtoPackage + repoAdd map[string]chan []*ProtoPackage + repoWG *sync.WaitGroup + alpmMutex *sync.RWMutex + building []*ProtoPackage + buildingLock *sync.RWMutex + queueSignal chan struct{} +} + +func (b *BuildManager) refreshSRCINFOs(ctx context.Context, path string) error { + pkgBuilds, err := Glob(path) + if err != nil { + return fmt.Errorf("error scanning for PKGBUILDs: %w", err) + } + + wg := new(sync.WaitGroup) + workerChan := make(chan string, runtime.NumCPU()) + + for o := 0; o < runtime.NumCPU(); o++ { + wg.Add(1) + go b.SRCINFOWorker(ctx, workerChan, wg) + } + + go func() { + for _, pkgbuild := range pkgBuilds { + workerChan <- pkgbuild + } + close(workerChan) + }() + + wg.Wait() + return nil +} + +func (b *BuildManager) SRCINFOWorker(ctx context.Context, workIn chan string, wg *sync.WaitGroup) { + defer wg.Done() + for pkgbuild := range workIn { + mPkgbuild := PKGBUILD(pkgbuild) + if mPkgbuild.FullRepo() == "trunk" || !Contains(conf.Repos, mPkgbuild.Repo()) || + containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) { + continue + } + + for _, march := range conf.March { + dbPkg, dbErr := db.DbPackage.Query().Where( + dbpackage.And( + dbpackage.Pkgbase(mPkgbuild.PkgBase()), + dbpackage.RepositoryEQ(dbpackage.Repository(mPkgbuild.Repo())), + dbpackage.March(march), + ), + ).Only(context.Background()) + + if ent.IsNotFound(dbErr) { + log.Debugf("[%s/%s] Package not found in database", mPkgbuild.Repo(), mPkgbuild.PkgBase()) + } else if dbErr != nil { + log.Errorf("[%s/%s] Problem querying db for package: %v", mPkgbuild.Repo(), mPkgbuild.PkgBase(), dbErr) + } + + // compare b3sum of PKGBUILD file to hash in database, only 
proceed if hash differs + // reduces the amount of PKGBUILDs that need to be parsed with makepkg, which is _really_ slow, significantly + b3s, err := b3sum(pkgbuild) + if err != nil { + log.Errorf("Error hashing PKGBUILD: %v", err) + } + + if dbPkg != nil && b3s == dbPkg.Hash { + log.Debugf("[%s/%s] Skipped: PKGBUILD hash matches db (%s)", mPkgbuild.Repo(), mPkgbuild.PkgBase(), b3s) + continue + } else if dbPkg != nil && b3s != dbPkg.Hash && dbPkg.SrcinfoHash != b3s { + log.Debugf("[%s/%s] srcinfo cleared", mPkgbuild.Repo(), mPkgbuild.PkgBase()) + dbPkg = dbPkg.Update().ClearSrcinfo().SaveX(context.Background()) + } + + proto := &ProtoPackage{ + Pkgbuild: pkgbuild, + Pkgbase: mPkgbuild.PkgBase(), + Repo: dbpackage.Repository(mPkgbuild.Repo()), + March: march, + FullRepo: mPkgbuild.Repo() + "-" + march, + Hash: b3s, + DBPackage: dbPkg, + } + + _, err = proto.isEligible(ctx) + if err != nil { + log.Infof("Unable to determine status for package %s: %v", proto.Pkgbase, err) + b.repoPurge[proto.FullRepo] <- []*ProtoPackage{proto} + } else if proto.DBPackage != nil { + proto.DBPackage.Update().SetPkgbuild(proto.Pkgbuild).ExecX(ctx) + } + } + } +} + +func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) error { + var doneQ []*ProtoPackage + doneQLock := new(sync.RWMutex) + var unknownBuilds bool + + for len(doneQ) != len(queue) { + up := 0 + b.buildingLock.RLock() + if (pkgList2MaxMem(b.building) < conf.Build.MemoryLimit && !unknownBuilds) || (unknownBuilds && len(b.building) < 1) { + b.buildingLock.RUnlock() + for _, pkg := range queue { + // check if package is already built + doneQLock.RLock() + if ContainsPkg(doneQ, pkg, true) { + doneQLock.RUnlock() + continue + } + doneQLock.RUnlock() + + // check if package is already building (we do not build packages from different marchs simultaneously) + b.buildingLock.RLock() + if ContainsPkg(b.building, pkg, false) { + log.Debugf("[Q] skipped already building package %s->%s", pkg.FullRepo, pkg.Pkgbase) 
+ b.buildingLock.RUnlock() + continue + } + b.buildingLock.RUnlock() + + // only check for memory on known-memory-builds + // otherwise build them one-at-a-time + // TODO: add initial compile mode for new repos + if !unknownBuilds { + // check if package has unknown memory usage + if pkg.DBPackage.MaxRss == nil { + log.Debugf("[Q] skipped unknown package %s->%s", pkg.FullRepo, pkg.Pkgbase) + up++ + continue + } + + // check if package can be built with current memory limit + if datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit { + log.Warningf("[Q] %s->%s exceeds memory limit: %s->%s", pkg.FullRepo, pkg.Pkgbase, + datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, conf.Build.MemoryLimit) + doneQLock.Lock() + doneQ = append(doneQ, pkg) + doneQLock.Unlock() + continue + } + + b.buildingLock.RLock() + currentMemLoad := pkgList2MaxMem(b.building) + b.buildingLock.RUnlock() + + // check if package can be built right now + if !unknownBuilds && currentMemLoad+(datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB) > conf.Build.MemoryLimit { + log.Debugf("[Q] skipped package with max_rss %s while load %s: %s->%s", + datasize.ByteSize(*pkg.DBPackage.MaxRss)*datasize.KB, currentMemLoad, pkg.Pkgbase, pkg.March) + continue + } + } else { + b.buildingLock.RLock() + if len(b.building) >= 1 { + b.buildingLock.RUnlock() + continue + } + b.buildingLock.RUnlock() + } + + b.buildingLock.Lock() + b.building = append(b.building, pkg) + b.buildingLock.Unlock() + + go func(pkg *ProtoPackage) { + dur, err := pkg.build(ctx) + if err != nil { + log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err) + b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} + } else { + log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur) + } + doneQLock.Lock() + b.buildingLock.Lock() + doneQ = append(doneQ, pkg) + + for i := 0; i < len(b.building); i++ { + if b.building[i].PkgbaseEquals(pkg, true) { + b.building = 
append(b.building[:i], b.building[i+1:]...) + break + } + } + doneQLock.Unlock() + b.buildingLock.Unlock() + b.queueSignal <- struct{}{} + }(pkg) + } + } else { + log.Debugf("[Q] memory/build limit reached, waiting for package to finish...") + b.buildingLock.RUnlock() + <-b.queueSignal + } + + // if only unknown packages are left, enable unknown buildmode + b.buildingLock.RLock() + if up == len(queue)-(len(doneQ)+len(b.building)) { + unknownBuilds = true + } + b.buildingLock.RUnlock() + up = 0 + } + return nil +} + +func (b *BuildManager) htmlWorker(ctx context.Context) { + type Pkg struct { + Pkgbase string + Status string + Class string + Skip string + Version string + Svn2GitVersion string + BuildDate string + BuildDuration time.Duration + Checked string + Log string + LTO bool + LTOUnknown bool + LTODisabled bool + LTOAutoDisabled bool + DebugSym bool + DebugSymNotAvailable bool + DebugSymUnknown bool + } + + type Repo struct { + Name string + Packages []Pkg + } + + type March struct { + Name string + Repos []Repo + } + + type tpl struct { + March []March + Generated string + Latest int + Failed int + Skipped int + Queued int + LTOEnabled int + LTOUnknown int + LTODisabled int + } + + for { + gen := &tpl{} + + for _, march := range conf.March { + addMarch := March{ + Name: march, + } + + for _, repo := range conf.Repos { + addRepo := Repo{ + Name: repo, + } + + pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)). 
+ Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx) + + for _, pkg := range pkgs { + addPkg := Pkg{ + Pkgbase: pkg.Pkgbase, + Status: strings.ToUpper(pkg.Status.String()), + Class: statusID2string(pkg.Status), + Skip: pkg.SkipReason, + Version: pkg.RepoVersion, + Svn2GitVersion: pkg.Version, + } + + if pkg.STime != nil && pkg.UTime != nil { + addPkg.BuildDuration = time.Duration(*pkg.STime+*pkg.UTime) * time.Second + } + + if !pkg.BuildTimeStart.IsZero() { + addPkg.BuildDate = pkg.BuildTimeStart.UTC().Format(time.RFC1123) + } + + if !pkg.Updated.IsZero() { + addPkg.Checked = pkg.Updated.UTC().Format(time.RFC1123) + } + + if pkg.Status == dbpackage.StatusFailed { + addPkg.Log = fmt.Sprintf("%s/%s/%s.log", logDir, pkg.March, pkg.Pkgbase) + } + + switch pkg.Lto { + case dbpackage.LtoUnknown: + if pkg.Status != dbpackage.StatusSkipped && pkg.Status != dbpackage.StatusFailed { + addPkg.LTOUnknown = true + } + case dbpackage.LtoEnabled: + addPkg.LTO = true + case dbpackage.LtoDisabled: + addPkg.LTODisabled = true + case dbpackage.LtoAutoDisabled: + addPkg.LTOAutoDisabled = true + } + + switch pkg.DebugSymbols { + case dbpackage.DebugSymbolsUnknown: + if pkg.Status != dbpackage.StatusSkipped && pkg.Status != dbpackage.StatusFailed { + addPkg.DebugSymUnknown = true + } + case dbpackage.DebugSymbolsAvailable: + addPkg.DebugSym = true + case dbpackage.DebugSymbolsNotAvailable: + addPkg.DebugSymNotAvailable = true + } + + addRepo.Packages = append(addRepo.Packages, addPkg) + } + addMarch.Repos = append(addMarch.Repos, addRepo) + } + gen.March = append(gen.March, addMarch) + } + + gen.Generated = time.Now().UTC().Format(time.RFC1123) + + var v []struct { + Status dbpackage.Status `json:"status"` + Count int `json:"count"` + } + + db.DbPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v) + + for _, c := range v { + switch c.Status { + case dbpackage.StatusFailed: + gen.Failed = c.Count + case 
dbpackage.StatusSkipped: + gen.Skipped = c.Count + case dbpackage.StatusLatest: + gen.Latest = c.Count + case dbpackage.StatusQueued: + gen.Queued = c.Count + } + } + + var v2 []struct { + Status dbpackage.Lto `json:"lto"` + Count int `json:"count"` + } + + db.DbPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)). + GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2) + + for _, c := range v2 { + switch c.Status { + case dbpackage.LtoUnknown: + gen.LTOUnknown = c.Count + case dbpackage.LtoDisabled, dbpackage.LtoAutoDisabled: + gen.LTODisabled += c.Count + case dbpackage.LtoEnabled: + gen.LTOEnabled = c.Count + } + } + + statusTpl, err := template.ParseFiles("tpl/packages.html") + if err != nil { + log.Warningf("[HTML] Error parsing template file: %v", err) + continue + } + + f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, "packages.html"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644) + if err != nil { + log.Warningf("[HTML] Error opening output file: %v", err) + continue + } + err = statusTpl.Execute(f, gen) + if err != nil { + log.Warningf("[HTML] Error filling template: %v", err) + } + _ = f.Close() + + time.Sleep(time.Minute * 5) + } +} + +func (b *BuildManager) repoWorker(repo string) { + for { + select { + case pkgL := <-b.repoAdd[repo]: + b.repoWG.Add(1) + toAdd := make([]string, 0) + for _, pkg := range pkgL { + toAdd = append(toAdd, pkg.PkgFiles...) + } + + args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"} + args = append(args, toAdd...) + cmd := exec.Command("repo-add", args...) 
+ res, err := cmd.CombinedOutput() + log.Debug(string(res)) + if err != nil && cmd.ProcessState.ExitCode() != 1 { + log.Panicf("%s while repo-add: %v", string(res), err) + } + + for _, pkg := range pkgL { + pkg.toDBPackage(true) + if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March, + pkg.DBPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil { + pkg.DBPackage = pkg.DBPackage.Update(). + SetStatus(dbpackage.StatusLatest). + ClearSkipReason(). + SetDebugSymbols(dbpackage.DebugSymbolsAvailable). + SetRepoVersion(pkg.Version). + SetHash(pkg.Hash). + SaveX(context.Background()) + } else { + pkg.DBPackage = pkg.DBPackage.Update(). + SetStatus(dbpackage.StatusLatest). + ClearSkipReason(). + SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable). + SetRepoVersion(pkg.Version). + SetHash(pkg.Hash). + SaveX(context.Background()) + } + } + + cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec + res, err = cmd.CombinedOutput() + log.Debug(string(res)) + if err != nil { + log.Warningf("Error running paccache: %v", err) + } + err = updateLastUpdated() + if err != nil { + log.Warningf("Error updating lastupdate: %v", err) + } + b.repoWG.Done() + case pkgL := <-b.repoPurge[repo]: + for _, pkg := range pkgL { + if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"); err != nil { + continue + } + if len(pkg.PkgFiles) == 0 { + if err := pkg.findPkgFiles(); err != nil { + log.Warningf("[%s/%s] Unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err) + continue + } else if len(pkg.PkgFiles) == 0 { + continue + } + } + + var realPkgs []string + for _, filePath := range pkg.PkgFiles { + if _, err := os.Stat(filePath); err == nil { + realPkgs = append(realPkgs, Package(filePath).Name()) + } + } + + if len(realPkgs) == 0 { + continue + } + + b.repoWG.Add(1) + args := []string{"-s", "-v", 
filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"} + args = append(args, realPkgs...) + cmd := exec.Command("repo-remove", args...) + res, err := cmd.CombinedOutput() + log.Debug(string(res)) + if err != nil && cmd.ProcessState.ExitCode() == 1 { + log.Warningf("Error while deleting package %s: %s", pkg.Pkgbase, string(res)) + } + + if pkg.DBPackage != nil { + _ = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().Exec(context.Background()) + } + + for _, file := range pkg.PkgFiles { + _ = os.Remove(file) + _ = os.Remove(file + ".sig") + } + err = updateLastUpdated() + if err != nil { + log.Warningf("Error updating lastupdate: %v", err) + } + b.repoWG.Done() + } + } + } +} + +func (b *BuildManager) syncWorker(ctx context.Context) error { + err := os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0o755) + if err != nil { + log.Fatalf("Error creating upstream dir: %v", err) + } + + for { + for gitDir, gitURL := range conf.Svn2git { + gitPath := filepath.Join(conf.Basedir.Work, upstreamDir, gitDir) + + if _, err := os.Stat(gitPath); os.IsNotExist(err) { + cmd := exec.Command("git", "clone", "--depth=1", gitURL, gitPath) + res, err := cmd.CombinedOutput() + log.Debug(string(res)) + if err != nil { + log.Fatalf("Error running git clone: %v", err) + } + } else if err == nil { + cmd := exec.Command("git", "reset", "--hard") + cmd.Dir = gitPath + res, err := cmd.CombinedOutput() + log.Debug(string(res)) + if err != nil { + log.Fatalf("Error running git reset: %v", err) + } + + cmd = exec.Command("git", "pull") + cmd.Dir = gitPath + res, err = cmd.CombinedOutput() + log.Debug(string(res)) + if err != nil { + log.Warningf("Failed to update git repo %s: %v", gitDir, err) + } + } + } + + // housekeeping + wg := new(sync.WaitGroup) + for _, repo := range repos { + wg.Add(1) + splitRepo := strings.Split(repo, "-") + repo := repo + go func() { + err := housekeeping(splitRepo[0], strings.Join(splitRepo[1:], "-"), wg) + if err 
!= nil { + log.Warningf("[%s] housekeeping failed: %v", repo, err) + } + }() + } + wg.Wait() + + err := logHK() + if err != nil { + log.Warningf("log-housekeeping failed: %v", err) + } + + // fetch updates between sync runs + b.alpmMutex.Lock() + err = alpmHandle.Release() + if err != nil { + log.Fatalf("Error releasing ALPM handle: %v", err) + } + + if err := retry.Fibonacci(ctx, 1*time.Second, func(ctx context.Context) error { + if err := setupChroot(); err != nil { + log.Warningf("Unable to upgrade chroot, trying again later.") + return retry.RetryableError(err) + } + return nil + }); err != nil { + log.Fatal(err) + } + + alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), + filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman")) + if err != nil { + log.Warningf("Error while ALPM-init: %v", err) + } + b.alpmMutex.Unlock() + + // do refreshSRCINFOs twice here + // since MirrorLatest depends on the DB being correct, there can be packages queued which should not be queued, + // so we check them twice to eliminate those. 
+ log.Debugf("generating build-queue for PKGBUILDs found in %s", filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD")) + err = b.refreshSRCINFOs(ctx, filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD")) + if err != nil { + log.Fatalf("error refreshing PKGBUILDs: %v", err) + } + log.Debugf("regenerating build-queue for PKGBUILDs found in %s", filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD")) + err = b.refreshSRCINFOs(ctx, filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD")) + if err != nil { + log.Fatalf("error refreshing PKGBUILDs: %v", err) + } + + queue, err := genQueue() + if err != nil { + log.Warningf("Error building buildQueue: %v", err) + } else { + log.Debugf("buildQueue with %d items", len(queue)) + + err = b.buildQueue(queue, ctx) + if err != nil { + return err + } + } + + if ctx.Err() == nil { + for _, repo := range repos { + err = movePackagesLive(repo) + if err != nil { + log.Errorf("[%s] Error moving packages live: %v", repo, err) + } + } + } else { + return ctx.Err() + } + + log.Debugf("build-cycle finished") + time.Sleep(time.Duration(*checkInterval) * time.Minute) + } +} diff --git a/config_dist.yaml b/config_dist.yaml index a21edbc..ab230e4 100644 --- a/config_dist.yaml +++ b/config_dist.yaml @@ -62,8 +62,10 @@ build: worker: 4 makej: 8 checks: true - # builds over this threshold are considered slow (in cpu-time-seconds) - slow_queue_threshold: 14400.0 + # how much memory ALHP should use + # this will also decide how many builds will run concurrently, + # since ALHP will try to optimise the queue for speed while not going over this limit + memory_limit: "16gb" logging: level: INFO \ No newline at end of file diff --git a/ent/client.go b/ent/client.go index 55db96b..8b90239 100644 --- a/ent/client.go +++ b/ent/client.go @@ -8,12 +8,12 @@ import ( "fmt" "log" - "git.harting.dev/ALHP/ALHP.GO/ent/migrate" - - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/migrate" + "entgo.io/ent" 
"entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" ) // Client is the client that holds all ent builders. @@ -39,6 +39,55 @@ func (c *Client) init() { c.DbPackage = NewDbPackageClient(c.config) } +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + // Open opens a database/sql.DB specified by the driver name and // the data source name, and returns a new client attached to it. // Optional parameters can be added for configuring the client. @@ -154,7 +203,7 @@ func (c *DbPackageClient) Use(hooks ...Hook) { c.hooks.DbPackage = append(c.hooks.DbPackage, hooks...) } -// Use adds a list of query interceptors to the interceptors stack. +// Intercept adds a list of query interceptors to the interceptors stack. // A call to `Intercept(f, g, h)` equals to `dbpackage.Intercept(f(g(h())))`. func (c *DbPackageClient) Intercept(interceptors ...Interceptor) { c.inters.DbPackage = append(c.inters.DbPackage, interceptors...) 
@@ -255,3 +304,13 @@ func (c *DbPackageClient) mutate(ctx context.Context, m *DbPackageMutation) (Val return nil, fmt.Errorf("ent: unknown DbPackage mutation op: %q", m.Op()) } } + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + DbPackage []ent.Hook + } + inters struct { + DbPackage []ent.Interceptor + } +) diff --git a/ent/config.go b/ent/config.go deleted file mode 100644 index 1f79dde..0000000 --- a/ent/config.go +++ /dev/null @@ -1,66 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "entgo.io/ent" - "entgo.io/ent/dialect" -) - -// Option function to configure the client. -type Option func(*config) - -// Config is the configuration for the client and its builder. -type config struct { - // driver used for executing database requests. - driver dialect.Driver - // debug enable a debug logging. - debug bool - // log used for logging on debug mode. - log func(...any) - // hooks to execute on mutations. - hooks *hooks - // interceptors to execute on queries. - inters *inters -} - -// hooks and interceptors per client, for fast access. -type ( - hooks struct { - DbPackage []ent.Hook - } - inters struct { - DbPackage []ent.Interceptor - } -) - -// Options applies the options on the config object. -func (c *config) options(opts ...Option) { - for _, opt := range opts { - opt(c) - } - if c.debug { - c.driver = dialect.Debug(c.driver, c.log) - } -} - -// Debug enables debug logging on the ent.Driver. -func Debug() Option { - return func(c *config) { - c.debug = true - } -} - -// Log sets the logging function for debug mode. -func Log(fn func(...any)) Option { - return func(c *config) { - c.log = fn - } -} - -// Driver configures the client driver. 
-func Driver(driver dialect.Driver) Option { - return func(c *config) { - c.driver = driver - } -} diff --git a/ent/context.go b/ent/context.go deleted file mode 100644 index 7811bfa..0000000 --- a/ent/context.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" -) - -type clientCtxKey struct{} - -// FromContext returns a Client stored inside a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Client { - c, _ := ctx.Value(clientCtxKey{}).(*Client) - return c -} - -// NewContext returns a new context with the given Client attached. -func NewContext(parent context.Context, c *Client) context.Context { - return context.WithValue(parent, clientCtxKey{}, c) -} - -type txCtxKey struct{} - -// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. -func TxFromContext(ctx context.Context) *Tx { - tx, _ := ctx.Value(txCtxKey{}).(*Tx) - return tx -} - -// NewTxContext returns a new context with the given Tx attached. -func NewTxContext(parent context.Context, tx *Tx) context.Context { - return context.WithValue(parent, txCtxKey{}, tx) -} diff --git a/ent/dbpackage.go b/ent/dbpackage.go index 1522b42..c0ca868 100644 --- a/ent/dbpackage.go +++ b/ent/dbpackage.go @@ -9,7 +9,7 @@ import ( "time" "entgo.io/ent/dialect/sql" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" ) // DbPackage is the model entity for the DbPackage schema. @@ -359,9 +359,3 @@ func (dp *DbPackage) String() string { // DbPackages is a parsable slice of DbPackage. 
type DbPackages []*DbPackage - -func (dp DbPackages) config(cfg config) { - for _i := range dp { - dp[_i].config = cfg - } -} diff --git a/ent/dbpackage/where.go b/ent/dbpackage/where.go index f01c736..9b68bab 100644 --- a/ent/dbpackage/where.go +++ b/ent/dbpackage/where.go @@ -6,7 +6,7 @@ import ( "time" "entgo.io/ent/dialect/sql" - "git.harting.dev/ALHP/ALHP.GO/ent/predicate" + "somegit.dev/ALHP/ALHP.GO/ent/predicate" ) // ID filters vertices based on their ID field. diff --git a/ent/dbpackage_create.go b/ent/dbpackage_create.go index 27ecf4f..b0dde65 100644 --- a/ent/dbpackage_create.go +++ b/ent/dbpackage_create.go @@ -10,7 +10,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" ) // DbPackageCreate is the builder for creating a DbPackage entity. @@ -424,13 +424,7 @@ func (dpc *DbPackageCreate) sqlSave(ctx context.Context) (*DbPackage, error) { func (dpc *DbPackageCreate) createSpec() (*DbPackage, *sqlgraph.CreateSpec) { var ( _node = &DbPackage{config: dpc.config} - _spec = &sqlgraph.CreateSpec{ - Table: dbpackage.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: dbpackage.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) ) if value, ok := dpc.mutation.Pkgbase(); ok { _spec.SetField(dbpackage.FieldPkgbase, field.TypeString, value) diff --git a/ent/dbpackage_delete.go b/ent/dbpackage_delete.go index 2f7b4c2..9c3d6e2 100644 --- a/ent/dbpackage_delete.go +++ b/ent/dbpackage_delete.go @@ -8,8 +8,8 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" - "git.harting.dev/ALHP/ALHP.GO/ent/predicate" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/predicate" ) // DbPackageDelete is the builder for deleting a DbPackage entity. 
@@ -40,15 +40,7 @@ func (dpd *DbPackageDelete) ExecX(ctx context.Context) int { } func (dpd *DbPackageDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dbpackage.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: dbpackage.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) if ps := dpd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -69,6 +61,12 @@ type DbPackageDeleteOne struct { dpd *DbPackageDelete } +// Where appends a list predicates to the DbPackageDelete builder. +func (dpdo *DbPackageDeleteOne) Where(ps ...predicate.DbPackage) *DbPackageDeleteOne { + dpdo.dpd.mutation.Where(ps...) + return dpdo +} + // Exec executes the deletion query. func (dpdo *DbPackageDeleteOne) Exec(ctx context.Context) error { n, err := dpdo.dpd.Exec(ctx) @@ -84,5 +82,7 @@ func (dpdo *DbPackageDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (dpdo *DbPackageDeleteOne) ExecX(ctx context.Context) { - dpdo.dpd.ExecX(ctx) + if err := dpdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/ent/dbpackage_query.go b/ent/dbpackage_query.go index 3a3c8b5..4e64aba 100644 --- a/ent/dbpackage_query.go +++ b/ent/dbpackage_query.go @@ -10,8 +10,8 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" - "git.harting.dev/ALHP/ALHP.GO/ent/predicate" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/predicate" ) // DbPackageQuery is the builder for querying DbPackage entities. @@ -178,10 +178,12 @@ func (dpq *DbPackageQuery) AllX(ctx context.Context) []*DbPackage { } // IDs executes the query and returns a list of DbPackage IDs. 
-func (dpq *DbPackageQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int +func (dpq *DbPackageQuery) IDs(ctx context.Context) (ids []int, err error) { + if dpq.ctx.Unique == nil && dpq.path != nil { + dpq.Unique(true) + } ctx = setContextOp(ctx, dpq.ctx, "IDs") - if err := dpq.Select(dbpackage.FieldID).Scan(ctx, &ids); err != nil { + if err = dpq.Select(dbpackage.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -369,20 +371,12 @@ func (dpq *DbPackageQuery) sqlCount(ctx context.Context) (int, error) { } func (dpq *DbPackageQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: dbpackage.Table, - Columns: dbpackage.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: dbpackage.FieldID, - }, - }, - From: dpq.sql, - Unique: true, - } + _spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) + _spec.From = dpq.sql if unique := dpq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if dpq.path != nil { + _spec.Unique = true } if fields := dpq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) diff --git a/ent/dbpackage_update.go b/ent/dbpackage_update.go index f6b2c1f..0587488 100644 --- a/ent/dbpackage_update.go +++ b/ent/dbpackage_update.go @@ -12,8 +12,8 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" - "git.harting.dev/ALHP/ALHP.GO/ent/predicate" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/predicate" ) // DbPackageUpdate is the builder for updating DbPackage entities. 
@@ -536,16 +536,7 @@ func (dpu *DbPackageUpdate) sqlSave(ctx context.Context) (n int, err error) { if err := dpu.check(); err != nil { return n, err } - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dbpackage.Table, - Columns: dbpackage.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: dbpackage.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) if ps := dpu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -1162,6 +1153,12 @@ func (dpuo *DbPackageUpdateOne) Mutation() *DbPackageMutation { return dpuo.mutation } +// Where appends a list predicates to the DbPackageUpdate builder. +func (dpuo *DbPackageUpdateOne) Where(ps ...predicate.DbPackage) *DbPackageUpdateOne { + dpuo.mutation.Where(ps...) + return dpuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
func (dpuo *DbPackageUpdateOne) Select(field string, fields ...string) *DbPackageUpdateOne { @@ -1231,16 +1228,7 @@ func (dpuo *DbPackageUpdateOne) sqlSave(ctx context.Context) (_node *DbPackage, if err := dpuo.check(); err != nil { return _node, err } - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: dbpackage.Table, - Columns: dbpackage.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: dbpackage.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt)) id, ok := dpuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DbPackage.id" for update`)} diff --git a/ent/ent.go b/ent/ent.go index 932c021..b153630 100644 --- a/ent/ent.go +++ b/ent/ent.go @@ -11,7 +11,7 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" ) // ent aliases to avoid import conflicts in user's code. @@ -33,6 +33,32 @@ type ( MutateFunc = ent.MutateFunc ) +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. 
+func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + // OrderFunc applies an ordering on the sql selector. type OrderFunc func(*sql.Selector) diff --git a/ent/enttest/enttest.go b/ent/enttest/enttest.go index 5589d44..b5c47da 100644 --- a/ent/enttest/enttest.go +++ b/ent/enttest/enttest.go @@ -5,12 +5,12 @@ package enttest import ( "context" - "git.harting.dev/ALHP/ALHP.GO/ent" + "somegit.dev/ALHP/ALHP.GO/ent" // required by schema hooks. - _ "git.harting.dev/ALHP/ALHP.GO/ent/runtime" + _ "somegit.dev/ALHP/ALHP.GO/ent/runtime" "entgo.io/ent/dialect/sql/schema" - "git.harting.dev/ALHP/ALHP.GO/ent/migrate" + "somegit.dev/ALHP/ALHP.GO/ent/migrate" ) type ( diff --git a/ent/hook/hook.go b/ent/hook/hook.go index 682ff0f..b8605cb 100644 --- a/ent/hook/hook.go +++ b/ent/hook/hook.go @@ -6,7 +6,7 @@ import ( "context" "fmt" - "git.harting.dev/ALHP/ALHP.GO/ent" + "somegit.dev/ALHP/ALHP.GO/ent" ) // The DbPackageFunc type is an adapter to allow the use of ordinary diff --git a/ent/mutation.go b/ent/mutation.go index 8ed0a3f..9d25529 100644 --- a/ent/mutation.go +++ b/ent/mutation.go @@ -9,11 +9,10 @@ import ( "sync" "time" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" - "git.harting.dev/ALHP/ALHP.GO/ent/predicate" - "entgo.io/ent" "entgo.io/ent/dialect/sql" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/predicate" ) const ( diff --git a/ent/runtime.go b/ent/runtime.go index 689b7ed..79e4e8c 100644 --- a/ent/runtime.go +++ b/ent/runtime.go @@ -3,8 +3,8 @@ package ent import ( - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" - "git.harting.dev/ALHP/ALHP.GO/ent/schema" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" + "somegit.dev/ALHP/ALHP.GO/ent/schema" ) // The init function reads all schema descriptors with runtime code diff --git a/ent/runtime/runtime.go b/ent/runtime/runtime.go index e040a60..94a2681 100644 --- a/ent/runtime/runtime.go +++ b/ent/runtime/runtime.go @@ -2,9 +2,9 @@ 
package runtime -// The schema-stitching logic is generated in git.harting.dev/ALHP/ALHP.GO/ent/runtime.go +// The schema-stitching logic is generated in somegit.dev/ALHP/ALHP.GO/ent/runtime.go const ( - Version = "v0.11.6" // Version of ent codegen. - Sum = "h1:fMQwhuzbPv12AXdrAGyHoOcgh9r0D9F8WEsCRoUWxVc=" // Sum of ent codegen. + Version = "v0.11.9" // Version of ent codegen. + Sum = "h1:dbbCkAiPVTRBIJwoZctiSYjB7zxQIBOzVSU5H9VYIQI=" // Sum of ent codegen. ) diff --git a/ent/schema/dbpackage.go b/ent/schema/dbpackage.go index aa91373..fa8d5cf 100644 --- a/ent/schema/dbpackage.go +++ b/ent/schema/dbpackage.go @@ -15,7 +15,8 @@ func (DbPackage) Fields() []ent.Field { return []ent.Field{ field.String("pkgbase").NotEmpty().Immutable(), field.Strings("packages").Optional(), - field.Enum("status").Values("skipped", "failed", "build", "queued", "delayed", "building", "latest", "signing", "unknown").Default("unknown").Optional(), + field.Enum("status").Values("skipped", "failed", "build", "queued", "delayed", "building", + "latest", "signing", "unknown").Default("unknown").Optional(), field.String("skip_reason").Optional(), field.Enum("repository").Values("extra", "core", "community"), field.String("march").NotEmpty().Immutable(), diff --git a/flags.yaml b/flags.yaml index d6b0bbf..43f0886 100644 --- a/flags.yaml +++ b/flags.yaml @@ -26,9 +26,9 @@ common: lto: rustflags: - - "-Clto=fat" - "-Ccodegen-units=1" - - "-Clinker-plugin-lto" options: - - "!lto": "lto" \ No newline at end of file + - "!lto": "lto" + + cargo_profile_release_lto: "fat" \ No newline at end of file diff --git a/go.mod b/go.mod index d339fa2..5c39520 100644 --- a/go.mod +++ b/go.mod @@ -1,43 +1,43 @@ -module git.harting.dev/ALHP/ALHP.GO +module somegit.dev/ALHP/ALHP.GO -go 1.18 +go 1.20 require ( - entgo.io/ent v0.11.6 - github.com/Jguer/go-alpm/v2 v2.1.2 + entgo.io/ent v0.11.9 + github.com/Jguer/go-alpm/v2 v2.2.0 github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 
github.com/Morganamilo/go-srcinfo v1.0.0 + github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b github.com/google/uuid v1.3.0 - github.com/jackc/pgx/v4 v4.17.2 + github.com/jackc/pgx/v4 v4.18.1 github.com/otiai10/copy v1.9.0 github.com/sethvargo/go-retry v0.2.4 github.com/sirupsen/logrus v1.9.0 github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3 - golang.org/x/sync v0.1.0 gopkg.in/yaml.v2 v2.4.0 lukechampine.com/blake3 v1.1.7 ) require ( - ariga.io/atlas v0.9.1-0.20230119123307-a3ab6808892b // indirect + ariga.io/atlas v0.9.1 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/go-openapi/inflect v0.19.0 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/hashicorp/hcl/v2 v2.15.0 // indirect + github.com/hashicorp/hcl/v2 v2.16.2 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.13.0 // indirect + github.com/jackc/pgconn v1.14.0 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.3.1 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/pgtype v1.13.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.3 // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect - golang.org/x/crypto v0.5.0 // indirect - golang.org/x/mod v0.7.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + github.com/zclconf/go-cty v1.13.0 // indirect + golang.org/x/crypto v0.7.0 // indirect + golang.org/x/mod v0.9.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // 
indirect ) diff --git a/go.sum b/go.sum index f56c870..2f98017 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,11 @@ -ariga.io/atlas v0.9.1-0.20230119123307-a3ab6808892b h1:f1868Z/5iWzfVMgjOBwjjP/mRCxOSbXtAl+9DAYb4kg= -ariga.io/atlas v0.9.1-0.20230119123307-a3ab6808892b/go.mod h1:T230JFcENj4ZZzMkZrXFDSkv+2kXkUgpJ5FQQ5hMcKU= -entgo.io/ent v0.11.6 h1:fMQwhuzbPv12AXdrAGyHoOcgh9r0D9F8WEsCRoUWxVc= -entgo.io/ent v0.11.6/go.mod h1:d4yUWiwY3NQtjGvINzAhUyypopfeEKOxcxLN7D5yM7o= +ariga.io/atlas v0.9.1 h1:EpoPMnwsQG0vn9c0sYExpwSYtr7bvuSUXzQclU2pMjc= +ariga.io/atlas v0.9.1/go.mod h1:T230JFcENj4ZZzMkZrXFDSkv+2kXkUgpJ5FQQ5hMcKU= +entgo.io/ent v0.11.9 h1:dbbCkAiPVTRBIJwoZctiSYjB7zxQIBOzVSU5H9VYIQI= +entgo.io/ent v0.11.9/go.mod h1:KWHOcDZn1xk3mz3ipWdKrQpMvwqa/9B69TUuAPP9W6g= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/Jguer/go-alpm/v2 v2.1.2 h1:CGTIxzuEpT9Q3a7IBrx0E6acoYoaHX2Z93UOApPDhgU= -github.com/Jguer/go-alpm/v2 v2.1.2/go.mod h1:uLQcTMNM904dRiGU+/JDtDdd7Nd8mVbEVaHjhmziT7w= +github.com/Jguer/go-alpm/v2 v2.2.0 h1:+sh4UEZwTpcAO+vHdySsnLZSnLZIBun8j85BbPExSlg= +github.com/Jguer/go-alpm/v2 v2.2.0/go.mod h1:uLQcTMNM904dRiGU+/JDtDdd7Nd8mVbEVaHjhmziT7w= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Morganamilo/go-pacmanconf v0.0.0-20210502114700-cff030e927a5 h1:TMscPjkb1ThXN32LuFY5bEYIcXZx3YlwzhS1GxNpn/c= @@ -16,6 +16,8 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= 
+github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -39,8 +41,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= -github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= +github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0= +github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -51,8 +53,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= -github.com/jackc/pgconn 
v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= +github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q= +github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -68,8 +70,8 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= -github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= +github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= @@ -77,23 +79,22 @@ github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01C github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype 
v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgtype v1.13.0 h1:XkIc7A+1BmZD19bB2NxrtjJweHxQ9agqvM+9URc68Cg= -github.com/jackc/pgtype v1.13.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= -github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= +github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= +github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= 
-github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -146,6 +147,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -153,11 +155,13 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3 
h1:shC1HB1UogxN5Ech3Yqaaxj1X/P656PPCB4RbojIJqc= github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.13.0 h1:It5dfKTTZHe9aeppbNOda3mN7Ag7sg6QkBNm6TkyFa0= +github.com/zclconf/go-cty v1.13.0/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -179,23 +183,25 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -207,22 +213,27 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -230,7 +241,9 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/housekeeping.go b/housekeeping.go new file mode 100644 index 0000000..4dfa088 --- /dev/null +++ b/housekeeping.go @@ -0,0 +1,265 @@ +package main + +import ( + "context" + log "github.com/sirupsen/logrus" + "os" + "path/filepath" + "somegit.dev/ALHP/ALHP.GO/ent" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" + "strings" + "sync" + "time" +) + +func housekeeping(repo, march string, wg *sync.WaitGroup) error { + defer wg.Done() + fullRepo := repo + "-" + march + log.Debugf("[%s] Start housekeeping", fullRepo) + packages, err := Glob(filepath.Join(conf.Basedir.Repo, fullRepo, "/**/*.pkg.tar.zst")) + if err != nil { + return err + } + + log.Debugf("[HK/%s] removing orphans, signature check", fullRepo) + for _, path := range packages { + mPackage := Package(path) + + dbPkg, err := mPackage.DBPackage(db) + if ent.IsNotFound(err) { + log.Infof("[HK/%s] removing orphan %s", fullRepo, filepath.Base(path)) + pkg := &ProtoPackage{ + FullRepo: mPackage.FullRepo(), + PkgFiles: []string{path}, + March: mPackage.MArch(), + } + buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} + continue + } else if err != nil { + log.Warningf("[HK/%s] Problem fetching package from db for %s: %v", fullRepo, path, err) + continue + } + + pkg := &ProtoPackage{ + Pkgbase: dbPkg.Pkgbase, + Repo: mPackage.Repo(), + FullRepo: mPackage.FullRepo(), + DBPackage: dbPkg, + March: mPackage.MArch(), + Arch: mPackage.Arch(), + } + + var upstream string + switch pkg.DBPackage.Repository { + case dbpackage.RepositoryCore, dbpackage.RepositoryExtra: + upstream = "upstream-core-extra" + case dbpackage.RepositoryCommunity: 
+ upstream = "upstream-community" + } + pkg.Pkgbuild = filepath.Join(conf.Basedir.Work, upstreamDir, upstream, dbPkg.Pkgbase, "repos", + pkg.DBPackage.Repository.String()+"-"+conf.Arch, "PKGBUILD") + + // check if package is still part of repo + dbs, err := alpmHandle.SyncDBs() + if err != nil { + return err + } + buildManager.alpmMutex.Lock() + pkgResolved, err := dbs.FindSatisfier(mPackage.Name()) + buildManager.alpmMutex.Unlock() + if err != nil || pkgResolved.DB().Name() != pkg.DBPackage.Repository.String() || pkgResolved.DB().Name() != pkg.Repo.String() || + pkgResolved.Architecture() != pkg.Arch || pkgResolved.Name() != mPackage.Name() { + // package not found on mirror/db -> not part of any repo anymore + log.Infof("[HK/%s/%s] not included in repo", pkg.FullRepo, mPackage.Name()) + buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} + err = db.DbPackage.DeleteOne(pkg.DBPackage).Exec(context.Background()) + if err != nil { + return err + } + continue + } + + if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) { + err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(context.Background()) + if err != nil { + return err + } + // check if pkg signature is valid + valid, err := mPackage.HasValidSignature() + if err != nil { + return err + } + if !valid { + log.Infof("[HK/%s/%s] invalid package signature", pkg.FullRepo, pkg.Pkgbase) + buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} + continue + } + } + + // compare db-version with repo version + repoVer, err := pkg.repoVersion() + if err == nil && repoVer != dbPkg.RepoVersion { + log.Infof("[HK/%s/%s] update %s->%s in db", pkg.FullRepo, pkg.Pkgbase, dbPkg.RepoVersion, repoVer) + pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearHash().Save(context.Background()) + if err != nil { + return err + } + } + } + + // check all packages from db for existence + dbPackages, err := db.DbPackage.Query().Where( + dbpackage.And( + 
dbpackage.RepositoryEQ(dbpackage.Repository(repo)), + dbpackage.March(march), + )).All(context.Background()) + if err != nil { + return err + } + + log.Debugf("[HK/%s] checking %d existing package-files", fullRepo, len(dbPackages)) + + for _, dbPkg := range dbPackages { + pkg := &ProtoPackage{ + Pkgbase: dbPkg.Pkgbase, + Repo: dbPkg.Repository, + March: dbPkg.March, + FullRepo: dbPkg.Repository.String() + "-" + dbPkg.March, + DBPackage: dbPkg, + } + + if !pkg.isAvailable(alpmHandle) { + log.Infof("[HK/%s/%s] not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase) + err = db.DbPackage.DeleteOne(dbPkg).Exec(context.Background()) + if err != nil { + log.Errorf("[HK] Error deleting package %s: %v", dbPkg.Pkgbase, err) + } + continue + } + + switch { + case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion != "": + var existingSplits []string + var missingSplits []string + for _, splitPkg := range dbPkg.Packages { + pkgFile := filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, + splitPkg+"-"+dbPkg.RepoVersion+"-"+conf.Arch+".pkg.tar.zst") + _, err = os.Stat(pkgFile) + switch { + case os.IsNotExist(err): + missingSplits = append(missingSplits, splitPkg) + case err != nil: + log.Warningf("[HK] error reading package-file %s: %v", splitPkg, err) + default: + existingSplits = append(existingSplits, pkgFile) + } + } + if len(missingSplits) > 0 { + log.Infof("[HK/%s] missing split-package(s) %s for pkgbase %s", fullRepo, missingSplits, dbPkg.Pkgbase) + pkg.DBPackage, err = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background()) + if err != nil { + return err + } + + pkg := &ProtoPackage{ + FullRepo: fullRepo, + PkgFiles: existingSplits, + March: march, + DBPackage: dbPkg, + } + buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg} + } + case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "": + log.Infof("[HK] reseting missing package %s with no repo version", dbPkg.Pkgbase) + 
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearHash().ClearRepoVersion().Exec(context.Background()) + if err != nil { + return err + } + case dbPkg.Status == dbpackage.StatusSkipped && dbPkg.RepoVersion != "" && strings.HasPrefix(dbPkg.SkipReason, "blacklisted"): + log.Infof("[HK] delete blacklisted package %s", dbPkg.Pkgbase) + pkg := &ProtoPackage{ + FullRepo: fullRepo, + March: march, + DBPackage: dbPkg, + } + buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg} + } + } + + log.Debugf("[HK/%s] all tasks finished", fullRepo) + return nil +} + +func logHK() error { + // check if package for log exists and if error can be fixed by rebuild + logFiles, err := Glob(filepath.Join(conf.Basedir.Repo, logDir, "/**/*.log")) + if err != nil { + return err + } + + for _, logFile := range logFiles { + pathSplit := strings.Split(logFile, string(filepath.Separator)) + extSplit := strings.Split(filepath.Base(logFile), ".") + pkgbase := strings.Join(extSplit[:len(extSplit)-1], ".") + march := pathSplit[len(pathSplit)-2] + + pkg := ProtoPackage{ + Pkgbase: pkgbase, + March: march, + } + + if exists, err := pkg.exists(); err != nil { + return err + } else if !exists { + _ = os.Remove(logFile) + continue + } + + pkgSkipped, err := db.DbPackage.Query().Where( + dbpackage.Pkgbase(pkg.Pkgbase), + dbpackage.March(pkg.March), + dbpackage.StatusEQ(dbpackage.StatusSkipped), + ).Exist(context.Background()) + if err != nil { + return err + } + + if pkgSkipped { + _ = os.Remove(logFile) + continue + } + + logContent, err := os.ReadFile(logFile) + if err != nil { + return err + } + sLogContent := string(logContent) + + if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) { + rows, err := db.DbPackage.Update().Where(dbpackage.And(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March), + dbpackage.StatusEQ(dbpackage.StatusFailed))).ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background()) + 
if err != nil { + return err + } + + if rows > 0 { + log.Infof("[HK/%s/%s] fixable build-error detected, requeueing package (%d)", pkg.March, pkg.Pkgbase, rows) + } + } else if reLdError.MatchString(sLogContent) || reRustLTOError.MatchString(sLogContent) { + rows, err := db.DbPackage.Update().Where( + dbpackage.Pkgbase(pkg.Pkgbase), + dbpackage.March(pkg.March), + dbpackage.StatusEQ(dbpackage.StatusFailed), + dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled), + ).ClearHash().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(context.Background()) + if err != nil { + return err + } + + if rows > 0 { + log.Infof("[HK/%s/%s] fixable build-error detected (linker-error), requeueing package (%d)", pkg.March, pkg.Pkgbase, rows) + } + } + } + return nil +} diff --git a/main.go b/main.go index 87207b1..d2c3319 100644 --- a/main.go +++ b/main.go @@ -5,27 +5,18 @@ import ( "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "flag" - "fmt" - "git.harting.dev/ALHP/ALHP.GO/ent" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" - "git.harting.dev/ALHP/ALHP.GO/ent/migrate" "github.com/Jguer/go-alpm/v2" _ "github.com/jackc/pgx/v4/stdlib" - "github.com/sethvargo/go-retry" log "github.com/sirupsen/logrus" "github.com/wercker/journalhook" - "golang.org/x/sync/semaphore" "gopkg.in/yaml.v2" - "html/template" - "math" "os" - "os/exec" "os/signal" "path/filepath" - "strings" + "somegit.dev/ALHP/ALHP.GO/ent" + "somegit.dev/ALHP/ALHP.GO/ent/migrate" "sync" "syscall" - "time" ) var ( @@ -38,447 +29,6 @@ var ( checkInterval = flag.Int("interval", 5, "How often svn2git should be checked in minutes (default: 5)") ) -func (b *BuildManager) htmlWorker(ctx context.Context) { - type Pkg struct { - Pkgbase string - Status string - Class string - Skip string - Version string - Svn2GitVersion string - BuildDate string - BuildDuration time.Duration - Checked string - Log string - LTO bool - LTOUnknown bool - LTODisabled bool - LTOAutoDisabled bool - DebugSym bool - 
DebugSymNotAvailable bool - DebugSymUnknown bool - } - - type Repo struct { - Name string - Packages []Pkg - } - - type March struct { - Name string - Repos []Repo - } - - type tpl struct { - March []March - Generated string - Latest int - Failed int - Skipped int - Queued int - LTOEnabled int - LTOUnknown int - LTODisabled int - } - - for { - gen := &tpl{} - - for _, march := range conf.March { - addMarch := March{ - Name: march, - } - - for _, repo := range conf.Repos { - addRepo := Repo{ - Name: repo, - } - - pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)). - Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx) - - for _, pkg := range pkgs { - addPkg := Pkg{ - Pkgbase: pkg.Pkgbase, - Status: strings.ToUpper(pkg.Status.String()), - Class: statusID2string(pkg.Status), - Skip: pkg.SkipReason, - Version: pkg.RepoVersion, - Svn2GitVersion: pkg.Version, - } - - if pkg.STime != nil && pkg.UTime != nil { - addPkg.BuildDuration = time.Duration(*pkg.STime+*pkg.UTime) * time.Second - } - - if !pkg.BuildTimeStart.IsZero() { - addPkg.BuildDate = pkg.BuildTimeStart.UTC().Format(time.RFC1123) - } - - if !pkg.Updated.IsZero() { - addPkg.Checked = pkg.Updated.UTC().Format(time.RFC1123) - } - - if pkg.Status == dbpackage.StatusFailed { - addPkg.Log = fmt.Sprintf("%s/%s/%s.log", logDir, pkg.March, pkg.Pkgbase) - } - - switch pkg.Lto { - case dbpackage.LtoUnknown: - if pkg.Status != dbpackage.StatusSkipped && pkg.Status != dbpackage.StatusFailed { - addPkg.LTOUnknown = true - } - case dbpackage.LtoEnabled: - addPkg.LTO = true - case dbpackage.LtoDisabled: - addPkg.LTODisabled = true - case dbpackage.LtoAutoDisabled: - addPkg.LTOAutoDisabled = true - } - - switch pkg.DebugSymbols { - case dbpackage.DebugSymbolsUnknown: - if pkg.Status != dbpackage.StatusSkipped && pkg.Status != dbpackage.StatusFailed { - addPkg.DebugSymUnknown = true - } - case dbpackage.DebugSymbolsAvailable: - addPkg.DebugSym = true - case 
dbpackage.DebugSymbolsNotAvailable: - addPkg.DebugSymNotAvailable = true - } - - addRepo.Packages = append(addRepo.Packages, addPkg) - } - addMarch.Repos = append(addMarch.Repos, addRepo) - } - gen.March = append(gen.March, addMarch) - } - - gen.Generated = time.Now().UTC().Format(time.RFC1123) - - var v []struct { - Status dbpackage.Status `json:"status"` - Count int `json:"count"` - } - - db.DbPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v) - - for _, c := range v { - switch c.Status { - case dbpackage.StatusFailed: - gen.Failed = c.Count - case dbpackage.StatusSkipped: - gen.Skipped = c.Count - case dbpackage.StatusLatest: - gen.Latest = c.Count - case dbpackage.StatusQueued: - gen.Queued = c.Count - } - } - - var v2 []struct { - Status dbpackage.Lto `json:"lto"` - Count int `json:"count"` - } - - db.DbPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)). - GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2) - - for _, c := range v2 { - switch c.Status { - case dbpackage.LtoUnknown: - gen.LTOUnknown = c.Count - case dbpackage.LtoDisabled, dbpackage.LtoAutoDisabled: - gen.LTODisabled += c.Count - case dbpackage.LtoEnabled: - gen.LTOEnabled = c.Count - } - } - - statusTpl, err := template.ParseFiles("tpl/packages.html") - if err != nil { - log.Warningf("[HTML] Error parsing template file: %v", err) - continue - } - - f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, "packages.html"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644) - if err != nil { - log.Warningf("[HTML] Erro ropening output file: %v", err) - continue - } - err = statusTpl.Execute(f, gen) - if err != nil { - log.Warningf("[HTML] Error filling template: %v", err) - } - _ = f.Close() - - time.Sleep(time.Minute * 5) - } -} - -func (b *BuildManager) repoWorker(repo string) { - for { - select { - case pkgL := <-b.repoAdd[repo]: - b.repoWG.Add(1) - toAdd := make([]string, 0) - for _, pkg := range pkgL { - toAdd = append(toAdd, 
pkg.PkgFiles...) - } - - args := []string{"-s", "-v", "-p", "-n", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch, repo) + ".db.tar.xz"} - args = append(args, toAdd...) - cmd := exec.Command("repo-add", args...) - res, err := cmd.CombinedOutput() - log.Debug(string(res)) - if err != nil && cmd.ProcessState.ExitCode() != 1 { - log.Panicf("%s while repo-add: %v", string(res), err) - } - - for _, pkg := range pkgL { - pkg.toDBPackage(true) - if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March, - pkg.DBPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil { - pkg.DBPackage = pkg.DBPackage.Update(). - SetStatus(dbpackage.StatusLatest). - ClearSkipReason(). - SetDebugSymbols(dbpackage.DebugSymbolsAvailable). - SetRepoVersion(pkg.Version). - SetHash(pkg.Hash). - SaveX(context.Background()) - } else { - pkg.DBPackage = pkg.DBPackage.Update(). - SetStatus(dbpackage.StatusLatest). - ClearSkipReason(). - SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable). - SetRepoVersion(pkg.Version). - SetHash(pkg.Hash). 
- SaveX(context.Background()) - } - } - - cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec - res, err = cmd.CombinedOutput() - log.Debug(string(res)) - if err != nil { - log.Warningf("Error running paccache: %v", err) - } - err = updateLastUpdated() - if err != nil { - log.Warningf("Error updating lastupdate: %v", err) - } - b.repoWG.Done() - case pkgL := <-b.repoPurge[repo]: - for _, pkg := range pkgL { - if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"); err != nil { - continue - } - if len(pkg.PkgFiles) == 0 { - if err := pkg.findPkgFiles(); err != nil { - log.Warningf("[%s/%s] Unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err) - continue - } else if len(pkg.PkgFiles) == 0 { - continue - } - } - - var realPkgs []string - for _, filePath := range pkg.PkgFiles { - if _, err := os.Stat(filePath); err == nil { - realPkgs = append(realPkgs, Package(filePath).Name()) - } - } - - if len(realPkgs) == 0 { - continue - } - - b.repoWG.Add(1) - args := []string{"-s", "-v", filepath.Join(conf.Basedir.Repo, pkg.FullRepo, "os", conf.Arch, pkg.FullRepo) + ".db.tar.xz"} - args = append(args, realPkgs...) - cmd := exec.Command("repo-remove", args...) 
- res, err := cmd.CombinedOutput() - log.Debug(string(res)) - if err != nil && cmd.ProcessState.ExitCode() == 1 { - log.Warningf("Error while deleting package %s: %s", pkg.Pkgbase, string(res)) - } - - if pkg.DBPackage != nil { - _ = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().Exec(context.Background()) - } - - for _, file := range pkg.PkgFiles { - _ = os.Remove(file) - _ = os.Remove(file + ".sig") - } - err = updateLastUpdated() - if err != nil { - log.Warningf("Error updating lastupdate: %v", err) - } - b.repoWG.Done() - } - } - } -} - -func (b *BuildManager) syncWorker(ctx context.Context) error { - err := os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0o755) - if err != nil { - log.Fatalf("Error creating upstream dir: %v", err) - } - - for { - for gitDir, gitURL := range conf.Svn2git { - gitPath := filepath.Join(conf.Basedir.Work, upstreamDir, gitDir) - - if _, err := os.Stat(gitPath); os.IsNotExist(err) { - cmd := exec.Command("git", "clone", "--depth=1", gitURL, gitPath) - res, err := cmd.CombinedOutput() - log.Debug(string(res)) - if err != nil { - log.Fatalf("Error running git clone: %v", err) - } - } else if err == nil { - cmd := exec.Command("git", "reset", "--hard") - cmd.Dir = gitPath - res, err := cmd.CombinedOutput() - log.Debug(string(res)) - if err != nil { - log.Fatalf("Error running git reset: %v", err) - } - - cmd = exec.Command("git", "pull") - cmd.Dir = gitPath - res, err = cmd.CombinedOutput() - log.Debug(string(res)) - if err != nil { - log.Warningf("Failed to update git repo %s: %v", gitDir, err) - } - } - } - - // housekeeping - wg := new(sync.WaitGroup) - for _, repo := range repos { - wg.Add(1) - splitRepo := strings.Split(repo, "-") - repo := repo - go func() { - err := housekeeping(splitRepo[0], strings.Join(splitRepo[1:], "-"), wg) - if err != nil { - log.Warningf("[%s] housekeeping failed: %v", repo, err) - } - }() - } - wg.Wait() - - err := logHK() - if err != nil { - log.Warningf("log-housekeeping failed: %v", 
err) - } - - // fetch updates between sync runs - b.alpmMutex.Lock() - err = alpmHandle.Release() - if err != nil { - log.Fatalf("Error releasing ALPM handle: %v", err) - } - - if err := retry.Fibonacci(ctx, 1*time.Second, func(ctx context.Context) error { - if err := setupChroot(); err != nil { - log.Warningf("Unable to upgrade chroot, trying again later.") - return retry.RetryableError(err) - } - return nil - }); err != nil { - log.Fatal(err) - } - - alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), - filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman")) - if err != nil { - log.Warningf("Error while ALPM-init: %v", err) - } - b.alpmMutex.Unlock() - - // do refreshSRCINFOs twice here - // since MirrorLatest depends on the DB being correct, there can be packages queued which should not be queued, - // so we check them twice to eliminate those. - log.Debugf("generating build-queue for PKGBUILDs found in %s", filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD")) - err = b.refreshSRCINFOs(ctx, filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD")) - if err != nil { - log.Fatalf("error refreshing PKGBUILDs: %v", err) - } - log.Debugf("regenerating build-queue for PKGBUILDs found in %s", filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD")) - err = b.refreshSRCINFOs(ctx, filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD")) - if err != nil { - log.Fatalf("error refreshing PKGBUILDs: %v", err) - } - - queue, err := b.queue() - if err != nil { - log.Warningf("Error building buildQueue: %v", err) - } else { - log.Debugf("buildQueue with %d items", len(queue)) - var fastQueue []*ProtoPackage - var slowQueue []*ProtoPackage - - maxDiff := 0.0 - cutOff := 0.0 - for i := 0; i < len(queue); i++ { - if i+1 < len(queue) { - if math.Abs(queue[i].Priority()-queue[i+1].Priority()) > maxDiff { - maxDiff = math.Abs(queue[i].Priority() - queue[i+1].Priority()) - cutOff = 
queue[i].Priority() - } - } - } - - for _, pkg := range queue { - if pkg.Priority() > cutOff && cutOff >= conf.Build.SlowQueueThreshold { - slowQueue = append(slowQueue, pkg) - } else { - fastQueue = append(fastQueue, pkg) - } - } - - if len(fastQueue) > 0 && len(slowQueue) > 0 { - log.Infof("Skipping slowQueue=%d in favor of fastQueue=%d", len(slowQueue), len(fastQueue)) - slowQueue = []*ProtoPackage{} - } - - err = b.buildQueue(fastQueue, ctx) - if err != nil { - return err - } - - err = b.buildQueue(slowQueue, ctx) - if err != nil { - return err - } - - if err := b.sem.Acquire(ctx, int64(conf.Build.Worker)); err != nil { - return err - } - b.sem.Release(int64(conf.Build.Worker)) - } - - if ctx.Err() == nil { - for _, repo := range repos { - err = movePackagesLive(repo) - if err != nil { - log.Errorf("[%s] Error moving packages live: %v", repo, err) - } - } - } else { - return ctx.Err() - } - - log.Debugf("build-cycle finished") - time.Sleep(time.Duration(*checkInterval) * time.Minute) - } -} - func main() { killSignals := make(chan os.Signal, 1) signal.Notify(killSignals, syscall.SIGINT, syscall.SIGTERM) @@ -540,9 +90,13 @@ func main() { } buildManager = &BuildManager{ - repoPurge: make(map[string]chan []*ProtoPackage), - repoAdd: make(map[string]chan []*ProtoPackage), - sem: semaphore.NewWeighted(int64(conf.Build.Worker)), + repoPurge: make(map[string]chan []*ProtoPackage), + repoAdd: make(map[string]chan []*ProtoPackage), + queueSignal: make(chan struct{}), + alpmMutex: new(sync.RWMutex), + building: []*ProtoPackage{}, + buildingLock: new(sync.RWMutex), + repoWG: new(sync.WaitGroup), } err = setupChroot() diff --git a/package.go b/package.go index 9e0bf74..ed86fe8 100644 --- a/package.go +++ b/package.go @@ -5,11 +5,11 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqljson" "fmt" - "git.harting.dev/ALHP/ALHP.GO/ent" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" log "github.com/sirupsen/logrus" "os/exec" "path/filepath" + 
"somegit.dev/ALHP/ALHP.GO/ent" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" "strings" ) diff --git a/proto_package.go b/proto_package.go index 9133b9a..af86f77 100644 --- a/proto_package.go +++ b/proto_package.go @@ -7,19 +7,19 @@ import ( "encoding/hex" "errors" "fmt" - "git.harting.dev/ALHP/ALHP.GO/ent" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" "github.com/Jguer/go-alpm/v2" "github.com/Morganamilo/go-srcinfo" + "github.com/c2h5oh/datasize" "github.com/google/uuid" "github.com/otiai10/copy" log "github.com/sirupsen/logrus" "io" - "math/rand" "net/http" "os" "os/exec" "path/filepath" + "somegit.dev/ALHP/ALHP.GO/ent" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" "strconv" "strings" "syscall" @@ -70,6 +70,11 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) { p.DBPackage.SkipReason = "blacklisted (haskell)" p.DBPackage.Status = dbpackage.StatusSkipped skipping = true + case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit: + log.Debugf("Skipped %s: memory limit exceeded (%s)", p.Srcinfo.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB) + p.DBPackage.SkipReason = "memory limit exceeded" + p.DBPackage.Status = dbpackage.StatusSkipped + skipping = true case p.isPkgFailed(): log.Debugf("Skipped %s: failed build", p.Srcinfo.Pkgbase) skipping = true @@ -141,11 +146,6 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) { } func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) { - // Sleep randomly here to add some delay, avoiding two pacman instances trying to download the same package, - // which leads to errors when it's trying to remove the same temporary download file. - // This can be removed as soon as we can pass separate cache locations to makechrootpkg. 
- rand.Seed(time.Now().UnixNano()) - time.Sleep(time.Duration(rand.Float32()*60) * time.Second) //nolint:gomnd,gosec start := time.Now().UTC() chroot := "build_" + uuid.New().String() @@ -347,18 +347,6 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) { return time.Since(start), nil } -func (p *ProtoPackage) Priority() float64 { - if p.DBPackage == nil { - return 0 - } - - if p.DBPackage.STime == nil || p.DBPackage.UTime == nil { - return 0 - } else { - return float64(*p.DBPackage.STime + *p.DBPackage.UTime) - } -} - func (p *ProtoPackage) setupBuildDir() (string, error) { buildDir := filepath.Join(conf.Basedir.Work, buildDir, p.March, p.Pkgbase+"-"+p.Version) @@ -835,3 +823,7 @@ func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg alp return true, nil, "", nil } + +func (p *ProtoPackage) PkgbaseEquals(p2 *ProtoPackage, marchSensitive bool) bool { + return (marchSensitive && (p.Pkgbase == p2.Pkgbase && p.FullRepo == p2.FullRepo)) || (!marchSensitive && p.Pkgbase == p2.Pkgbase) +} diff --git a/utils.go b/utils.go index 1d59dd9..f0f6c50 100644 --- a/utils.go +++ b/utils.go @@ -4,13 +4,11 @@ import ( "context" "encoding/hex" "fmt" - "git.harting.dev/ALHP/ALHP.GO/ent" - "git.harting.dev/ALHP/ALHP.GO/ent/dbpackage" "github.com/Jguer/go-alpm/v2" paconf "github.com/Morganamilo/go-pacmanconf" "github.com/Morganamilo/go-srcinfo" + "github.com/c2h5oh/datasize" log "github.com/sirupsen/logrus" - "golang.org/x/sync/semaphore" "gopkg.in/yaml.v2" "io" "io/fs" @@ -19,11 +17,9 @@ import ( "os/exec" "path/filepath" "regexp" - "runtime" - "sort" + "somegit.dev/ALHP/ALHP.GO/ent/dbpackage" "strconv" "strings" - "sync" "time" ) @@ -57,14 +53,6 @@ var ( reRustLTOError = regexp.MustCompile(`(?m)^error: options \x60-C (.+)\x60 and \x60-C lto\x60 are incompatible$`) ) -type BuildManager struct { - repoPurge map[string]chan []*ProtoPackage - repoAdd map[string]chan []*ProtoPackage - repoWG sync.WaitGroup - alpmMutex sync.RWMutex - sem 
*semaphore.Weighted -} - type Conf struct { Arch string Repos, March []string @@ -77,10 +65,9 @@ type Conf struct { ConnectTo string `yaml:"connect_to"` } `yaml:"db"` Build struct { - Worker int - Makej int - Checks bool - SlowQueueThreshold float64 `yaml:"slow_queue_threshold"` + Makej int + Checks bool + MemoryLimit datasize.ByteSize `yaml:"memory_limit"` } Logging struct { Level string @@ -119,88 +106,6 @@ func updateLastUpdated() error { return nil } -func (b *BuildManager) refreshSRCINFOs(ctx context.Context, path string) error { - pkgBuilds, err := Glob(path) - if err != nil { - return fmt.Errorf("error scanning for PKGBUILDs: %w", err) - } - - step := int(float32(len(pkgBuilds)) / float32(runtime.NumCPU())) - cur := 0 - wg := sync.WaitGroup{} - for i := 0; i < runtime.NumCPU(); i++ { - if i == runtime.NumCPU()-1 { - step = len(pkgBuilds) - cur - } - - wg.Add(1) - go func(pkgBuilds []string) { - defer wg.Done() - for _, pkgbuild := range pkgBuilds { - mPkgbuild := PKGBUILD(pkgbuild) - if mPkgbuild.FullRepo() == "trunk" || !Contains(conf.Repos, mPkgbuild.Repo()) || - containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) { - continue - } - - for _, march := range conf.March { - dbPkg, dbErr := db.DbPackage.Query().Where( - dbpackage.And( - dbpackage.Pkgbase(mPkgbuild.PkgBase()), - dbpackage.RepositoryEQ(dbpackage.Repository(mPkgbuild.Repo())), - dbpackage.March(march), - ), - ).Only(context.Background()) - - if ent.IsNotFound(dbErr) { - log.Debugf("[%s/%s] Package not found in database", mPkgbuild.Repo(), mPkgbuild.PkgBase()) - } else if err != nil { - log.Errorf("[%s/%s] Problem querying db for package: %v", mPkgbuild.Repo(), mPkgbuild.PkgBase(), dbErr) - } - - // compare b3sum of PKGBUILD file to hash in database, only proceed if hash differs - // reduces the amount of PKGBUILDs that need to be parsed with makepkg, which is _really_ slow, significantly - b3s, err := b3sum(pkgbuild) - if err != nil { - log.Fatalf("Error hashing PKGBUILD: %v", err) - } - - 
if dbPkg != nil && b3s == dbPkg.Hash { - log.Debugf("[%s/%s] Skipped: PKGBUILD hash matches db (%s)", mPkgbuild.Repo(), mPkgbuild.PkgBase(), b3s) - continue - } else if dbPkg != nil && b3s != dbPkg.Hash && dbPkg.SrcinfoHash != b3s { - log.Debugf("[%s/%s] srcinfo cleared", mPkgbuild.Repo(), mPkgbuild.PkgBase()) - dbPkg = dbPkg.Update().ClearSrcinfo().SaveX(context.Background()) - } - - proto := &ProtoPackage{ - Pkgbuild: pkgbuild, - Pkgbase: mPkgbuild.PkgBase(), - Repo: dbpackage.Repository(mPkgbuild.Repo()), - March: march, - FullRepo: mPkgbuild.Repo() + "-" + march, - Hash: b3s, - DBPackage: dbPkg, - } - - _, err = proto.isEligible(ctx) - if err != nil { - log.Infof("Unable to determine status for package %s: %v", proto.Pkgbase, err) - b.repoPurge[proto.FullRepo] <- []*ProtoPackage{proto} - } else if proto.DBPackage != nil { - proto.DBPackage.Update().SetPkgbuild(proto.Pkgbuild).ExecX(ctx) - } - } - } - }(pkgBuilds[cur : cur+step]) - - cur += step - } - - wg.Wait() - return nil -} - func statusID2string(s dbpackage.Status) string { switch s { case dbpackage.StatusSkipped: @@ -269,37 +174,17 @@ func cleanBuildDir(dir, chrootDir string) error { return nil } -func (b *BuildManager) queue() ([]*ProtoPackage, error) { - unsortedQueue, err := genQueue() - if err != nil { - return nil, fmt.Errorf("error building queue: %w", err) - } - - sort.Slice(unsortedQueue, func(i, j int) bool { - return unsortedQueue[i].Priority() < unsortedQueue[j].Priority() - }) - - return unsortedQueue, nil -} - -func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) error { - for _, pkg := range queue { - if err := b.sem.Acquire(ctx, 1); err != nil { - return err +func pkgList2MaxMem(pkgList []*ProtoPackage) datasize.ByteSize { + var sum uint64 + for _, pkg := range pkgList { + if pkg.DBPackage.MaxRss != nil { + sum += uint64(*pkg.DBPackage.MaxRss) } - - go func(pkg *ProtoPackage) { - defer b.sem.Release(1) - dur, err := pkg.build(ctx) - if err != nil { - 
log.Warningf("error building package %s->%s->%s in %s: %s", pkg.March, pkg.Repo, pkg.Pkgbase, dur, err) - b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} - } else { - log.Infof("Build successful: %s (%s)", pkg.Pkgbase, dur) - } - }(pkg) } - return nil + + // multiply by Kibibyte here, since rusage is in kb + // https://man.archlinux.org/man/core/man-pages/getrusage.2.en#ru_maxrss + return datasize.ByteSize(sum) * datasize.KB } func genQueue() ([]*ProtoPackage, error) { @@ -497,258 +382,6 @@ func setupChroot() error { return nil } -func housekeeping(repo, march string, wg *sync.WaitGroup) error { - defer wg.Done() - fullRepo := repo + "-" + march - log.Debugf("[%s] Start housekeeping", fullRepo) - packages, err := Glob(filepath.Join(conf.Basedir.Repo, fullRepo, "/**/*.pkg.tar.zst")) - if err != nil { - return err - } - - log.Debugf("[HK/%s] removing orphans, signature check", fullRepo) - for _, path := range packages { - mPackage := Package(path) - - dbPkg, err := mPackage.DBPackage(db) - if ent.IsNotFound(err) { - log.Infof("[HK/%s] removing orphan %s", fullRepo, filepath.Base(path)) - pkg := &ProtoPackage{ - FullRepo: mPackage.FullRepo(), - PkgFiles: []string{path}, - March: mPackage.MArch(), - } - buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} - continue - } else if err != nil { - log.Warningf("[HK/%s] Problem fetching package from db for %s: %v", fullRepo, path, err) - continue - } - - pkg := &ProtoPackage{ - Pkgbase: dbPkg.Pkgbase, - Repo: mPackage.Repo(), - FullRepo: mPackage.FullRepo(), - DBPackage: dbPkg, - March: mPackage.MArch(), - Arch: mPackage.Arch(), - } - - var upstream string - switch pkg.DBPackage.Repository { - case dbpackage.RepositoryCore, dbpackage.RepositoryExtra: - upstream = "upstream-core-extra" - case dbpackage.RepositoryCommunity: - upstream = "upstream-community" - } - pkg.Pkgbuild = filepath.Join(conf.Basedir.Work, upstreamDir, upstream, dbPkg.Pkgbase, "repos", - pkg.DBPackage.Repository.String()+"-"+conf.Arch, 
"PKGBUILD") - - // check if package is still part of repo - dbs, err := alpmHandle.SyncDBs() - if err != nil { - return err - } - buildManager.alpmMutex.Lock() - pkgResolved, err := dbs.FindSatisfier(mPackage.Name()) - buildManager.alpmMutex.Unlock() - if err != nil || pkgResolved.DB().Name() != pkg.DBPackage.Repository.String() || pkgResolved.DB().Name() != pkg.Repo.String() || - pkgResolved.Architecture() != pkg.Arch || pkgResolved.Name() != mPackage.Name() { - // package not found on mirror/db -> not part of any repo anymore - log.Infof("[HK/%s/%s] not included in repo", pkg.FullRepo, mPackage.Name()) - buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} - err = db.DbPackage.DeleteOne(pkg.DBPackage).Exec(context.Background()) - if err != nil { - return err - } - continue - } - - if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) { - err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(context.Background()) - if err != nil { - return err - } - // check if pkg signature is valid - valid, err := mPackage.HasValidSignature() - if err != nil { - return err - } - if !valid { - log.Infof("[HK/%s/%s] invalid package signature", pkg.FullRepo, pkg.Pkgbase) - buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg} - continue - } - } - - // compare db-version with repo version - repoVer, err := pkg.repoVersion() - if err == nil && repoVer != dbPkg.RepoVersion { - log.Infof("[HK/%s/%s] update %s->%s in db", pkg.FullRepo, pkg.Pkgbase, dbPkg.RepoVersion, repoVer) - pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearHash().Save(context.Background()) - if err != nil { - return err - } - } - } - - // check all packages from db for existence - dbPackages, err := db.DbPackage.Query().Where( - dbpackage.And( - dbpackage.RepositoryEQ(dbpackage.Repository(repo)), - dbpackage.March(march), - )).All(context.Background()) - if err != nil { - return err - } - - log.Debugf("[HK/%s] checking %d existing package-files", 
fullRepo, len(dbPackages)) - - for _, dbPkg := range dbPackages { - pkg := &ProtoPackage{ - Pkgbase: dbPkg.Pkgbase, - Repo: dbPkg.Repository, - March: dbPkg.March, - FullRepo: dbPkg.Repository.String() + "-" + dbPkg.March, - DBPackage: dbPkg, - } - - if !pkg.isAvailable(alpmHandle) { - log.Infof("[HK/%s/%s] not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase) - err = db.DbPackage.DeleteOne(dbPkg).Exec(context.Background()) - if err != nil { - log.Errorf("[HK] Error deleting package %s: %v", dbPkg.Pkgbase, err) - } - continue - } - - switch { - case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion != "": - var existingSplits []string - var missingSplits []string - for _, splitPkg := range dbPkg.Packages { - pkgFile := filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, - splitPkg+"-"+dbPkg.RepoVersion+"-"+conf.Arch+".pkg.tar.zst") - _, err = os.Stat(pkgFile) - switch { - case os.IsNotExist(err): - missingSplits = append(missingSplits, splitPkg) - case err != nil: - log.Warningf("[HK] error reading package-file %s: %v", splitPkg, err) - default: - existingSplits = append(existingSplits, pkgFile) - } - } - if len(missingSplits) > 0 { - log.Infof("[HK/%s] missing split-package(s) %s for pkgbase %s", fullRepo, missingSplits, dbPkg.Pkgbase) - pkg.DBPackage, err = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background()) - if err != nil { - return err - } - - pkg := &ProtoPackage{ - FullRepo: fullRepo, - PkgFiles: existingSplits, - March: march, - DBPackage: dbPkg, - } - buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg} - } - case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "": - log.Infof("[HK] reseting missing package %s with no repo version", dbPkg.Pkgbase) - err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearHash().ClearRepoVersion().Exec(context.Background()) - if err != nil { - return err - } - case dbPkg.Status == dbpackage.StatusSkipped && 
dbPkg.RepoVersion != "" && strings.HasPrefix(dbPkg.SkipReason, "blacklisted"): - log.Infof("[HK] delete blacklisted package %s", dbPkg.Pkgbase) - pkg := &ProtoPackage{ - FullRepo: fullRepo, - March: march, - DBPackage: dbPkg, - } - buildManager.repoPurge[fullRepo] <- []*ProtoPackage{pkg} - } - } - - log.Debugf("[HK/%s] all tasks finished", fullRepo) - return nil -} - -func logHK() error { - // check if package for log exists and if error can be fixed by rebuild - logFiles, err := Glob(filepath.Join(conf.Basedir.Repo, logDir, "/**/*.log")) - if err != nil { - return err - } - - for _, logFile := range logFiles { - pathSplit := strings.Split(logFile, string(filepath.Separator)) - extSplit := strings.Split(filepath.Base(logFile), ".") - pkgbase := strings.Join(extSplit[:len(extSplit)-1], ".") - march := pathSplit[len(pathSplit)-2] - - pkg := ProtoPackage{ - Pkgbase: pkgbase, - March: march, - } - - if exists, err := pkg.exists(); err != nil { - return err - } else if !exists { - _ = os.Remove(logFile) - continue - } - - pkgSkipped, err := db.DbPackage.Query().Where( - dbpackage.Pkgbase(pkg.Pkgbase), - dbpackage.March(pkg.March), - dbpackage.StatusEQ(dbpackage.StatusSkipped), - ).Exist(context.Background()) - if err != nil { - return err - } - - if pkgSkipped { - _ = os.Remove(logFile) - continue - } - - logContent, err := os.ReadFile(logFile) - if err != nil { - return err - } - sLogContent := string(logContent) - - if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) { - rows, err := db.DbPackage.Update().Where(dbpackage.And(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March), - dbpackage.StatusEQ(dbpackage.StatusFailed))).ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background()) - if err != nil { - return err - } - - if rows > 0 { - log.Infof("[HK/%s/%s] fixable build-error detected, requeueing package (%d)", pkg.March, pkg.Pkgbase, rows) - } - } else if 
reLdError.MatchString(sLogContent) || reRustLTOError.MatchString(sLogContent) { - rows, err := db.DbPackage.Update().Where( - dbpackage.Pkgbase(pkg.Pkgbase), - dbpackage.March(pkg.March), - dbpackage.StatusEQ(dbpackage.StatusFailed), - dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled), - ).ClearHash().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(context.Background()) - if err != nil { - return err - } - - if rows > 0 { - log.Infof("[HK/%s/%s] fixable build-error detected (linker-error), requeueing package (%d)", pkg.March, pkg.Pkgbase, rows) - } - } - } - return nil -} - func syncMarchs() error { files, err := os.ReadDir(conf.Basedir.Repo) if err != nil { @@ -781,8 +414,8 @@ func syncMarchs() error { for _, repo := range conf.Repos { fRepo := fmt.Sprintf("%s-%s", repo, march) repos = append(repos, fRepo) - buildManager.repoAdd[fRepo] = make(chan []*ProtoPackage, conf.Build.Worker) - buildManager.repoPurge[fRepo] = make(chan []*ProtoPackage, 10000) //nolint:gomnd + buildManager.repoAdd[fRepo] = make(chan []*ProtoPackage, 1000) //nolint:gomnd + buildManager.repoPurge[fRepo] = make(chan []*ProtoPackage, 1000) //nolint:gomnd go buildManager.repoWorker(fRepo) if _, err := os.Stat(filepath.Join(conf.Basedir.Repo, fRepo, "os", conf.Arch)); os.IsNotExist(err) { @@ -926,6 +559,16 @@ func setupMakepkg(march string, flags map[string]any) error { return nil } +func ContainsPkg(pkgs []*ProtoPackage, pkg *ProtoPackage, repoSensitive bool) bool { + for _, tPkg := range pkgs { + if tPkg.PkgbaseEquals(pkg, repoSensitive) { + return true + } + } + + return false +} + func Contains(s any, str string) bool { switch v := s.(type) { case []string: @@ -958,19 +601,6 @@ func Find[T comparable](arr []T, match T) int { return -1 } -func Unique[T comparable](arr []T) []T { - occurred := map[T]bool{} - var result []T - for e := range arr { - if !occurred[arr[e]] { - occurred[arr[e]] = true - result = append(result, arr[e]) - } - } - - return 
result -} - func Replace[T comparable](arr []T, replace, with T) []T { for i, v := range arr { if v == replace {