switched to using state repo & separate package repos
This commit is contained in:
parent
786b70b842
commit
e079526d4c
280
buildmanager.go
280
buildmanager.go
@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/sethvargo/go-retry"
|
||||
@ -10,7 +11,6 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent"
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||
"strings"
|
||||
@ -30,91 +30,6 @@ type BuildManager struct {
|
||||
queueSignal chan struct{}
|
||||
}
|
||||
|
||||
func (b *BuildManager) refreshSRCINFOs(ctx context.Context, path string) error {
|
||||
pkgBuilds, err := Glob(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error scanning for PKGBUILDs: %w", err)
|
||||
}
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
workerChan := make(chan string, runtime.NumCPU())
|
||||
|
||||
for o := 0; o < runtime.NumCPU(); o++ {
|
||||
wg.Add(1)
|
||||
go b.SRCINFOWorker(ctx, workerChan, wg)
|
||||
}
|
||||
|
||||
go func() {
|
||||
for _, pkgbuild := range pkgBuilds {
|
||||
workerChan <- pkgbuild
|
||||
}
|
||||
close(workerChan)
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BuildManager) SRCINFOWorker(ctx context.Context, workIn chan string, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for pkgbuild := range workIn {
|
||||
mPkgbuild := PKGBUILD(pkgbuild)
|
||||
if mPkgbuild.FullRepo() == "trunk" || !Contains(conf.Repos, mPkgbuild.Repo()) ||
|
||||
containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, march := range conf.March {
|
||||
dbPkg, dbErr := db.DbPackage.Query().Where(
|
||||
dbpackage.And(
|
||||
dbpackage.Pkgbase(mPkgbuild.PkgBase()),
|
||||
dbpackage.RepositoryEQ(dbpackage.Repository(mPkgbuild.Repo())),
|
||||
dbpackage.March(march),
|
||||
),
|
||||
).Only(context.Background())
|
||||
|
||||
if ent.IsNotFound(dbErr) {
|
||||
log.Debugf("[%s/%s] Package not found in database", mPkgbuild.Repo(), mPkgbuild.PkgBase())
|
||||
} else if dbErr != nil {
|
||||
log.Errorf("[%s/%s] Problem querying db for package: %v", mPkgbuild.Repo(), mPkgbuild.PkgBase(), dbErr)
|
||||
}
|
||||
|
||||
// compare b3sum of PKGBUILD file to hash in database, only proceed if hash differs
|
||||
// reduces the amount of PKGBUILDs that need to be parsed with makepkg, which is _really_ slow, significantly
|
||||
b3s, err := b3sum(pkgbuild)
|
||||
if err != nil {
|
||||
log.Errorf("Error hashing PKGBUILD: %v", err)
|
||||
}
|
||||
|
||||
if dbPkg != nil && b3s == dbPkg.Hash {
|
||||
log.Debugf("[%s/%s] Skipped: PKGBUILD hash matches db (%s)", mPkgbuild.Repo(), mPkgbuild.PkgBase(), b3s)
|
||||
continue
|
||||
} else if dbPkg != nil && b3s != dbPkg.Hash && dbPkg.SrcinfoHash != b3s {
|
||||
log.Debugf("[%s/%s] srcinfo cleared", mPkgbuild.Repo(), mPkgbuild.PkgBase())
|
||||
dbPkg = dbPkg.Update().ClearSrcinfo().SaveX(context.Background())
|
||||
}
|
||||
|
||||
proto := &ProtoPackage{
|
||||
Pkgbuild: pkgbuild,
|
||||
Pkgbase: mPkgbuild.PkgBase(),
|
||||
Repo: dbpackage.Repository(mPkgbuild.Repo()),
|
||||
March: march,
|
||||
FullRepo: mPkgbuild.Repo() + "-" + march,
|
||||
Hash: b3s,
|
||||
DBPackage: dbPkg,
|
||||
}
|
||||
|
||||
_, err = proto.isEligible(ctx)
|
||||
if err != nil {
|
||||
log.Infof("Unable to determine status for package %s: %v", proto.Pkgbase, err)
|
||||
b.repoPurge[proto.FullRepo] <- []*ProtoPackage{proto}
|
||||
} else if proto.DBPackage != nil {
|
||||
proto.DBPackage.Update().SetPkgbuild(proto.Pkgbuild).ExecX(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) error {
|
||||
var (
|
||||
doneQ []*ProtoPackage
|
||||
@ -194,10 +109,10 @@ func (b *BuildManager) buildQueue(queue []*ProtoPackage, ctx context.Context) er
|
||||
|
||||
go func(pkg *ProtoPackage) {
|
||||
dur, err := pkg.build(ctx)
|
||||
if err != nil {
|
||||
if err != nil && !errors.Is(err, NotEligibleError) {
|
||||
log.Warningf("[Q] error building package %s->%s in %s: %s", pkg.FullRepo, pkg.Pkgbase, dur, err)
|
||||
b.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
|
||||
} else {
|
||||
} else if err == nil {
|
||||
log.Infof("[Q] build successful: %s->%s (%s)", pkg.FullRepo, pkg.Pkgbase, dur)
|
||||
}
|
||||
doneQLock.Lock()
|
||||
@ -289,7 +204,7 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
|
||||
Name: repo,
|
||||
}
|
||||
|
||||
pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).
|
||||
pkgs := db.DBPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).
|
||||
Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx)
|
||||
|
||||
for _, pkg := range pkgs {
|
||||
@ -361,7 +276,7 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
db.DbPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v)
|
||||
db.DBPackage.Query().GroupBy(dbpackage.FieldStatus).Aggregate(ent.Count()).ScanX(ctx, &v)
|
||||
|
||||
for _, c := range v {
|
||||
switch c.Status {
|
||||
@ -381,7 +296,7 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
db.DbPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).
|
||||
db.DBPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).
|
||||
GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2)
|
||||
|
||||
for _, c := range v2 {
|
||||
@ -436,36 +351,37 @@ func (b *BuildManager) repoWorker(repo string) {
|
||||
}
|
||||
|
||||
for _, pkg := range pkgL {
|
||||
pkg.toDBPackage(true)
|
||||
err = pkg.toDBPackage(true)
|
||||
if err != nil {
|
||||
log.Warningf("error getting db entry for %s: %v", pkg.Pkgbase, err)
|
||||
continue
|
||||
}
|
||||
|
||||
pkgUpd := pkg.DBPackage.Update().
|
||||
SetStatus(dbpackage.StatusLatest).
|
||||
ClearSkipReason().
|
||||
SetRepoVersion(pkg.Version).
|
||||
SetTagRev(pkg.State.TagRev)
|
||||
|
||||
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March,
|
||||
pkg.DBPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil {
|
||||
pkg.DBPackage = pkg.DBPackage.Update().
|
||||
SetStatus(dbpackage.StatusLatest).
|
||||
ClearSkipReason().
|
||||
SetDebugSymbols(dbpackage.DebugSymbolsAvailable).
|
||||
SetRepoVersion(pkg.Version).
|
||||
SetHash(pkg.Hash).
|
||||
SaveX(context.Background())
|
||||
pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsAvailable)
|
||||
} else {
|
||||
pkg.DBPackage = pkg.DBPackage.Update().
|
||||
SetStatus(dbpackage.StatusLatest).
|
||||
ClearSkipReason().
|
||||
SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable).
|
||||
SetRepoVersion(pkg.Version).
|
||||
SetHash(pkg.Hash).
|
||||
SaveX(context.Background())
|
||||
pkgUpd = pkgUpd.SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable)
|
||||
}
|
||||
pkg.DBPackage = pkgUpd.SaveX(context.Background())
|
||||
}
|
||||
|
||||
cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec
|
||||
cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1")
|
||||
res, err = cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Warningf("Error running paccache: %v", err)
|
||||
log.Warningf("error running paccache: %v", err)
|
||||
}
|
||||
|
||||
err = updateLastUpdated()
|
||||
if err != nil {
|
||||
log.Warningf("Error updating lastupdate: %v", err)
|
||||
log.Warningf("error updating lastupdate: %v", err)
|
||||
}
|
||||
b.repoWG.Done()
|
||||
case pkgL := <-b.repoPurge[repo]:
|
||||
@ -475,7 +391,7 @@ func (b *BuildManager) repoWorker(repo string) {
|
||||
}
|
||||
if len(pkg.PkgFiles) == 0 {
|
||||
if err := pkg.findPkgFiles(); err != nil {
|
||||
log.Warningf("[%s/%s] Unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
||||
log.Warningf("[%s/%s] unable to find files: %v", pkg.FullRepo, pkg.Pkgbase, err)
|
||||
continue
|
||||
} else if len(pkg.PkgFiles) == 0 {
|
||||
continue
|
||||
@ -500,11 +416,11 @@ func (b *BuildManager) repoWorker(repo string) {
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil && cmd.ProcessState.ExitCode() == 1 {
|
||||
log.Warningf("Error while deleting package %s: %s", pkg.Pkgbase, string(res))
|
||||
log.Warningf("error while deleting package %s: %s", pkg.Pkgbase, string(res))
|
||||
}
|
||||
|
||||
if pkg.DBPackage != nil {
|
||||
_ = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().Exec(context.Background())
|
||||
_ = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().Exec(context.Background())
|
||||
}
|
||||
|
||||
for _, file := range pkg.PkgFiles {
|
||||
@ -513,7 +429,7 @@ func (b *BuildManager) repoWorker(repo string) {
|
||||
}
|
||||
err = updateLastUpdated()
|
||||
if err != nil {
|
||||
log.Warningf("Error updating lastupdate: %v", err)
|
||||
log.Warningf("error updating lastupdate: %v", err)
|
||||
}
|
||||
b.repoWG.Done()
|
||||
}
|
||||
@ -522,38 +438,36 @@ func (b *BuildManager) repoWorker(repo string) {
|
||||
}
|
||||
|
||||
func (b *BuildManager) syncWorker(ctx context.Context) error {
|
||||
err := os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0o755)
|
||||
err := os.MkdirAll(filepath.Join(conf.Basedir.Work), 0o755)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating upstream dir: %v", err)
|
||||
log.Fatalf("error creating work dir %s: %v", conf.Basedir.Work, err)
|
||||
}
|
||||
|
||||
for {
|
||||
for gitDir, gitURL := range conf.Svn2git {
|
||||
gitPath := filepath.Join(conf.Basedir.Work, upstreamDir, gitDir)
|
||||
gitPath := filepath.Join(conf.Basedir.Work, stateDir)
|
||||
|
||||
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
||||
cmd := exec.Command("git", "clone", "--depth=1", gitURL, gitPath)
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Fatalf("Error running git clone: %v", err)
|
||||
}
|
||||
} else if err == nil {
|
||||
cmd := exec.Command("git", "reset", "--hard")
|
||||
cmd.Dir = gitPath
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Fatalf("Error running git reset: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
||||
cmd := exec.Command("git", "clone", "--depth=1", conf.StateRepo, gitPath)
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Fatalf("error cloning state repo: %v", err)
|
||||
}
|
||||
} else if err == nil {
|
||||
cmd := exec.Command("git", "reset", "--hard")
|
||||
cmd.Dir = gitPath
|
||||
res, err := cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Fatalf("error reseting state repo: %v", err)
|
||||
}
|
||||
|
||||
cmd = exec.Command("git", "pull")
|
||||
cmd.Dir = gitPath
|
||||
res, err = cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Warningf("Failed to update git repo %s: %v", gitDir, err)
|
||||
}
|
||||
cmd = exec.Command("git", "pull")
|
||||
cmd.Dir = gitPath
|
||||
res, err = cmd.CombinedOutput()
|
||||
log.Debug(string(res))
|
||||
if err != nil {
|
||||
log.Warningf("failed to update state repo: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -581,12 +495,12 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
||||
b.alpmMutex.Lock()
|
||||
err = alpmHandle.Release()
|
||||
if err != nil {
|
||||
log.Fatalf("Error releasing ALPM handle: %v", err)
|
||||
log.Fatalf("error releasing ALPM handle: %v", err)
|
||||
}
|
||||
|
||||
if err := retry.Fibonacci(ctx, 1*time.Second, func(ctx context.Context) error {
|
||||
if err := setupChroot(); err != nil {
|
||||
log.Warningf("Unable to upgrade chroot, trying again later.")
|
||||
log.Warningf("unable to upgrade chroot, trying again later")
|
||||
return retry.RetryableError(err)
|
||||
}
|
||||
return nil
|
||||
@ -597,30 +511,15 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
||||
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
|
||||
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
|
||||
if err != nil {
|
||||
log.Warningf("Error while ALPM-init: %v", err)
|
||||
log.Warningf("error while alpm-init: %v", err)
|
||||
}
|
||||
b.alpmMutex.Unlock()
|
||||
|
||||
// do refreshSRCINFOs twice here
|
||||
// since MirrorLatest depends on the DB being correct, there can be packages queued which should not be queued,
|
||||
// so we check them twice to eliminate those.
|
||||
log.Debugf("generating build-queue for PKGBUILDs found in %s", filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
|
||||
err = b.refreshSRCINFOs(ctx, filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
|
||||
queue, err := b.genQueue()
|
||||
if err != nil {
|
||||
log.Fatalf("error refreshing PKGBUILDs: %v", err)
|
||||
}
|
||||
log.Debugf("regenerating build-queue for PKGBUILDs found in %s", filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
|
||||
err = b.refreshSRCINFOs(ctx, filepath.Join(conf.Basedir.Work, upstreamDir, "/**/PKGBUILD"))
|
||||
if err != nil {
|
||||
log.Fatalf("error refreshing PKGBUILDs: %v", err)
|
||||
}
|
||||
|
||||
queue, err := genQueue()
|
||||
if err != nil {
|
||||
log.Warningf("Error building buildQueue: %v", err)
|
||||
log.Errorf("error building queue: %v", err)
|
||||
} else {
|
||||
log.Debugf("buildQueue with %d items", len(queue))
|
||||
|
||||
log.Debugf("build-queue with %d items", len(queue))
|
||||
err = b.buildQueue(queue, ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -631,7 +530,7 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
||||
for _, repo := range repos {
|
||||
err = movePackagesLive(repo)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Error moving packages live: %v", repo, err)
|
||||
log.Errorf("[%s] error moving packages live: %v", repo, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -642,3 +541,62 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
|
||||
time.Sleep(time.Duration(*checkInterval) * time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BuildManager) genQueue() ([]*ProtoPackage, error) {
|
||||
stateFiles, err := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/*"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error scanning for state-files: %w", err)
|
||||
}
|
||||
|
||||
var pkgbuilds []*ProtoPackage
|
||||
for _, stateFile := range stateFiles {
|
||||
stat, err := os.Stat(stateFile)
|
||||
if err != nil || stat.IsDir() || strings.Contains(stateFile, ".git") {
|
||||
continue
|
||||
}
|
||||
|
||||
repo, subRepo, arch, err := stateFileMeta(stateFile)
|
||||
if err != nil {
|
||||
log.Warningf("[QG] error generating statefile metadata %s: %v", stateFile, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !Contains(conf.Repos, repo) || (subRepo != nil && Contains(conf.Blacklist.Repo, *subRepo) || arch == "any") {
|
||||
continue
|
||||
}
|
||||
|
||||
rawState, err := os.ReadFile(stateFile)
|
||||
if err != nil {
|
||||
log.Warningf("[QG] cannot read statefile %s: %v", stateFile, err)
|
||||
continue
|
||||
}
|
||||
|
||||
state, err := parseState(string(rawState))
|
||||
if err != nil {
|
||||
log.Warningf("[QG] cannot parse statefile %s: %v", stateFile, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, march := range conf.March {
|
||||
pkg := &ProtoPackage{
|
||||
Pkgbase: state.Pkgbase,
|
||||
Repo: dbpackage.Repository(repo),
|
||||
March: march,
|
||||
FullRepo: repo + "-" + march,
|
||||
State: state,
|
||||
Version: state.PkgVer,
|
||||
Arch: arch,
|
||||
}
|
||||
|
||||
err = pkg.toDBPackage(true)
|
||||
if err != nil {
|
||||
log.Warningf("[QG] error getting/creating dbpackage %s: %v", state.Pkgbase, err)
|
||||
continue
|
||||
}
|
||||
|
||||
pkgbuilds = append(pkgbuilds, pkg)
|
||||
}
|
||||
}
|
||||
|
||||
return pkgbuilds, nil
|
||||
}
|
||||
|
@ -2,11 +2,8 @@ arch: x86_64
|
||||
repos:
|
||||
- core
|
||||
- extra
|
||||
- community
|
||||
|
||||
svn2git:
|
||||
upstream-core-extra: "https://github.com/archlinux/svntogit-packages.git"
|
||||
upstream-community: "https://github.com/archlinux/svntogit-community.git"
|
||||
state_repo: "https://gitlab.archlinux.org/archlinux/packaging/state.git"
|
||||
|
||||
db:
|
||||
driver: pgx
|
||||
|
124
ent/client.go
124
ent/client.go
@ -21,8 +21,8 @@ type Client struct {
|
||||
config
|
||||
// Schema is the client for creating, migrating and dropping schema.
|
||||
Schema *migrate.Schema
|
||||
// DbPackage is the client for interacting with the DbPackage builders.
|
||||
DbPackage *DbPackageClient
|
||||
// DBPackage is the client for interacting with the DBPackage builders.
|
||||
DBPackage *DBPackageClient
|
||||
}
|
||||
|
||||
// NewClient creates a new client configured with the given options.
|
||||
@ -36,7 +36,7 @@ func NewClient(opts ...Option) *Client {
|
||||
|
||||
func (c *Client) init() {
|
||||
c.Schema = migrate.NewSchema(c.driver)
|
||||
c.DbPackage = NewDbPackageClient(c.config)
|
||||
c.DBPackage = NewDBPackageClient(c.config)
|
||||
}
|
||||
|
||||
type (
|
||||
@ -119,7 +119,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
||||
return &Tx{
|
||||
ctx: ctx,
|
||||
config: cfg,
|
||||
DbPackage: NewDbPackageClient(cfg),
|
||||
DBPackage: NewDBPackageClient(cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -139,14 +139,14 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
|
||||
return &Tx{
|
||||
ctx: ctx,
|
||||
config: cfg,
|
||||
DbPackage: NewDbPackageClient(cfg),
|
||||
DBPackage: NewDBPackageClient(cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
|
||||
//
|
||||
// client.Debug().
|
||||
// DbPackage.
|
||||
// DBPackage.
|
||||
// Query().
|
||||
// Count(ctx)
|
||||
func (c *Client) Debug() *Client {
|
||||
@ -168,111 +168,111 @@ func (c *Client) Close() error {
|
||||
// Use adds the mutation hooks to all the entity clients.
|
||||
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
|
||||
func (c *Client) Use(hooks ...Hook) {
|
||||
c.DbPackage.Use(hooks...)
|
||||
c.DBPackage.Use(hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds the query interceptors to all the entity clients.
|
||||
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
|
||||
func (c *Client) Intercept(interceptors ...Interceptor) {
|
||||
c.DbPackage.Intercept(interceptors...)
|
||||
c.DBPackage.Intercept(interceptors...)
|
||||
}
|
||||
|
||||
// Mutate implements the ent.Mutator interface.
|
||||
func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
|
||||
switch m := m.(type) {
|
||||
case *DbPackageMutation:
|
||||
return c.DbPackage.mutate(ctx, m)
|
||||
case *DBPackageMutation:
|
||||
return c.DBPackage.mutate(ctx, m)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown mutation type %T", m)
|
||||
}
|
||||
}
|
||||
|
||||
// DbPackageClient is a client for the DbPackage schema.
|
||||
type DbPackageClient struct {
|
||||
// DBPackageClient is a client for the DBPackage schema.
|
||||
type DBPackageClient struct {
|
||||
config
|
||||
}
|
||||
|
||||
// NewDbPackageClient returns a client for the DbPackage from the given config.
|
||||
func NewDbPackageClient(c config) *DbPackageClient {
|
||||
return &DbPackageClient{config: c}
|
||||
// NewDBPackageClient returns a client for the DBPackage from the given config.
|
||||
func NewDBPackageClient(c config) *DBPackageClient {
|
||||
return &DBPackageClient{config: c}
|
||||
}
|
||||
|
||||
// Use adds a list of mutation hooks to the hooks stack.
|
||||
// A call to `Use(f, g, h)` equals to `dbpackage.Hooks(f(g(h())))`.
|
||||
func (c *DbPackageClient) Use(hooks ...Hook) {
|
||||
c.hooks.DbPackage = append(c.hooks.DbPackage, hooks...)
|
||||
func (c *DBPackageClient) Use(hooks ...Hook) {
|
||||
c.hooks.DBPackage = append(c.hooks.DBPackage, hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||
// A call to `Intercept(f, g, h)` equals to `dbpackage.Intercept(f(g(h())))`.
|
||||
func (c *DbPackageClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.DbPackage = append(c.inters.DbPackage, interceptors...)
|
||||
func (c *DBPackageClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.DBPackage = append(c.inters.DBPackage, interceptors...)
|
||||
}
|
||||
|
||||
// Create returns a builder for creating a DbPackage entity.
|
||||
func (c *DbPackageClient) Create() *DbPackageCreate {
|
||||
mutation := newDbPackageMutation(c.config, OpCreate)
|
||||
return &DbPackageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
// Create returns a builder for creating a DBPackage entity.
|
||||
func (c *DBPackageClient) Create() *DBPackageCreate {
|
||||
mutation := newDBPackageMutation(c.config, OpCreate)
|
||||
return &DBPackageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// CreateBulk returns a builder for creating a bulk of DbPackage entities.
|
||||
func (c *DbPackageClient) CreateBulk(builders ...*DbPackageCreate) *DbPackageCreateBulk {
|
||||
return &DbPackageCreateBulk{config: c.config, builders: builders}
|
||||
// CreateBulk returns a builder for creating a bulk of DBPackage entities.
|
||||
func (c *DBPackageClient) CreateBulk(builders ...*DBPackageCreate) *DBPackageCreateBulk {
|
||||
return &DBPackageCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for DbPackage.
|
||||
func (c *DbPackageClient) Update() *DbPackageUpdate {
|
||||
mutation := newDbPackageMutation(c.config, OpUpdate)
|
||||
return &DbPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
// Update returns an update builder for DBPackage.
|
||||
func (c *DBPackageClient) Update() *DBPackageUpdate {
|
||||
mutation := newDBPackageMutation(c.config, OpUpdate)
|
||||
return &DBPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOne returns an update builder for the given entity.
|
||||
func (c *DbPackageClient) UpdateOne(dp *DbPackage) *DbPackageUpdateOne {
|
||||
mutation := newDbPackageMutation(c.config, OpUpdateOne, withDbPackage(dp))
|
||||
return &DbPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
func (c *DBPackageClient) UpdateOne(dp *DBPackage) *DBPackageUpdateOne {
|
||||
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackage(dp))
|
||||
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOneID returns an update builder for the given id.
|
||||
func (c *DbPackageClient) UpdateOneID(id int) *DbPackageUpdateOne {
|
||||
mutation := newDbPackageMutation(c.config, OpUpdateOne, withDbPackageID(id))
|
||||
return &DbPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
func (c *DBPackageClient) UpdateOneID(id int) *DBPackageUpdateOne {
|
||||
mutation := newDBPackageMutation(c.config, OpUpdateOne, withDBPackageID(id))
|
||||
return &DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// Delete returns a delete builder for DbPackage.
|
||||
func (c *DbPackageClient) Delete() *DbPackageDelete {
|
||||
mutation := newDbPackageMutation(c.config, OpDelete)
|
||||
return &DbPackageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
// Delete returns a delete builder for DBPackage.
|
||||
func (c *DBPackageClient) Delete() *DBPackageDelete {
|
||||
mutation := newDBPackageMutation(c.config, OpDelete)
|
||||
return &DBPackageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// DeleteOne returns a builder for deleting the given entity.
|
||||
func (c *DbPackageClient) DeleteOne(dp *DbPackage) *DbPackageDeleteOne {
|
||||
func (c *DBPackageClient) DeleteOne(dp *DBPackage) *DBPackageDeleteOne {
|
||||
return c.DeleteOneID(dp.ID)
|
||||
}
|
||||
|
||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||
func (c *DbPackageClient) DeleteOneID(id int) *DbPackageDeleteOne {
|
||||
func (c *DBPackageClient) DeleteOneID(id int) *DBPackageDeleteOne {
|
||||
builder := c.Delete().Where(dbpackage.ID(id))
|
||||
builder.mutation.id = &id
|
||||
builder.mutation.op = OpDeleteOne
|
||||
return &DbPackageDeleteOne{builder}
|
||||
return &DBPackageDeleteOne{builder}
|
||||
}
|
||||
|
||||
// Query returns a query builder for DbPackage.
|
||||
func (c *DbPackageClient) Query() *DbPackageQuery {
|
||||
return &DbPackageQuery{
|
||||
// Query returns a query builder for DBPackage.
|
||||
func (c *DBPackageClient) Query() *DBPackageQuery {
|
||||
return &DBPackageQuery{
|
||||
config: c.config,
|
||||
ctx: &QueryContext{Type: TypeDbPackage},
|
||||
ctx: &QueryContext{Type: TypeDBPackage},
|
||||
inters: c.Interceptors(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a DbPackage entity by its id.
|
||||
func (c *DbPackageClient) Get(ctx context.Context, id int) (*DbPackage, error) {
|
||||
// Get returns a DBPackage entity by its id.
|
||||
func (c *DBPackageClient) Get(ctx context.Context, id int) (*DBPackage, error) {
|
||||
return c.Query().Where(dbpackage.ID(id)).Only(ctx)
|
||||
}
|
||||
|
||||
// GetX is like Get, but panics if an error occurs.
|
||||
func (c *DbPackageClient) GetX(ctx context.Context, id int) *DbPackage {
|
||||
func (c *DBPackageClient) GetX(ctx context.Context, id int) *DBPackage {
|
||||
obj, err := c.Get(ctx, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -281,36 +281,36 @@ func (c *DbPackageClient) GetX(ctx context.Context, id int) *DbPackage {
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *DbPackageClient) Hooks() []Hook {
|
||||
return c.hooks.DbPackage
|
||||
func (c *DBPackageClient) Hooks() []Hook {
|
||||
return c.hooks.DBPackage
|
||||
}
|
||||
|
||||
// Interceptors returns the client interceptors.
|
||||
func (c *DbPackageClient) Interceptors() []Interceptor {
|
||||
return c.inters.DbPackage
|
||||
func (c *DBPackageClient) Interceptors() []Interceptor {
|
||||
return c.inters.DBPackage
|
||||
}
|
||||
|
||||
func (c *DbPackageClient) mutate(ctx context.Context, m *DbPackageMutation) (Value, error) {
|
||||
func (c *DBPackageClient) mutate(ctx context.Context, m *DBPackageMutation) (Value, error) {
|
||||
switch m.Op() {
|
||||
case OpCreate:
|
||||
return (&DbPackageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
return (&DBPackageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdate:
|
||||
return (&DbPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
return (&DBPackageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdateOne:
|
||||
return (&DbPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
return (&DBPackageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpDelete, OpDeleteOne:
|
||||
return (&DbPackageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
return (&DBPackageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown DbPackage mutation op: %q", m.Op())
|
||||
return nil, fmt.Errorf("ent: unknown DBPackage mutation op: %q", m.Op())
|
||||
}
|
||||
}
|
||||
|
||||
// hooks and interceptors per client, for fast access.
|
||||
type (
|
||||
hooks struct {
|
||||
DbPackage []ent.Hook
|
||||
DBPackage []ent.Hook
|
||||
}
|
||||
inters struct {
|
||||
DbPackage []ent.Interceptor
|
||||
DBPackage []ent.Interceptor
|
||||
}
|
||||
)
|
||||
|
@ -13,8 +13,8 @@ import (
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||
)
|
||||
|
||||
// DbPackage is the model entity for the DbPackage schema.
|
||||
type DbPackage struct {
|
||||
// DBPackage is the model entity for the DBPackage schema.
|
||||
type DBPackage struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int `json:"id,omitempty"`
|
||||
@ -38,8 +38,6 @@ type DbPackage struct {
|
||||
BuildTimeStart time.Time `json:"build_time_start,omitempty"`
|
||||
// Updated holds the value of the "updated" field.
|
||||
Updated time.Time `json:"updated,omitempty"`
|
||||
// Hash holds the value of the "hash" field.
|
||||
Hash string `json:"hash,omitempty"`
|
||||
// Lto holds the value of the "lto" field.
|
||||
Lto dbpackage.Lto `json:"lto,omitempty"`
|
||||
// LastVersionBuild holds the value of the "last_version_build" field.
|
||||
@ -58,17 +56,13 @@ type DbPackage struct {
|
||||
IoIn *int64 `json:"io_in,omitempty"`
|
||||
// IoOut holds the value of the "io_out" field.
|
||||
IoOut *int64 `json:"io_out,omitempty"`
|
||||
// Srcinfo holds the value of the "srcinfo" field.
|
||||
Srcinfo *string `json:"srcinfo,omitempty"`
|
||||
// SrcinfoHash holds the value of the "srcinfo_hash" field.
|
||||
SrcinfoHash string `json:"srcinfo_hash,omitempty"`
|
||||
// Pkgbuild holds the value of the "pkgbuild" field.
|
||||
Pkgbuild string `json:"pkgbuild,omitempty"`
|
||||
// TagRev holds the value of the "tag_rev" field.
|
||||
TagRev *string `json:"tag_rev,omitempty"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*DbPackage) scanValues(columns []string) ([]any, error) {
|
||||
func (*DBPackage) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
@ -76,7 +70,7 @@ func (*DbPackage) scanValues(columns []string) ([]any, error) {
|
||||
values[i] = new([]byte)
|
||||
case dbpackage.FieldID, dbpackage.FieldMaxRss, dbpackage.FieldUTime, dbpackage.FieldSTime, dbpackage.FieldIoIn, dbpackage.FieldIoOut:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case dbpackage.FieldPkgbase, dbpackage.FieldStatus, dbpackage.FieldSkipReason, dbpackage.FieldRepository, dbpackage.FieldMarch, dbpackage.FieldVersion, dbpackage.FieldRepoVersion, dbpackage.FieldHash, dbpackage.FieldLto, dbpackage.FieldLastVersionBuild, dbpackage.FieldDebugSymbols, dbpackage.FieldSrcinfo, dbpackage.FieldSrcinfoHash, dbpackage.FieldPkgbuild:
|
||||
case dbpackage.FieldPkgbase, dbpackage.FieldStatus, dbpackage.FieldSkipReason, dbpackage.FieldRepository, dbpackage.FieldMarch, dbpackage.FieldVersion, dbpackage.FieldRepoVersion, dbpackage.FieldLto, dbpackage.FieldLastVersionBuild, dbpackage.FieldDebugSymbols, dbpackage.FieldTagRev:
|
||||
values[i] = new(sql.NullString)
|
||||
case dbpackage.FieldBuildTimeStart, dbpackage.FieldUpdated, dbpackage.FieldLastVerified:
|
||||
values[i] = new(sql.NullTime)
|
||||
@ -88,8 +82,8 @@ func (*DbPackage) scanValues(columns []string) ([]any, error) {
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the DbPackage fields.
|
||||
func (dp *DbPackage) assignValues(columns []string, values []any) error {
|
||||
// to the DBPackage fields.
|
||||
func (dp *DBPackage) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
@ -163,12 +157,6 @@ func (dp *DbPackage) assignValues(columns []string, values []any) error {
|
||||
} else if value.Valid {
|
||||
dp.Updated = value.Time
|
||||
}
|
||||
case dbpackage.FieldHash:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field hash", values[i])
|
||||
} else if value.Valid {
|
||||
dp.Hash = value.String
|
||||
}
|
||||
case dbpackage.FieldLto:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field lto", values[i])
|
||||
@ -228,24 +216,12 @@ func (dp *DbPackage) assignValues(columns []string, values []any) error {
|
||||
dp.IoOut = new(int64)
|
||||
*dp.IoOut = value.Int64
|
||||
}
|
||||
case dbpackage.FieldSrcinfo:
|
||||
case dbpackage.FieldTagRev:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field srcinfo", values[i])
|
||||
return fmt.Errorf("unexpected type %T for field tag_rev", values[i])
|
||||
} else if value.Valid {
|
||||
dp.Srcinfo = new(string)
|
||||
*dp.Srcinfo = value.String
|
||||
}
|
||||
case dbpackage.FieldSrcinfoHash:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field srcinfo_hash", values[i])
|
||||
} else if value.Valid {
|
||||
dp.SrcinfoHash = value.String
|
||||
}
|
||||
case dbpackage.FieldPkgbuild:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field pkgbuild", values[i])
|
||||
} else if value.Valid {
|
||||
dp.Pkgbuild = value.String
|
||||
dp.TagRev = new(string)
|
||||
*dp.TagRev = value.String
|
||||
}
|
||||
default:
|
||||
dp.selectValues.Set(columns[i], values[i])
|
||||
@ -254,34 +230,34 @@ func (dp *DbPackage) assignValues(columns []string, values []any) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the DbPackage.
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the DBPackage.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (dp *DbPackage) Value(name string) (ent.Value, error) {
|
||||
func (dp *DBPackage) Value(name string) (ent.Value, error) {
|
||||
return dp.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this DbPackage.
|
||||
// Note that you need to call DbPackage.Unwrap() before calling this method if this DbPackage
|
||||
// Update returns a builder for updating this DBPackage.
|
||||
// Note that you need to call DBPackage.Unwrap() before calling this method if this DBPackage
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (dp *DbPackage) Update() *DbPackageUpdateOne {
|
||||
return NewDbPackageClient(dp.config).UpdateOne(dp)
|
||||
func (dp *DBPackage) Update() *DBPackageUpdateOne {
|
||||
return NewDBPackageClient(dp.config).UpdateOne(dp)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the DbPackage entity that was returned from a transaction after it was closed,
|
||||
// Unwrap unwraps the DBPackage entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (dp *DbPackage) Unwrap() *DbPackage {
|
||||
func (dp *DBPackage) Unwrap() *DBPackage {
|
||||
_tx, ok := dp.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: DbPackage is not a transactional entity")
|
||||
panic("ent: DBPackage is not a transactional entity")
|
||||
}
|
||||
dp.config.driver = _tx.drv
|
||||
return dp
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (dp *DbPackage) String() string {
|
||||
func (dp *DBPackage) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("DbPackage(")
|
||||
builder.WriteString("DBPackage(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", dp.ID))
|
||||
builder.WriteString("pkgbase=")
|
||||
builder.WriteString(dp.Pkgbase)
|
||||
@ -313,9 +289,6 @@ func (dp *DbPackage) String() string {
|
||||
builder.WriteString("updated=")
|
||||
builder.WriteString(dp.Updated.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("hash=")
|
||||
builder.WriteString(dp.Hash)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("lto=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dp.Lto))
|
||||
builder.WriteString(", ")
|
||||
@ -353,19 +326,13 @@ func (dp *DbPackage) String() string {
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := dp.Srcinfo; v != nil {
|
||||
builder.WriteString("srcinfo=")
|
||||
if v := dp.TagRev; v != nil {
|
||||
builder.WriteString("tag_rev=")
|
||||
builder.WriteString(*v)
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("srcinfo_hash=")
|
||||
builder.WriteString(dp.SrcinfoHash)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("pkgbuild=")
|
||||
builder.WriteString(dp.Pkgbuild)
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// DbPackages is a parsable slice of DbPackage.
|
||||
type DbPackages []*DbPackage
|
||||
// DBPackages is a parsable slice of DBPackage.
|
||||
type DBPackages []*DBPackage
|
||||
|
@ -33,8 +33,6 @@ const (
|
||||
FieldBuildTimeStart = "build_time_start"
|
||||
// FieldUpdated holds the string denoting the updated field in the database.
|
||||
FieldUpdated = "updated"
|
||||
// FieldHash holds the string denoting the hash field in the database.
|
||||
FieldHash = "hash"
|
||||
// FieldLto holds the string denoting the lto field in the database.
|
||||
FieldLto = "lto"
|
||||
// FieldLastVersionBuild holds the string denoting the last_version_build field in the database.
|
||||
@ -53,12 +51,8 @@ const (
|
||||
FieldIoIn = "io_in"
|
||||
// FieldIoOut holds the string denoting the io_out field in the database.
|
||||
FieldIoOut = "io_out"
|
||||
// FieldSrcinfo holds the string denoting the srcinfo field in the database.
|
||||
FieldSrcinfo = "srcinfo"
|
||||
// FieldSrcinfoHash holds the string denoting the srcinfo_hash field in the database.
|
||||
FieldSrcinfoHash = "srcinfo_hash"
|
||||
// FieldPkgbuild holds the string denoting the pkgbuild field in the database.
|
||||
FieldPkgbuild = "pkgbuild"
|
||||
// FieldTagRev holds the string denoting the tag_rev field in the database.
|
||||
FieldTagRev = "tag_rev"
|
||||
// Table holds the table name of the dbpackage in the database.
|
||||
Table = "db_packages"
|
||||
)
|
||||
@ -76,7 +70,6 @@ var Columns = []string{
|
||||
FieldRepoVersion,
|
||||
FieldBuildTimeStart,
|
||||
FieldUpdated,
|
||||
FieldHash,
|
||||
FieldLto,
|
||||
FieldLastVersionBuild,
|
||||
FieldLastVerified,
|
||||
@ -86,9 +79,7 @@ var Columns = []string{
|
||||
FieldSTime,
|
||||
FieldIoIn,
|
||||
FieldIoOut,
|
||||
FieldSrcinfo,
|
||||
FieldSrcinfoHash,
|
||||
FieldPkgbuild,
|
||||
FieldTagRev,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
@ -220,7 +211,7 @@ func DebugSymbolsValidator(ds DebugSymbols) error {
|
||||
}
|
||||
}
|
||||
|
||||
// OrderOption defines the ordering options for the DbPackage queries.
|
||||
// OrderOption defines the ordering options for the DBPackage queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
@ -273,11 +264,6 @@ func ByUpdated(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdated, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByHash orders the results by the hash field.
|
||||
func ByHash(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldHash, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByLto orders the results by the lto field.
|
||||
func ByLto(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldLto, opts...).ToFunc()
|
||||
@ -323,17 +309,7 @@ func ByIoOut(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldIoOut, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySrcinfo orders the results by the srcinfo field.
|
||||
func BySrcinfo(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSrcinfo, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySrcinfoHash orders the results by the srcinfo_hash field.
|
||||
func BySrcinfoHash(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSrcinfoHash, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPkgbuild orders the results by the pkgbuild field.
|
||||
func ByPkgbuild(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPkgbuild, opts...).ToFunc()
|
||||
// ByTagRev orders the results by the tag_rev field.
|
||||
func ByTagRev(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldTagRev, opts...).ToFunc()
|
||||
}
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -13,33 +13,33 @@ import (
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/dbpackage"
|
||||
)
|
||||
|
||||
// DbPackageCreate is the builder for creating a DbPackage entity.
|
||||
type DbPackageCreate struct {
|
||||
// DBPackageCreate is the builder for creating a DBPackage entity.
|
||||
type DBPackageCreate struct {
|
||||
config
|
||||
mutation *DbPackageMutation
|
||||
mutation *DBPackageMutation
|
||||
hooks []Hook
|
||||
}
|
||||
|
||||
// SetPkgbase sets the "pkgbase" field.
|
||||
func (dpc *DbPackageCreate) SetPkgbase(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetPkgbase(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetPkgbase(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetPackages sets the "packages" field.
|
||||
func (dpc *DbPackageCreate) SetPackages(s []string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetPackages(s []string) *DBPackageCreate {
|
||||
dpc.mutation.SetPackages(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (dpc *DbPackageCreate) SetStatus(d dbpackage.Status) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetStatus(d dbpackage.Status) *DBPackageCreate {
|
||||
dpc.mutation.SetStatus(d)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableStatus(d *dbpackage.Status) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableStatus(d *dbpackage.Status) *DBPackageCreate {
|
||||
if d != nil {
|
||||
dpc.SetStatus(*d)
|
||||
}
|
||||
@ -47,13 +47,13 @@ func (dpc *DbPackageCreate) SetNillableStatus(d *dbpackage.Status) *DbPackageCre
|
||||
}
|
||||
|
||||
// SetSkipReason sets the "skip_reason" field.
|
||||
func (dpc *DbPackageCreate) SetSkipReason(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetSkipReason(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetSkipReason(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableSkipReason sets the "skip_reason" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableSkipReason(s *string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableSkipReason(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetSkipReason(*s)
|
||||
}
|
||||
@ -61,25 +61,25 @@ func (dpc *DbPackageCreate) SetNillableSkipReason(s *string) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetRepository sets the "repository" field.
|
||||
func (dpc *DbPackageCreate) SetRepository(d dbpackage.Repository) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetRepository(d dbpackage.Repository) *DBPackageCreate {
|
||||
dpc.mutation.SetRepository(d)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetMarch sets the "march" field.
|
||||
func (dpc *DbPackageCreate) SetMarch(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetMarch(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetMarch(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetVersion sets the "version" field.
|
||||
func (dpc *DbPackageCreate) SetVersion(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetVersion(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetVersion(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableVersion sets the "version" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableVersion(s *string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableVersion(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetVersion(*s)
|
||||
}
|
||||
@ -87,13 +87,13 @@ func (dpc *DbPackageCreate) SetNillableVersion(s *string) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetRepoVersion sets the "repo_version" field.
|
||||
func (dpc *DbPackageCreate) SetRepoVersion(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetRepoVersion(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetRepoVersion(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableRepoVersion sets the "repo_version" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableRepoVersion(s *string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableRepoVersion(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetRepoVersion(*s)
|
||||
}
|
||||
@ -101,13 +101,13 @@ func (dpc *DbPackageCreate) SetNillableRepoVersion(s *string) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetBuildTimeStart sets the "build_time_start" field.
|
||||
func (dpc *DbPackageCreate) SetBuildTimeStart(t time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetBuildTimeStart(t time.Time) *DBPackageCreate {
|
||||
dpc.mutation.SetBuildTimeStart(t)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableBuildTimeStart sets the "build_time_start" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DBPackageCreate {
|
||||
if t != nil {
|
||||
dpc.SetBuildTimeStart(*t)
|
||||
}
|
||||
@ -115,41 +115,27 @@ func (dpc *DbPackageCreate) SetNillableBuildTimeStart(t *time.Time) *DbPackageCr
|
||||
}
|
||||
|
||||
// SetUpdated sets the "updated" field.
|
||||
func (dpc *DbPackageCreate) SetUpdated(t time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetUpdated(t time.Time) *DBPackageCreate {
|
||||
dpc.mutation.SetUpdated(t)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableUpdated sets the "updated" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableUpdated(t *time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableUpdated(t *time.Time) *DBPackageCreate {
|
||||
if t != nil {
|
||||
dpc.SetUpdated(*t)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetHash sets the "hash" field.
|
||||
func (dpc *DbPackageCreate) SetHash(s string) *DbPackageCreate {
|
||||
dpc.mutation.SetHash(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableHash sets the "hash" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableHash(s *string) *DbPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetHash(*s)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetLto sets the "lto" field.
|
||||
func (dpc *DbPackageCreate) SetLto(d dbpackage.Lto) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetLto(d dbpackage.Lto) *DBPackageCreate {
|
||||
dpc.mutation.SetLto(d)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableLto sets the "lto" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableLto(d *dbpackage.Lto) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableLto(d *dbpackage.Lto) *DBPackageCreate {
|
||||
if d != nil {
|
||||
dpc.SetLto(*d)
|
||||
}
|
||||
@ -157,13 +143,13 @@ func (dpc *DbPackageCreate) SetNillableLto(d *dbpackage.Lto) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetLastVersionBuild sets the "last_version_build" field.
|
||||
func (dpc *DbPackageCreate) SetLastVersionBuild(s string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetLastVersionBuild(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetLastVersionBuild(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableLastVersionBuild sets the "last_version_build" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableLastVersionBuild(s *string) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableLastVersionBuild(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetLastVersionBuild(*s)
|
||||
}
|
||||
@ -171,13 +157,13 @@ func (dpc *DbPackageCreate) SetNillableLastVersionBuild(s *string) *DbPackageCre
|
||||
}
|
||||
|
||||
// SetLastVerified sets the "last_verified" field.
|
||||
func (dpc *DbPackageCreate) SetLastVerified(t time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetLastVerified(t time.Time) *DBPackageCreate {
|
||||
dpc.mutation.SetLastVerified(t)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableLastVerified sets the "last_verified" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableLastVerified(t *time.Time) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableLastVerified(t *time.Time) *DBPackageCreate {
|
||||
if t != nil {
|
||||
dpc.SetLastVerified(*t)
|
||||
}
|
||||
@ -185,13 +171,13 @@ func (dpc *DbPackageCreate) SetNillableLastVerified(t *time.Time) *DbPackageCrea
|
||||
}
|
||||
|
||||
// SetDebugSymbols sets the "debug_symbols" field.
|
||||
func (dpc *DbPackageCreate) SetDebugSymbols(ds dbpackage.DebugSymbols) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetDebugSymbols(ds dbpackage.DebugSymbols) *DBPackageCreate {
|
||||
dpc.mutation.SetDebugSymbols(ds)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableDebugSymbols sets the "debug_symbols" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols) *DBPackageCreate {
|
||||
if ds != nil {
|
||||
dpc.SetDebugSymbols(*ds)
|
||||
}
|
||||
@ -199,13 +185,13 @@ func (dpc *DbPackageCreate) SetNillableDebugSymbols(ds *dbpackage.DebugSymbols)
|
||||
}
|
||||
|
||||
// SetMaxRss sets the "max_rss" field.
|
||||
func (dpc *DbPackageCreate) SetMaxRss(i int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetMaxRss(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetMaxRss(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableMaxRss sets the "max_rss" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableMaxRss(i *int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableMaxRss(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetMaxRss(*i)
|
||||
}
|
||||
@ -213,13 +199,13 @@ func (dpc *DbPackageCreate) SetNillableMaxRss(i *int64) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetUTime sets the "u_time" field.
|
||||
func (dpc *DbPackageCreate) SetUTime(i int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetUTime(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetUTime(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableUTime sets the "u_time" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableUTime(i *int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableUTime(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetUTime(*i)
|
||||
}
|
||||
@ -227,13 +213,13 @@ func (dpc *DbPackageCreate) SetNillableUTime(i *int64) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetSTime sets the "s_time" field.
|
||||
func (dpc *DbPackageCreate) SetSTime(i int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetSTime(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetSTime(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableSTime sets the "s_time" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableSTime(i *int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableSTime(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetSTime(*i)
|
||||
}
|
||||
@ -241,13 +227,13 @@ func (dpc *DbPackageCreate) SetNillableSTime(i *int64) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetIoIn sets the "io_in" field.
|
||||
func (dpc *DbPackageCreate) SetIoIn(i int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetIoIn(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetIoIn(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableIoIn sets the "io_in" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableIoIn(i *int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableIoIn(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetIoIn(*i)
|
||||
}
|
||||
@ -255,74 +241,46 @@ func (dpc *DbPackageCreate) SetNillableIoIn(i *int64) *DbPackageCreate {
|
||||
}
|
||||
|
||||
// SetIoOut sets the "io_out" field.
|
||||
func (dpc *DbPackageCreate) SetIoOut(i int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetIoOut(i int64) *DBPackageCreate {
|
||||
dpc.mutation.SetIoOut(i)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableIoOut sets the "io_out" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableIoOut(i *int64) *DbPackageCreate {
|
||||
func (dpc *DBPackageCreate) SetNillableIoOut(i *int64) *DBPackageCreate {
|
||||
if i != nil {
|
||||
dpc.SetIoOut(*i)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetSrcinfo sets the "srcinfo" field.
|
||||
func (dpc *DbPackageCreate) SetSrcinfo(s string) *DbPackageCreate {
|
||||
dpc.mutation.SetSrcinfo(s)
|
||||
// SetTagRev sets the "tag_rev" field.
|
||||
func (dpc *DBPackageCreate) SetTagRev(s string) *DBPackageCreate {
|
||||
dpc.mutation.SetTagRev(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableSrcinfo sets the "srcinfo" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableSrcinfo(s *string) *DbPackageCreate {
|
||||
// SetNillableTagRev sets the "tag_rev" field if the given value is not nil.
|
||||
func (dpc *DBPackageCreate) SetNillableTagRev(s *string) *DBPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetSrcinfo(*s)
|
||||
dpc.SetTagRev(*s)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetSrcinfoHash sets the "srcinfo_hash" field.
|
||||
func (dpc *DbPackageCreate) SetSrcinfoHash(s string) *DbPackageCreate {
|
||||
dpc.mutation.SetSrcinfoHash(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillableSrcinfoHash sets the "srcinfo_hash" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillableSrcinfoHash(s *string) *DbPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetSrcinfoHash(*s)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetPkgbuild sets the "pkgbuild" field.
|
||||
func (dpc *DbPackageCreate) SetPkgbuild(s string) *DbPackageCreate {
|
||||
dpc.mutation.SetPkgbuild(s)
|
||||
return dpc
|
||||
}
|
||||
|
||||
// SetNillablePkgbuild sets the "pkgbuild" field if the given value is not nil.
|
||||
func (dpc *DbPackageCreate) SetNillablePkgbuild(s *string) *DbPackageCreate {
|
||||
if s != nil {
|
||||
dpc.SetPkgbuild(*s)
|
||||
}
|
||||
return dpc
|
||||
}
|
||||
|
||||
// Mutation returns the DbPackageMutation object of the builder.
|
||||
func (dpc *DbPackageCreate) Mutation() *DbPackageMutation {
|
||||
// Mutation returns the DBPackageMutation object of the builder.
|
||||
func (dpc *DBPackageCreate) Mutation() *DBPackageMutation {
|
||||
return dpc.mutation
|
||||
}
|
||||
|
||||
// Save creates the DbPackage in the database.
|
||||
func (dpc *DbPackageCreate) Save(ctx context.Context) (*DbPackage, error) {
|
||||
// Save creates the DBPackage in the database.
|
||||
func (dpc *DBPackageCreate) Save(ctx context.Context) (*DBPackage, error) {
|
||||
dpc.defaults()
|
||||
return withHooks(ctx, dpc.sqlSave, dpc.mutation, dpc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (dpc *DbPackageCreate) SaveX(ctx context.Context) *DbPackage {
|
||||
func (dpc *DBPackageCreate) SaveX(ctx context.Context) *DBPackage {
|
||||
v, err := dpc.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -331,20 +289,20 @@ func (dpc *DbPackageCreate) SaveX(ctx context.Context) *DbPackage {
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dpc *DbPackageCreate) Exec(ctx context.Context) error {
|
||||
func (dpc *DBPackageCreate) Exec(ctx context.Context) error {
|
||||
_, err := dpc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dpc *DbPackageCreate) ExecX(ctx context.Context) {
|
||||
func (dpc *DBPackageCreate) ExecX(ctx context.Context) {
|
||||
if err := dpc.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (dpc *DbPackageCreate) defaults() {
|
||||
func (dpc *DBPackageCreate) defaults() {
|
||||
if _, ok := dpc.mutation.Status(); !ok {
|
||||
v := dbpackage.DefaultStatus
|
||||
dpc.mutation.SetStatus(v)
|
||||
@ -360,50 +318,50 @@ func (dpc *DbPackageCreate) defaults() {
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (dpc *DbPackageCreate) check() error {
|
||||
func (dpc *DBPackageCreate) check() error {
|
||||
if _, ok := dpc.mutation.Pkgbase(); !ok {
|
||||
return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DbPackage.pkgbase"`)}
|
||||
return &ValidationError{Name: "pkgbase", err: errors.New(`ent: missing required field "DBPackage.pkgbase"`)}
|
||||
}
|
||||
if v, ok := dpc.mutation.Pkgbase(); ok {
|
||||
if err := dbpackage.PkgbaseValidator(v); err != nil {
|
||||
return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DbPackage.pkgbase": %w`, err)}
|
||||
return &ValidationError{Name: "pkgbase", err: fmt.Errorf(`ent: validator failed for field "DBPackage.pkgbase": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := dpc.mutation.Status(); ok {
|
||||
if err := dbpackage.StatusValidator(v); err != nil {
|
||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DbPackage.status": %w`, err)}
|
||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "DBPackage.status": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := dpc.mutation.Repository(); !ok {
|
||||
return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DbPackage.repository"`)}
|
||||
return &ValidationError{Name: "repository", err: errors.New(`ent: missing required field "DBPackage.repository"`)}
|
||||
}
|
||||
if v, ok := dpc.mutation.Repository(); ok {
|
||||
if err := dbpackage.RepositoryValidator(v); err != nil {
|
||||
return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DbPackage.repository": %w`, err)}
|
||||
return &ValidationError{Name: "repository", err: fmt.Errorf(`ent: validator failed for field "DBPackage.repository": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := dpc.mutation.March(); !ok {
|
||||
return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DbPackage.march"`)}
|
||||
return &ValidationError{Name: "march", err: errors.New(`ent: missing required field "DBPackage.march"`)}
|
||||
}
|
||||
if v, ok := dpc.mutation.March(); ok {
|
||||
if err := dbpackage.MarchValidator(v); err != nil {
|
||||
return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DbPackage.march": %w`, err)}
|
||||
return &ValidationError{Name: "march", err: fmt.Errorf(`ent: validator failed for field "DBPackage.march": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := dpc.mutation.Lto(); ok {
|
||||
if err := dbpackage.LtoValidator(v); err != nil {
|
||||
return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DbPackage.lto": %w`, err)}
|
||||
return &ValidationError{Name: "lto", err: fmt.Errorf(`ent: validator failed for field "DBPackage.lto": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := dpc.mutation.DebugSymbols(); ok {
|
||||
if err := dbpackage.DebugSymbolsValidator(v); err != nil {
|
||||
return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DbPackage.debug_symbols": %w`, err)}
|
||||
return &ValidationError{Name: "debug_symbols", err: fmt.Errorf(`ent: validator failed for field "DBPackage.debug_symbols": %w`, err)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dpc *DbPackageCreate) sqlSave(ctx context.Context) (*DbPackage, error) {
|
||||
func (dpc *DBPackageCreate) sqlSave(ctx context.Context) (*DBPackage, error) {
|
||||
if err := dpc.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -421,9 +379,9 @@ func (dpc *DbPackageCreate) sqlSave(ctx context.Context) (*DbPackage, error) {
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (dpc *DbPackageCreate) createSpec() (*DbPackage, *sqlgraph.CreateSpec) {
|
||||
func (dpc *DBPackageCreate) createSpec() (*DBPackage, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &DbPackage{config: dpc.config}
|
||||
_node = &DBPackage{config: dpc.config}
|
||||
_spec = sqlgraph.NewCreateSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
||||
)
|
||||
if value, ok := dpc.mutation.Pkgbase(); ok {
|
||||
@ -466,10 +424,6 @@ func (dpc *DbPackageCreate) createSpec() (*DbPackage, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(dbpackage.FieldUpdated, field.TypeTime, value)
|
||||
_node.Updated = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Hash(); ok {
|
||||
_spec.SetField(dbpackage.FieldHash, field.TypeString, value)
|
||||
_node.Hash = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Lto(); ok {
|
||||
_spec.SetField(dbpackage.FieldLto, field.TypeEnum, value)
|
||||
_node.Lto = value
|
||||
@ -506,38 +460,30 @@ func (dpc *DbPackageCreate) createSpec() (*DbPackage, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(dbpackage.FieldIoOut, field.TypeInt64, value)
|
||||
_node.IoOut = &value
|
||||
}
|
||||
if value, ok := dpc.mutation.Srcinfo(); ok {
|
||||
_spec.SetField(dbpackage.FieldSrcinfo, field.TypeString, value)
|
||||
_node.Srcinfo = &value
|
||||
}
|
||||
if value, ok := dpc.mutation.SrcinfoHash(); ok {
|
||||
_spec.SetField(dbpackage.FieldSrcinfoHash, field.TypeString, value)
|
||||
_node.SrcinfoHash = value
|
||||
}
|
||||
if value, ok := dpc.mutation.Pkgbuild(); ok {
|
||||
_spec.SetField(dbpackage.FieldPkgbuild, field.TypeString, value)
|
||||
_node.Pkgbuild = value
|
||||
if value, ok := dpc.mutation.TagRev(); ok {
|
||||
_spec.SetField(dbpackage.FieldTagRev, field.TypeString, value)
|
||||
_node.TagRev = &value
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// DbPackageCreateBulk is the builder for creating many DbPackage entities in bulk.
|
||||
type DbPackageCreateBulk struct {
|
||||
// DBPackageCreateBulk is the builder for creating many DBPackage entities in bulk.
|
||||
type DBPackageCreateBulk struct {
|
||||
config
|
||||
builders []*DbPackageCreate
|
||||
builders []*DBPackageCreate
|
||||
}
|
||||
|
||||
// Save creates the DbPackage entities in the database.
|
||||
func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error) {
|
||||
// Save creates the DBPackage entities in the database.
|
||||
func (dpcb *DBPackageCreateBulk) Save(ctx context.Context) ([]*DBPackage, error) {
|
||||
specs := make([]*sqlgraph.CreateSpec, len(dpcb.builders))
|
||||
nodes := make([]*DbPackage, len(dpcb.builders))
|
||||
nodes := make([]*DBPackage, len(dpcb.builders))
|
||||
mutators := make([]Mutator, len(dpcb.builders))
|
||||
for i := range dpcb.builders {
|
||||
func(i int, root context.Context) {
|
||||
builder := dpcb.builders[i]
|
||||
builder.defaults()
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*DbPackageMutation)
|
||||
mutation, ok := m.(*DBPackageMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
@ -584,7 +530,7 @@ func (dpcb *DbPackageCreateBulk) Save(ctx context.Context) ([]*DbPackage, error)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (dpcb *DbPackageCreateBulk) SaveX(ctx context.Context) []*DbPackage {
|
||||
func (dpcb *DBPackageCreateBulk) SaveX(ctx context.Context) []*DBPackage {
|
||||
v, err := dpcb.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -593,13 +539,13 @@ func (dpcb *DbPackageCreateBulk) SaveX(ctx context.Context) []*DbPackage {
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dpcb *DbPackageCreateBulk) Exec(ctx context.Context) error {
|
||||
func (dpcb *DBPackageCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := dpcb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dpcb *DbPackageCreateBulk) ExecX(ctx context.Context) {
|
||||
func (dpcb *DBPackageCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := dpcb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -12,26 +12,26 @@ import (
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/predicate"
|
||||
)
|
||||
|
||||
// DbPackageDelete is the builder for deleting a DbPackage entity.
|
||||
type DbPackageDelete struct {
|
||||
// DBPackageDelete is the builder for deleting a DBPackage entity.
|
||||
type DBPackageDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *DbPackageMutation
|
||||
mutation *DBPackageMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the DbPackageDelete builder.
|
||||
func (dpd *DbPackageDelete) Where(ps ...predicate.DbPackage) *DbPackageDelete {
|
||||
// Where appends a list predicates to the DBPackageDelete builder.
|
||||
func (dpd *DBPackageDelete) Where(ps ...predicate.DBPackage) *DBPackageDelete {
|
||||
dpd.mutation.Where(ps...)
|
||||
return dpd
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (dpd *DbPackageDelete) Exec(ctx context.Context) (int, error) {
|
||||
func (dpd *DBPackageDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, dpd.sqlExec, dpd.mutation, dpd.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dpd *DbPackageDelete) ExecX(ctx context.Context) int {
|
||||
func (dpd *DBPackageDelete) ExecX(ctx context.Context) int {
|
||||
n, err := dpd.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -39,7 +39,7 @@ func (dpd *DbPackageDelete) ExecX(ctx context.Context) int {
|
||||
return n
|
||||
}
|
||||
|
||||
func (dpd *DbPackageDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
func (dpd *DBPackageDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(dbpackage.Table, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
||||
if ps := dpd.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
@ -56,19 +56,19 @@ func (dpd *DbPackageDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// DbPackageDeleteOne is the builder for deleting a single DbPackage entity.
|
||||
type DbPackageDeleteOne struct {
|
||||
dpd *DbPackageDelete
|
||||
// DBPackageDeleteOne is the builder for deleting a single DBPackage entity.
|
||||
type DBPackageDeleteOne struct {
|
||||
dpd *DBPackageDelete
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the DbPackageDelete builder.
|
||||
func (dpdo *DbPackageDeleteOne) Where(ps ...predicate.DbPackage) *DbPackageDeleteOne {
|
||||
// Where appends a list predicates to the DBPackageDelete builder.
|
||||
func (dpdo *DBPackageDeleteOne) Where(ps ...predicate.DBPackage) *DBPackageDeleteOne {
|
||||
dpdo.dpd.mutation.Where(ps...)
|
||||
return dpdo
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (dpdo *DbPackageDeleteOne) Exec(ctx context.Context) error {
|
||||
func (dpdo *DBPackageDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := dpdo.dpd.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
@ -81,7 +81,7 @@ func (dpdo *DbPackageDeleteOne) Exec(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dpdo *DbPackageDeleteOne) ExecX(ctx context.Context) {
|
||||
func (dpdo *DBPackageDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := dpdo.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -14,53 +14,53 @@ import (
|
||||
"somegit.dev/ALHP/ALHP.GO/ent/predicate"
|
||||
)
|
||||
|
||||
// DbPackageQuery is the builder for querying DbPackage entities.
|
||||
type DbPackageQuery struct {
|
||||
// DBPackageQuery is the builder for querying DBPackage entities.
|
||||
type DBPackageQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []dbpackage.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.DbPackage
|
||||
predicates []predicate.DBPackage
|
||||
modifiers []func(*sql.Selector)
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the DbPackageQuery builder.
|
||||
func (dpq *DbPackageQuery) Where(ps ...predicate.DbPackage) *DbPackageQuery {
|
||||
// Where adds a new predicate for the DBPackageQuery builder.
|
||||
func (dpq *DBPackageQuery) Where(ps ...predicate.DBPackage) *DBPackageQuery {
|
||||
dpq.predicates = append(dpq.predicates, ps...)
|
||||
return dpq
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (dpq *DbPackageQuery) Limit(limit int) *DbPackageQuery {
|
||||
func (dpq *DBPackageQuery) Limit(limit int) *DBPackageQuery {
|
||||
dpq.ctx.Limit = &limit
|
||||
return dpq
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (dpq *DbPackageQuery) Offset(offset int) *DbPackageQuery {
|
||||
func (dpq *DBPackageQuery) Offset(offset int) *DBPackageQuery {
|
||||
dpq.ctx.Offset = &offset
|
||||
return dpq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (dpq *DbPackageQuery) Unique(unique bool) *DbPackageQuery {
|
||||
func (dpq *DBPackageQuery) Unique(unique bool) *DBPackageQuery {
|
||||
dpq.ctx.Unique = &unique
|
||||
return dpq
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (dpq *DbPackageQuery) Order(o ...dbpackage.OrderOption) *DbPackageQuery {
|
||||
func (dpq *DBPackageQuery) Order(o ...dbpackage.OrderOption) *DBPackageQuery {
|
||||
dpq.order = append(dpq.order, o...)
|
||||
return dpq
|
||||
}
|
||||
|
||||
// First returns the first DbPackage entity from the query.
|
||||
// Returns a *NotFoundError when no DbPackage was found.
|
||||
func (dpq *DbPackageQuery) First(ctx context.Context) (*DbPackage, error) {
|
||||
// First returns the first DBPackage entity from the query.
|
||||
// Returns a *NotFoundError when no DBPackage was found.
|
||||
func (dpq *DBPackageQuery) First(ctx context.Context) (*DBPackage, error) {
|
||||
nodes, err := dpq.Limit(1).All(setContextOp(ctx, dpq.ctx, "First"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -72,7 +72,7 @@ func (dpq *DbPackageQuery) First(ctx context.Context) (*DbPackage, error) {
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (dpq *DbPackageQuery) FirstX(ctx context.Context) *DbPackage {
|
||||
func (dpq *DBPackageQuery) FirstX(ctx context.Context) *DBPackage {
|
||||
node, err := dpq.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
@ -80,9 +80,9 @@ func (dpq *DbPackageQuery) FirstX(ctx context.Context) *DbPackage {
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first DbPackage ID from the query.
|
||||
// Returns a *NotFoundError when no DbPackage ID was found.
|
||||
func (dpq *DbPackageQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||
// FirstID returns the first DBPackage ID from the query.
|
||||
// Returns a *NotFoundError when no DBPackage ID was found.
|
||||
func (dpq *DBPackageQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = dpq.Limit(1).IDs(setContextOp(ctx, dpq.ctx, "FirstID")); err != nil {
|
||||
return
|
||||
@ -95,7 +95,7 @@ func (dpq *DbPackageQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (dpq *DbPackageQuery) FirstIDX(ctx context.Context) int {
|
||||
func (dpq *DBPackageQuery) FirstIDX(ctx context.Context) int {
|
||||
id, err := dpq.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
@ -103,10 +103,10 @@ func (dpq *DbPackageQuery) FirstIDX(ctx context.Context) int {
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single DbPackage entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one DbPackage entity is found.
|
||||
// Returns a *NotFoundError when no DbPackage entities are found.
|
||||
func (dpq *DbPackageQuery) Only(ctx context.Context) (*DbPackage, error) {
|
||||
// Only returns a single DBPackage entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one DBPackage entity is found.
|
||||
// Returns a *NotFoundError when no DBPackage entities are found.
|
||||
func (dpq *DBPackageQuery) Only(ctx context.Context) (*DBPackage, error) {
|
||||
nodes, err := dpq.Limit(2).All(setContextOp(ctx, dpq.ctx, "Only"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -122,7 +122,7 @@ func (dpq *DbPackageQuery) Only(ctx context.Context) (*DbPackage, error) {
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (dpq *DbPackageQuery) OnlyX(ctx context.Context) *DbPackage {
|
||||
func (dpq *DBPackageQuery) OnlyX(ctx context.Context) *DBPackage {
|
||||
node, err := dpq.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -130,10 +130,10 @@ func (dpq *DbPackageQuery) OnlyX(ctx context.Context) *DbPackage {
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only DbPackage ID in the query.
|
||||
// Returns a *NotSingularError when more than one DbPackage ID is found.
|
||||
// OnlyID is like Only, but returns the only DBPackage ID in the query.
|
||||
// Returns a *NotSingularError when more than one DBPackage ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (dpq *DbPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||
func (dpq *DBPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = dpq.Limit(2).IDs(setContextOp(ctx, dpq.ctx, "OnlyID")); err != nil {
|
||||
return
|
||||
@ -150,7 +150,7 @@ func (dpq *DbPackageQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (dpq *DbPackageQuery) OnlyIDX(ctx context.Context) int {
|
||||
func (dpq *DBPackageQuery) OnlyIDX(ctx context.Context) int {
|
||||
id, err := dpq.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -158,18 +158,18 @@ func (dpq *DbPackageQuery) OnlyIDX(ctx context.Context) int {
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of DbPackages.
|
||||
func (dpq *DbPackageQuery) All(ctx context.Context) ([]*DbPackage, error) {
|
||||
// All executes the query and returns a list of DBPackages.
|
||||
func (dpq *DBPackageQuery) All(ctx context.Context) ([]*DBPackage, error) {
|
||||
ctx = setContextOp(ctx, dpq.ctx, "All")
|
||||
if err := dpq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*DbPackage, *DbPackageQuery]()
|
||||
return withInterceptors[[]*DbPackage](ctx, dpq, qr, dpq.inters)
|
||||
qr := querierAll[[]*DBPackage, *DBPackageQuery]()
|
||||
return withInterceptors[[]*DBPackage](ctx, dpq, qr, dpq.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (dpq *DbPackageQuery) AllX(ctx context.Context) []*DbPackage {
|
||||
func (dpq *DBPackageQuery) AllX(ctx context.Context) []*DBPackage {
|
||||
nodes, err := dpq.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -177,8 +177,8 @@ func (dpq *DbPackageQuery) AllX(ctx context.Context) []*DbPackage {
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of DbPackage IDs.
|
||||
func (dpq *DbPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||
// IDs executes the query and returns a list of DBPackage IDs.
|
||||
func (dpq *DBPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||
if dpq.ctx.Unique == nil && dpq.path != nil {
|
||||
dpq.Unique(true)
|
||||
}
|
||||
@ -190,7 +190,7 @@ func (dpq *DbPackageQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (dpq *DbPackageQuery) IDsX(ctx context.Context) []int {
|
||||
func (dpq *DBPackageQuery) IDsX(ctx context.Context) []int {
|
||||
ids, err := dpq.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -199,16 +199,16 @@ func (dpq *DbPackageQuery) IDsX(ctx context.Context) []int {
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (dpq *DbPackageQuery) Count(ctx context.Context) (int, error) {
|
||||
func (dpq *DBPackageQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, dpq.ctx, "Count")
|
||||
if err := dpq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, dpq, querierCount[*DbPackageQuery](), dpq.inters)
|
||||
return withInterceptors[int](ctx, dpq, querierCount[*DBPackageQuery](), dpq.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (dpq *DbPackageQuery) CountX(ctx context.Context) int {
|
||||
func (dpq *DBPackageQuery) CountX(ctx context.Context) int {
|
||||
count, err := dpq.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -217,7 +217,7 @@ func (dpq *DbPackageQuery) CountX(ctx context.Context) int {
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (dpq *DbPackageQuery) Exist(ctx context.Context) (bool, error) {
|
||||
func (dpq *DBPackageQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, dpq.ctx, "Exist")
|
||||
switch _, err := dpq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
@ -230,7 +230,7 @@ func (dpq *DbPackageQuery) Exist(ctx context.Context) (bool, error) {
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (dpq *DbPackageQuery) ExistX(ctx context.Context) bool {
|
||||
func (dpq *DBPackageQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := dpq.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -238,18 +238,18 @@ func (dpq *DbPackageQuery) ExistX(ctx context.Context) bool {
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the DbPackageQuery builder, including all associated steps. It can be
|
||||
// Clone returns a duplicate of the DBPackageQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (dpq *DbPackageQuery) Clone() *DbPackageQuery {
|
||||
func (dpq *DBPackageQuery) Clone() *DBPackageQuery {
|
||||
if dpq == nil {
|
||||
return nil
|
||||
}
|
||||
return &DbPackageQuery{
|
||||
return &DBPackageQuery{
|
||||
config: dpq.config,
|
||||
ctx: dpq.ctx.Clone(),
|
||||
order: append([]dbpackage.OrderOption{}, dpq.order...),
|
||||
inters: append([]Interceptor{}, dpq.inters...),
|
||||
predicates: append([]predicate.DbPackage{}, dpq.predicates...),
|
||||
predicates: append([]predicate.DBPackage{}, dpq.predicates...),
|
||||
// clone intermediate query.
|
||||
sql: dpq.sql.Clone(),
|
||||
path: dpq.path,
|
||||
@ -266,13 +266,13 @@ func (dpq *DbPackageQuery) Clone() *DbPackageQuery {
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.DbPackage.Query().
|
||||
// client.DBPackage.Query().
|
||||
// GroupBy(dbpackage.FieldPkgbase).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (dpq *DbPackageQuery) GroupBy(field string, fields ...string) *DbPackageGroupBy {
|
||||
func (dpq *DBPackageQuery) GroupBy(field string, fields ...string) *DBPackageGroupBy {
|
||||
dpq.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &DbPackageGroupBy{build: dpq}
|
||||
grbuild := &DBPackageGroupBy{build: dpq}
|
||||
grbuild.flds = &dpq.ctx.Fields
|
||||
grbuild.label = dbpackage.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
@ -288,23 +288,23 @@ func (dpq *DbPackageQuery) GroupBy(field string, fields ...string) *DbPackageGro
|
||||
// Pkgbase string `json:"pkgbase,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.DbPackage.Query().
|
||||
// client.DBPackage.Query().
|
||||
// Select(dbpackage.FieldPkgbase).
|
||||
// Scan(ctx, &v)
|
||||
func (dpq *DbPackageQuery) Select(fields ...string) *DbPackageSelect {
|
||||
func (dpq *DBPackageQuery) Select(fields ...string) *DBPackageSelect {
|
||||
dpq.ctx.Fields = append(dpq.ctx.Fields, fields...)
|
||||
sbuild := &DbPackageSelect{DbPackageQuery: dpq}
|
||||
sbuild := &DBPackageSelect{DBPackageQuery: dpq}
|
||||
sbuild.label = dbpackage.Label
|
||||
sbuild.flds, sbuild.scan = &dpq.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a DbPackageSelect configured with the given aggregations.
|
||||
func (dpq *DbPackageQuery) Aggregate(fns ...AggregateFunc) *DbPackageSelect {
|
||||
// Aggregate returns a DBPackageSelect configured with the given aggregations.
|
||||
func (dpq *DBPackageQuery) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
|
||||
return dpq.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (dpq *DbPackageQuery) prepareQuery(ctx context.Context) error {
|
||||
func (dpq *DBPackageQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range dpq.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
@ -330,16 +330,16 @@ func (dpq *DbPackageQuery) prepareQuery(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dpq *DbPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DbPackage, error) {
|
||||
func (dpq *DBPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DBPackage, error) {
|
||||
var (
|
||||
nodes = []*DbPackage{}
|
||||
nodes = []*DBPackage{}
|
||||
_spec = dpq.querySpec()
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*DbPackage).scanValues(nil, columns)
|
||||
return (*DBPackage).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &DbPackage{config: dpq.config}
|
||||
node := &DBPackage{config: dpq.config}
|
||||
nodes = append(nodes, node)
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
@ -358,7 +358,7 @@ func (dpq *DbPackageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*D
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (dpq *DbPackageQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
func (dpq *DBPackageQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := dpq.querySpec()
|
||||
if len(dpq.modifiers) > 0 {
|
||||
_spec.Modifiers = dpq.modifiers
|
||||
@ -370,7 +370,7 @@ func (dpq *DbPackageQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
return sqlgraph.CountNodes(ctx, dpq.driver, _spec)
|
||||
}
|
||||
|
||||
func (dpq *DbPackageQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
func (dpq *DBPackageQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(dbpackage.Table, dbpackage.Columns, sqlgraph.NewFieldSpec(dbpackage.FieldID, field.TypeInt))
|
||||
_spec.From = dpq.sql
|
||||
if unique := dpq.ctx.Unique; unique != nil {
|
||||
@ -410,7 +410,7 @@ func (dpq *DbPackageQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (dpq *DbPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
func (dpq *DBPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(dpq.driver.Dialect())
|
||||
t1 := builder.Table(dbpackage.Table)
|
||||
columns := dpq.ctx.Fields
|
||||
@ -446,33 +446,33 @@ func (dpq *DbPackageQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
}
|
||||
|
||||
// Modify adds a query modifier for attaching custom logic to queries.
|
||||
func (dpq *DbPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DbPackageSelect {
|
||||
func (dpq *DBPackageQuery) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
|
||||
dpq.modifiers = append(dpq.modifiers, modifiers...)
|
||||
return dpq.Select()
|
||||
}
|
||||
|
||||
// DbPackageGroupBy is the group-by builder for DbPackage entities.
|
||||
type DbPackageGroupBy struct {
|
||||
// DBPackageGroupBy is the group-by builder for DBPackage entities.
|
||||
type DBPackageGroupBy struct {
|
||||
selector
|
||||
build *DbPackageQuery
|
||||
build *DBPackageQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (dpgb *DbPackageGroupBy) Aggregate(fns ...AggregateFunc) *DbPackageGroupBy {
|
||||
func (dpgb *DBPackageGroupBy) Aggregate(fns ...AggregateFunc) *DBPackageGroupBy {
|
||||
dpgb.fns = append(dpgb.fns, fns...)
|
||||
return dpgb
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (dpgb *DbPackageGroupBy) Scan(ctx context.Context, v any) error {
|
||||
func (dpgb *DBPackageGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, dpgb.build.ctx, "GroupBy")
|
||||
if err := dpgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*DbPackageQuery, *DbPackageGroupBy](ctx, dpgb.build, dpgb, dpgb.build.inters, v)
|
||||
return scanWithInterceptors[*DBPackageQuery, *DBPackageGroupBy](ctx, dpgb.build, dpgb, dpgb.build.inters, v)
|
||||
}
|
||||
|
||||
func (dpgb *DbPackageGroupBy) sqlScan(ctx context.Context, root *DbPackageQuery, v any) error {
|
||||
func (dpgb *DBPackageGroupBy) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(dpgb.fns))
|
||||
for _, fn := range dpgb.fns {
|
||||
@ -499,28 +499,28 @@ func (dpgb *DbPackageGroupBy) sqlScan(ctx context.Context, root *DbPackageQuery,
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// DbPackageSelect is the builder for selecting fields of DbPackage entities.
|
||||
type DbPackageSelect struct {
|
||||
*DbPackageQuery
|
||||
// DBPackageSelect is the builder for selecting fields of DBPackage entities.
|
||||
type DBPackageSelect struct {
|
||||
*DBPackageQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (dps *DbPackageSelect) Aggregate(fns ...AggregateFunc) *DbPackageSelect {
|
||||
func (dps *DBPackageSelect) Aggregate(fns ...AggregateFunc) *DBPackageSelect {
|
||||
dps.fns = append(dps.fns, fns...)
|
||||
return dps
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (dps *DbPackageSelect) Scan(ctx context.Context, v any) error {
|
||||
func (dps *DBPackageSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, dps.ctx, "Select")
|
||||
if err := dps.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*DbPackageQuery, *DbPackageSelect](ctx, dps.DbPackageQuery, dps, dps.inters, v)
|
||||
return scanWithInterceptors[*DBPackageQuery, *DBPackageSelect](ctx, dps.DBPackageQuery, dps, dps.inters, v)
|
||||
}
|
||||
|
||||
func (dps *DbPackageSelect) sqlScan(ctx context.Context, root *DbPackageQuery, v any) error {
|
||||
func (dps *DBPackageSelect) sqlScan(ctx context.Context, root *DBPackageQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(dps.fns))
|
||||
for _, fn := range dps.fns {
|
||||
@ -542,7 +542,7 @@ func (dps *DbPackageSelect) sqlScan(ctx context.Context, root *DbPackageQuery, v
|
||||
}
|
||||
|
||||
// Modify adds a query modifier for attaching custom logic to queries.
|
||||
func (dps *DbPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DbPackageSelect {
|
||||
func (dps *DBPackageSelect) Modify(modifiers ...func(s *sql.Selector)) *DBPackageSelect {
|
||||
dps.modifiers = append(dps.modifiers, modifiers...)
|
||||
return dps
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -9,16 +9,16 @@ import (
"somegit.dev/ALHP/ALHP.GO/ent"
)

// The DbPackageFunc type is an adapter to allow the use of ordinary
// function as DbPackage mutator.
type DbPackageFunc func(context.Context, *ent.DbPackageMutation) (ent.Value, error)
// The DBPackageFunc type is an adapter to allow the use of ordinary
// function as DBPackage mutator.
type DBPackageFunc func(context.Context, *ent.DBPackageMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f DbPackageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if mv, ok := m.(*ent.DbPackageMutation); ok {
func (f DBPackageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if mv, ok := m.(*ent.DBPackageMutation); ok {
return f(ctx, mv)
}
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DbPackageMutation", m)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DBPackageMutation", m)
}

// Condition is a hook condition function.

@ -21,7 +21,6 @@ var (
{Name: "repo_version", Type: field.TypeString, Nullable: true},
{Name: "build_time_start", Type: field.TypeTime, Nullable: true},
{Name: "updated", Type: field.TypeTime, Nullable: true},
{Name: "hash", Type: field.TypeString, Nullable: true},
{Name: "lto", Type: field.TypeEnum, Nullable: true, Enums: []string{"enabled", "unknown", "disabled", "auto_disabled"}, Default: "unknown"},
{Name: "last_version_build", Type: field.TypeString, Nullable: true},
{Name: "last_verified", Type: field.TypeTime, Nullable: true},
@ -31,9 +30,7 @@ var (
{Name: "s_time", Type: field.TypeInt64, Nullable: true},
{Name: "io_in", Type: field.TypeInt64, Nullable: true},
{Name: "io_out", Type: field.TypeInt64, Nullable: true},
{Name: "srcinfo", Type: field.TypeString, Nullable: true, Size: 2147483647},
{Name: "srcinfo_hash", Type: field.TypeString, Nullable: true},
{Name: "pkgbuild", Type: field.TypeString, Nullable: true},
{Name: "tag_rev", Type: field.TypeString, Nullable: true},
}
// DbPackagesTable holds the schema information for the "db_packages" table.
DbPackagesTable = &schema.Table{
737
ent/mutation.go
737
ent/mutation.go
File diff suppressed because it is too large
@ -6,5 +6,5 @@ import (
"entgo.io/ent/dialect/sql"
)

// DbPackage is the predicate function for dbpackage builders.
type DbPackage func(*sql.Selector)
// DBPackage is the predicate function for dbpackage builders.
type DBPackage func(*sql.Selector)

@ -11,7 +11,7 @@ import (
// (default values, validators, hooks and policies) and stitches it
// to their package variables.
func init() {
dbpackageFields := schema.DbPackage{}.Fields()
dbpackageFields := schema.DBPackage{}.Fields()
_ = dbpackageFields
// dbpackageDescPkgbase is the schema descriptor for pkgbase field.
dbpackageDescPkgbase := dbpackageFields[0].Descriptor()

@ -5,13 +5,13 @@ import (
"entgo.io/ent/schema/field"
)

// DbPackage holds the schema definition for the DbPackage entity.
type DbPackage struct {
// DBPackage holds the schema definition for the DbPackage entity.
type DBPackage struct {
ent.Schema
}

// Fields of the DbPackage.
func (DbPackage) Fields() []ent.Field {
// Fields of the DBPackage.
func (DBPackage) Fields() []ent.Field {
return []ent.Field{
field.String("pkgbase").NotEmpty().Immutable(),
field.Strings("packages").Optional(),
@ -24,7 +24,6 @@ func (DbPackage) Fields() []ent.Field {
field.String("repo_version").Optional(),
field.Time("build_time_start").Optional(),
field.Time("updated").Optional(),
field.String("hash").Optional(),
field.Enum("lto").Values("enabled", "unknown", "disabled", "auto_disabled").Default("unknown").Optional(),
field.String("last_version_build").Optional(),
field.Time("last_verified").Optional(),
@ -34,13 +33,11 @@ func (DbPackage) Fields() []ent.Field {
field.Int64("s_time").Optional().Nillable(),
field.Int64("io_in").Optional().Nillable(),
field.Int64("io_out").Optional().Nillable(),
field.Text("srcinfo").Optional().Nillable(),
field.String("srcinfo_hash").Optional(),
field.String("pkgbuild").Optional(),
field.String("tag_rev").Optional().Nillable(),
}
}

// Edges of the DbPackage.
func (DbPackage) Edges() []ent.Edge {
// Edges of the DBPackage.
func (DBPackage) Edges() []ent.Edge {
return nil
}

@ -12,8 +12,8 @@ import (
// Tx is a transactional client that is created by calling Client.Tx().
type Tx struct {
config
// DbPackage is the client for interacting with the DbPackage builders.
DbPackage *DbPackageClient
// DBPackage is the client for interacting with the DBPackage builders.
DBPackage *DBPackageClient

// lazily loaded.
client *Client
@ -145,7 +145,7 @@ func (tx *Tx) Client() *Client {
}

func (tx *Tx) init() {
tx.DbPackage = NewDbPackageClient(tx.config)
tx.DBPackage = NewDBPackageClient(tx.config)
}

// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
@ -155,7 +155,7 @@ func (tx *Tx) init() {
// of them in order to commit or rollback the transaction.
//
// If a closed transaction is embedded in one of the generated entities, and the entity
// applies a query, for example: DbPackage.QueryXXX(), the query will be executed
// applies a query, for example: DBPackage.QueryXXX(), the query will be executed
// through the driver which created this transaction.
//
// Note that txDriver is not goroutine safe.

8
go.mod
8
go.mod
@ -12,14 +12,13 @@ require (
github.com/jackc/pgx/v4 v4.18.1
github.com/otiai10/copy v1.11.0
github.com/sethvargo/go-retry v0.2.4
github.com/sirupsen/logrus v1.9.0
github.com/sirupsen/logrus v1.9.2
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3
gopkg.in/yaml.v2 v2.4.0
lukechampine.com/blake3 v1.1.7
)

require (
ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf // indirect
ariga.io/atlas v0.11.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
@ -33,10 +32,9 @@ require (
github.com/jackc/pgproto3/v2 v2.3.2 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgtype v1.14.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/zclconf/go-cty v1.13.1 // indirect
golang.org/x/crypto v0.8.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect

18
go.sum
18
go.sum
@ -1,5 +1,5 @@
ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf h1:Tq2DRB39ZHScIwWACjPKLv5oEErv7zv6PBb5RTz5CKA=
ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf/go.mod h1:+TR129FJZ5Lvzms6dvCeGWh1yR6hMvmXBhug4hrNIGk=
ariga.io/atlas v0.11.0 h1:aGR7MzsUfmdlDYCpRErQeY2NSuRlPE0/q6drNE/5buM=
ariga.io/atlas v0.11.0/go.mod h1:+TR129FJZ5Lvzms6dvCeGWh1yR6hMvmXBhug4hrNIGk=
entgo.io/ent v0.12.3 h1:N5lO2EOrHpCH5HYfiMOCHYbo+oh5M8GjT0/cx5x6xkk=
entgo.io/ent v0.12.3/go.mod h1:AigGGx+tbrBBYHAzGOg8ND661E5cxx1Uiu5o/otJ6Yg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@ -92,9 +92,6 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@ -137,8 +134,8 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@ -181,8 +178,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@ -211,7 +208,6 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -257,5 +253,3 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=

@ -15,7 +15,7 @@ import (
func housekeeping(repo, march string, wg *sync.WaitGroup) error {
defer wg.Done()
fullRepo := repo + "-" + march
log.Debugf("[%s] Start housekeeping", fullRepo)
log.Debugf("[%s] start housekeeping", fullRepo)
packages, err := Glob(filepath.Join(conf.Basedir.Repo, fullRepo, "/**/*.pkg.tar.zst"))
if err != nil {
return err
@ -29,9 +29,9 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
if ent.IsNotFound(err) {
log.Infof("[HK] removing orphan %s->%s", fullRepo, filepath.Base(path))
pkg := &ProtoPackage{
FullRepo: mPackage.FullRepo(),
FullRepo: *mPackage.FullRepo(),
PkgFiles: []string{path},
March: mPackage.MArch(),
March: *mPackage.MArch(),
}
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
continue
@ -43,22 +43,12 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
pkg := &ProtoPackage{
Pkgbase: dbPkg.Pkgbase,
Repo: mPackage.Repo(),
FullRepo: mPackage.FullRepo(),
FullRepo: *mPackage.FullRepo(),
DBPackage: dbPkg,
March: mPackage.MArch(),
Arch: mPackage.Arch(),
March: *mPackage.MArch(),
Arch: *mPackage.Arch(),
}

var upstream string
switch pkg.DBPackage.Repository {
case dbpackage.RepositoryCore, dbpackage.RepositoryExtra:
upstream = "upstream-core-extra"
case dbpackage.RepositoryCommunity:
upstream = "upstream-community"
}
pkg.Pkgbuild = filepath.Join(conf.Basedir.Work, upstreamDir, upstream, dbPkg.Pkgbase, "repos",
pkg.DBPackage.Repository.String()+"-"+conf.Arch, "PKGBUILD")

// check if package is still part of repo
dbs, err := alpmHandle.SyncDBs()
if err != nil {
@ -72,7 +62,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
// package not found on mirror/db -> not part of any repo anymore
log.Infof("[HK] %s->%s not included in repo", pkg.FullRepo, mPackage.Name())
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
err = db.DbPackage.DeleteOne(pkg.DBPackage).Exec(context.Background())
err = db.DBPackage.DeleteOne(pkg.DBPackage).Exec(context.Background())
if err != nil {
return err
}
@ -100,7 +90,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
repoVer, err := pkg.repoVersion()
if err == nil && repoVer != dbPkg.RepoVersion {
log.Infof("[HK] %s->%s update repoVersion %s->%s", pkg.FullRepo, pkg.Pkgbase, dbPkg.RepoVersion, repoVer)
pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearHash().Save(context.Background())
pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearTagRev().Save(context.Background())
if err != nil {
return err
}
@ -108,7 +98,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
}

// check all packages from db for existence
dbPackages, err := db.DbPackage.Query().Where(
dbPackages, err := db.DBPackage.Query().Where(
dbpackage.And(
dbpackage.RepositoryEQ(dbpackage.Repository(repo)),
dbpackage.March(march),
@ -130,9 +120,9 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {

if !pkg.isAvailable(alpmHandle) {
log.Infof("[HK] %s->%s not found on mirror, removing", pkg.FullRepo, pkg.Pkgbase)
err = db.DbPackage.DeleteOne(dbPkg).Exec(context.Background())
err = db.DBPackage.DeleteOne(dbPkg).Exec(context.Background())
if err != nil {
log.Errorf("[HK] Error deleting package %s: %v", dbPkg.Pkgbase, err)
log.Errorf("[HK] error deleting package %s->%s: %v", pkg.FullRepo, dbPkg.Pkgbase, err)
}
continue
}
@ -165,7 +155,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
}
if len(missingSplits) > 0 {
log.Infof("[HK] %s->%s missing split-package(s): %s", fullRepo, dbPkg.Pkgbase, missingSplits)
pkg.DBPackage, err = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background())
pkg.DBPackage, err = pkg.DBPackage.Update().ClearRepoVersion().ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(context.Background())
if err != nil {
return err
}
@ -180,7 +170,7 @@ func housekeeping(repo, march string, wg *sync.WaitGroup) error {
}
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion == "":
log.Infof("[HK] reseting missing package %s->%s with no repo version", fullRepo, dbPkg.Pkgbase)
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearHash().ClearRepoVersion().Exec(context.Background())
err = dbPkg.Update().SetStatus(dbpackage.StatusQueued).ClearTagRev().ClearRepoVersion().Exec(context.Background())
if err != nil {
return err
}
@ -224,7 +214,7 @@ func logHK() error {
continue
}

pkgSkipped, err := db.DbPackage.Query().Where(
pkgSkipped, err := db.DBPackage.Query().Where(
dbpackage.Pkgbase(pkg.Pkgbase),
dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusSkipped),
@ -245,8 +235,8 @@ func logHK() error {
sLogContent := string(logContent)

if rePortError.MatchString(sLogContent) || reSigError.MatchString(sLogContent) || reDownloadError.MatchString(sLogContent) {
rows, err := db.DbPackage.Update().Where(dbpackage.And(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusFailed))).ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background())
rows, err := db.DBPackage.Update().Where(dbpackage.And(dbpackage.Pkgbase(pkg.Pkgbase), dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusFailed))).ClearTagRev().SetStatus(dbpackage.StatusQueued).Save(context.Background())
if err != nil {
return err
}
@ -255,12 +245,12 @@ func logHK() error {
log.Infof("[HK/%s/%s] fixable build-error detected, requeueing package (%d)", pkg.March, pkg.Pkgbase, rows)
}
} else if reLdError.MatchString(sLogContent) || reRustLTOError.MatchString(sLogContent) {
rows, err := db.DbPackage.Update().Where(
rows, err := db.DBPackage.Update().Where(
dbpackage.Pkgbase(pkg.Pkgbase),
dbpackage.March(pkg.March),
dbpackage.StatusEQ(dbpackage.StatusFailed),
dbpackage.LtoNotIn(dbpackage.LtoAutoDisabled, dbpackage.LtoDisabled),
).ClearHash().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(context.Background())
).ClearTagRev().SetStatus(dbpackage.StatusQueued).SetLto(dbpackage.LtoAutoDisabled).Save(context.Background())
if err != nil {
return err
}

30
main.go
30
main.go
@ -40,17 +40,17 @@ func main() {

confStr, err := os.ReadFile("config.yaml")
if err != nil {
log.Fatalf("Error reading config file: %v", err)
log.Fatalf("error reading config file: %v", err)
}

err = yaml.Unmarshal(confStr, &conf)
if err != nil {
log.Fatalf("Error parsing config file: %v", err)
log.Fatalf("error parsing config file: %v", err)
}

lvl, err := log.ParseLevel(conf.Logging.Level)
if err != nil {
log.Fatalf("Error parsing log level from config: %v", err)
log.Fatalf("error parsing log level from config: %v", err)
}
log.SetLevel(lvl)
if *journalLog {
@ -59,18 +59,18 @@ func main() {

err = syscall.Setpriority(syscall.PRIO_PROCESS, 0, 5)
if err != nil {
log.Infof("Failed to drop priority: %v", err)
log.Infof("failed to drop priority: %v", err)
}

err = os.MkdirAll(conf.Basedir.Repo, 0o755)
if err != nil {
log.Fatalf("Error creating repo dir: %v", err)
log.Fatalf("error creating repo dir: %v", err)
}

if conf.DB.Driver == "pgx" {
pdb, err := sql.Open("pgx", conf.DB.ConnectTo)
if err != nil {
log.Fatalf("Failed to open database %s: %v", conf.DB.ConnectTo, err)
log.Fatalf("failed to open database %s: %v", conf.DB.ConnectTo, err)
}

drv := sql.OpenDB(dialect.Postgres, pdb.DB())
@ -78,7 +78,7 @@ func main() {
} else {
db, err = ent.Open(conf.DB.Driver, conf.DB.ConnectTo)
if err != nil {
log.Panicf("Failed to open database %s: %v", conf.DB.ConnectTo, err)
log.Panicf("failed to open database %s: %v", conf.DB.ConnectTo, err)
}
defer func(Client *ent.Client) {
_ = Client.Close()
@ -86,7 +86,7 @@ func main() {
}

if err := db.Schema.Create(context.Background(), migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
log.Panicf("Automigrate failed: %v", err)
log.Panicf("automigrate failed: %v", err)
}

buildManager = &BuildManager{
@ -101,17 +101,17 @@ func main() {

err = setupChroot()
if err != nil {
log.Panicf("Unable to setup chroot: %v", err)
log.Panicf("unable to setup chroot: %v", err)
}
err = syncMarchs()
if err != nil {
log.Panicf("Error syncing marchs: %v", err)
log.Panicf("error syncing marchs: %v", err)
}

alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
if err != nil {
log.Panicf("Error while ALPM-init: %v", err)
log.Panicf("error while ALPM-init: %v", err)
}

ctx, cancel := context.WithCancel(context.Background())
@ -129,20 +129,20 @@ killLoop:
case <-reloadSignals:
confStr, err := os.ReadFile("config.yaml")
if err != nil {
log.Panicf("Unable to open config: %v", err)
log.Panicf("unable to open config: %v", err)
}

err = yaml.Unmarshal(confStr, &conf)
if err != nil {
log.Panicf("Unable to parse config: %v", err)
log.Panicf("unable to parse config: %v", err)
}

lvl, err := log.ParseLevel(conf.Logging.Level)
if err != nil {
log.Panicf("Failure setting logging level: %v", err)
log.Panicf("failure setting logging level: %v", err)
}
log.SetLevel(lvl)
log.Infof("Config reloaded")
log.Infof("config reloaded")
}
}

21
package.go
21
package.go
@ -22,9 +22,10 @@ func (pkg Package) Name() string {
}

// MArch returns package's march
func (pkg Package) MArch() string {
func (pkg Package) MArch() *string {
	splitPath := strings.Split(string(pkg), string(filepath.Separator))
	return strings.Join(strings.Split(splitPath[len(splitPath)-4], "-")[1:], "-")
	res := strings.Join(strings.Split(splitPath[len(splitPath)-4], "-")[1:], "-")
	return &res
}

// Repo returns package's dbpackage.Repository
@ -34,9 +35,9 @@ func (pkg Package) Repo() dbpackage.Repository {
}

// FullRepo returns package's dbpackage.Repository-march
func (pkg Package) FullRepo() string {
func (pkg Package) FullRepo() *string {
	splitPath := strings.Split(string(pkg), string(filepath.Separator))
	return splitPath[len(splitPath)-4]
	return &splitPath[len(splitPath)-4]
}

// Version returns version extracted from package
@ -46,10 +47,10 @@ func (pkg Package) Version() string {
}

// Arch returns package's Architecture
func (pkg Package) Arch() string {
func (pkg Package) Arch() *string {
	fNameSplit := strings.Split(filepath.Base(string(pkg)), "-")
	fNameSplit = strings.Split(fNameSplit[len(fNameSplit)-1], ".")
	return fNameSplit[0]
	return &fNameSplit[0]
}

// HasValidSignature returns if package has valid detached signature file
@ -69,13 +70,13 @@ func (pkg Package) HasValidSignature() (bool, error) {
}

// DBPackage returns ent.DBPackage for package
func (pkg Package) DBPackage(db *ent.Client) (*ent.DbPackage, error) {
	return pkg.DBPackageIsolated(pkg.MArch(), pkg.Repo(), db)
func (pkg Package) DBPackage(db *ent.Client) (*ent.DBPackage, error) {
	return pkg.DBPackageIsolated(*pkg.MArch(), pkg.Repo(), db)
}

// DBPackageIsolated returns ent.DBPackage like DBPackage, but not relying on the path for march and repo
func (pkg Package) DBPackageIsolated(march string, repo dbpackage.Repository, db *ent.Client) (*ent.DbPackage, error) {
	dbPkg, err := db.DbPackage.Query().Where(func(s *sql.Selector) {
func (pkg Package) DBPackageIsolated(march string, repo dbpackage.Repository, db *ent.Client) (*ent.DBPackage, error) {
	dbPkg, err := db.DBPackage.Query().Where(func(s *sql.Selector) {
		s.Where(
			sql.And(
				sqljson.ValueContains(dbpackage.FieldPackages, pkg.Name()),
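
With MArch, FullRepo and Arch now returning *string, call sites have to nil-check and dereference the result instead of comparing the plain string. A minimal caller sketch; the path and repository name below are made up for illustration and are not part of this commit:

	// hypothetical package path, assuming the usual <basedir>/<full-repo>/os/<arch>/<file> layout
	pkg := Package("/srv/repo/extra-x86_64-v3/os/x86_64/foo-1.0-1-x86_64.pkg.tar.zst")
	if fullRepo := pkg.FullRepo(); fullRepo != nil && *fullRepo == "extra-x86_64-v3" {
		log.Debugf("march of %s: %s", pkg.Name(), *pkg.MArch()) // "x86_64-v3"
	}
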
25 pkgbuild.go
@ -1,25 +0,0 @@
package main

import (
	"path/filepath"
	"strings"
)

type PKGBUILD string

// FullRepo returns full-repo from PKGBUILD'S path
func (p PKGBUILD) FullRepo() string {
	sPkgbuild := strings.Split(string(p), string(filepath.Separator))
	return sPkgbuild[len(sPkgbuild)-2]
}

// Repo returns repo from PKGBUILD's path
func (p PKGBUILD) Repo() string {
	return strings.Split(p.FullRepo(), "-")[0]
}

// PkgBase returns pkgbase from PKGBUILD's path
func (p PKGBUILD) PkgBase() string {
	sPkgbuild := strings.Split(string(p), string(filepath.Separator))
	return sPkgbuild[len(sPkgbuild)-4]
}
258 proto_package.go
@ -25,7 +25,6 @@ import (

type ProtoPackage struct {
	Pkgbase string
	Pkgbuild string
	Srcinfo *srcinfo.Srcinfo
	Arch string
	PkgFiles []string
@ -33,54 +32,46 @@ type ProtoPackage struct {
	March string
	FullRepo string
	Version string
	Hash string
	DBPackage *ent.DbPackage
	DBPackage *ent.DBPackage
	Pkgbuild string
	State *StateInfo
}

func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
	if err := p.genSrcinfo(); err != nil {
		return false, fmt.Errorf("error generating SRCINFO: %w", err)
	}
	p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
var (
	NotEligibleError = errors.New("package is not eligible")
)

func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
	if !p.isAvailable(alpmHandle) {
		log.Debugf("[%s/%s] Not available on mirror, skipping build", p.FullRepo, p.Pkgbase)
		log.Debugf("[%s/%s] not available on mirror, skipping build", p.FullRepo, p.Pkgbase)
		return false, nil
	}

	p.toDBPackage(true)
	skipping := false
	switch {
	case Contains(p.Srcinfo.Arch, "any"):
		log.Debugf("Skipped %s: any-Package", p.Srcinfo.Pkgbase)
		log.Debugf("skipped %s: any-package", p.Srcinfo.Pkgbase)
		p.DBPackage.SkipReason = "arch = any"
		p.DBPackage.Status = dbpackage.StatusSkipped
		skipping = true
	case Contains(conf.Blacklist.Packages, p.Srcinfo.Pkgbase):
		log.Debugf("Skipped %s: blacklisted package", p.Srcinfo.Pkgbase)
		log.Debugf("skipped %s: blacklisted package", p.Pkgbase)
		p.DBPackage.SkipReason = "blacklisted"
		p.DBPackage.Status = dbpackage.StatusSkipped
		skipping = true
	case Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
		Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc"):
		log.Debugf("Skipped %s: haskell package", p.Srcinfo.Pkgbase)
		p.DBPackage.SkipReason = "blacklisted (haskell)"
		p.DBPackage.Status = dbpackage.StatusSkipped
		skipping = true
	case p.DBPackage.MaxRss != nil && datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB > conf.Build.MemoryLimit:
		log.Debugf("Skipped %s: memory limit exceeded (%s)", p.Srcinfo.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB)
		log.Debugf("skipped %s: memory limit exceeded (%s)", p.Srcinfo.Pkgbase, datasize.ByteSize(*p.DBPackage.MaxRss)*datasize.KB)
		p.DBPackage.SkipReason = "memory limit exceeded"
		p.DBPackage.Status = dbpackage.StatusSkipped
		skipping = true
	case p.isPkgFailed():
		log.Debugf("Skipped %s: failed build", p.Srcinfo.Pkgbase)
		log.Debugf("skipped %s: failed build", p.Srcinfo.Pkgbase)
		skipping = true
	}

	if skipping {
		p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).
			SetPackages(packages2slice(p.Srcinfo.Packages)).SetStatus(p.DBPackage.Status).
			SetSkipReason(p.DBPackage.SkipReason).SetHash(p.Hash).SaveX(ctx)
		p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(p.Srcinfo.Packages)).SetVersion(p.Version).SetStatus(p.DBPackage.Status).
			SetSkipReason(p.DBPackage.SkipReason).SetTagRev(p.State.TagRev).SaveX(ctx)
		return false, nil
	} else {
		p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(p.Srcinfo.Packages)).SetVersion(p.Version).SaveX(ctx)
@ -94,8 +85,8 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
	if err != nil {
		p.DBPackage = p.DBPackage.Update().ClearRepoVersion().SaveX(ctx)
	} else if err == nil && alpm.VerCmp(repoVer, p.Version) > 0 {
		log.Debugf("Skipped %s: Version in repo higher than in PKGBUILD (%s < %s)", p.Srcinfo.Pkgbase, p.Version, repoVer)
		p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetHash(p.Hash).SaveX(ctx)
		log.Debugf("skipped %s: version in repo higher than in PKGBUILD (%s < %s)", p.Srcinfo.Pkgbase, p.Version, repoVer)
		p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetTagRev(p.State.TagRev).SaveX(ctx)
		return false, nil
	}

@ -104,12 +95,12 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
		switch err.(type) {
		default:
			return false, fmt.Errorf("error solving deps: %w", err)
		case MultiplePKGBUILDError:
			log.Infof("Skipped %s: Multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
		case MultipleStateFilesError:
			log.Infof("skipped %s: Multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
			p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
			return false, err
		case UnableToSatisfyError:
			log.Infof("Skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
			log.Infof("skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
			p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
			return false, err
		}
@ -119,7 +110,7 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {

	if !isLatest {
		if local != nil {
			log.Infof("Delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)",
			log.Infof("delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)",
				p.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
			p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).
				SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(ctx)
@ -133,7 +124,7 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
				return false, errors.New("overdue package waiting")
			}
		} else {
			log.Infof("Delayed %s: not all dependencies are up to date or resolvable", p.Srcinfo.Pkgbase)
			log.Infof("delayed %s: not all dependencies are up to date or resolvable", p.Srcinfo.Pkgbase)
			p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).SetSkipReason("waiting for mirror").ExecX(ctx)
		}
		return false, nil
@ -146,22 +137,6 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
	start := time.Now().UTC()
	chroot := "build_" + uuid.New().String()

	err := p.genSrcinfo()
	if err != nil {
		return time.Since(start), fmt.Errorf("error generating srcinfo: %w", err)
	}
	p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)

	log.Infof("[P] build starting: %s->%s->%s", p.FullRepo, p.Pkgbase, p.Version)

	p.toDBPackage(true)
	p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)

	err = p.importKeys()
	if err != nil {
		log.Warningf("[P] failed to import pgp keys for %s->%s->%s: %v", p.FullRepo, p.Pkgbase, p.Version, err)
	}

	buildFolder, err := p.setupBuildDir()
	if err != nil {
		return time.Since(start), fmt.Errorf("error setting up build folder: %w", err)
@ -175,6 +150,30 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
		}
	}()

	err = p.genSrcinfo()
	if err != nil {
		return time.Since(start), fmt.Errorf("error generating srcinfo: %w", err)
	}
	p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)

	elig, err := p.isEligible(context.Background())
	if err != nil {
		log.Warningf("[QG] %s->%s: %v", p.FullRepo, p.Pkgbase, err)
	}

	if !elig {
		return time.Since(start), NotEligibleError
	}

	log.Infof("[P] build starting: %s->%s->%s", p.FullRepo, p.Pkgbase, p.Version)

	p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)

	err = p.importKeys()
	if err != nil {
		log.Warningf("[P] failed to import pgp keys for %s->%s->%s: %v", p.FullRepo, p.Pkgbase, p.Version, err)
	}

	buildNo := 1
	versionSlice := strings.Split(p.DBPackage.LastVersionBuild, ".")
	if strings.Join(versionSlice[:len(versionSlice)-1], ".") == p.Version {
@ -253,7 +252,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
			ClearIoIn().
			ClearUTime().
			ClearSTime().
			SetHash(p.Hash).
			SetTagRev(p.State.TagRev).
			ExecX(ctx)
		return time.Since(start), fmt.Errorf("build failed: exit code %d", cmd.ProcessState.ExitCode())
	}
@ -308,7 +307,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
		SetLto(dbpackage.LtoEnabled).
		SetBuildTimeStart(start).
		SetLastVersionBuild(p.Version).
		SetHash(p.Hash).
		SetTagRev(p.State.TagRev).
		SetMaxRss(Rusage.Maxrss).
		SetIoOut(Rusage.Oublock).
		SetIoIn(Rusage.Inblock).
@ -337,19 +336,16 @@ func (p *ProtoPackage) setupBuildDir() (string, error) {
		return "", err
	}

	files, err := filepath.Glob(filepath.Join(filepath.Dir(p.Pkgbuild), "*"))
	cmd := exec.Command("git", "clone", "--depth", "1", "--branch", p.State.TagVer,
		fmt.Sprintf("https://gitlab.archlinux.org/archlinux/packaging/packages/%s.git", p.Pkgbase), buildDir)
	res, err := cmd.CombinedOutput()
	log.Debug(string(res))
	if err != nil {
		log.Fatalf("error cloning package repo %s: %v", p.Pkgbase, err)
		return "", err
	}

	for _, file := range files {
		err = copy.Copy(file, filepath.Join(buildDir, filepath.Base(file)))
		if err != nil {
			return "", err
		}
	}

	p.Pkgbuild = filepath.Join(buildDir, "PKGBUILD")

	return buildDir, nil
}

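build() now runs the eligibility check itself and reports a skipped or delayed package through the NotEligibleError sentinel instead of a plain error. The queue side is not part of the shown hunks; a minimal sketch of how a caller could tell the two cases apart, assuming a worker loop along these lines:

	dur, err := pkg.build(ctx)
	switch {
	case err == nil:
		log.Infof("build successful: %s (%s)", pkg.Pkgbase, dur)
	case errors.Is(err, NotEligibleError):
		// not a failure: isEligible decided to skip or delay the package
	default:
		log.Warningf("build failed: %s: %v", pkg.Pkgbase, err)
	}
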
@ -444,8 +440,27 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
	var pkg alpm.IPackage
	if p.Srcinfo != nil {
		pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
	} else {
	} else if len(p.DBPackage.Packages) > 0 {
		pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0])
	} else {
		cmd := exec.Command("unbuffer", "pacsift", "--exact", "--base="+p.Pkgbase, "--repo="+p.Repo.String()) //nolint:gosec
		var res []byte
		res, err = cmd.CombinedOutput()
		log.Debug(string(res))
		if err != nil || len(res) == 0 {
			log.Warningf("error getting packages from pacsift for %s: %v", p.Pkgbase, err)
			buildManager.alpmMutex.Unlock()
			return false
		}

		if len(strings.Split(strings.TrimSpace(string(res)), "\n")) > 0 {
			splitOut := strings.Split(strings.Split(strings.TrimSpace(string(res)), "\n")[0], "/")
			pkg, err = dbs.FindSatisfier(splitOut[1])
		} else {
			log.Warningf("error getting packages from pacsift for %s", p.Pkgbase)
			buildManager.alpmMutex.Unlock()
			return false
		}
	}
	buildManager.alpmMutex.Unlock()
	if err != nil {
@ -467,27 +482,31 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
	return true
}

func (p *ProtoPackage) SVN2GITVersion(h *alpm.Handle) (string, error) {
	if p.Pkgbuild == "" && p.Pkgbase == "" {
func (p *ProtoPackage) GitVersion(h *alpm.Handle) (string, error) {
	if p.Pkgbase == "" {
		return "", fmt.Errorf("invalid arguments")
	}

	pkgBuilds, _ := Glob(filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"))
	stateFiles, _ := Glob(filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase))

	var fPkgbuilds []string
	for _, pkgbuild := range pkgBuilds {
		mPkgbuild := PKGBUILD(pkgbuild)
		if mPkgbuild.FullRepo() == "trunk" || containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
	var fStateFiles []string
	for _, stateFile := range stateFiles {
		_, subRepo, _, err := stateFileMeta(stateFile)
		if err != nil {
			continue
		}

		if !Contains(fPkgbuilds, pkgbuild) {
			fPkgbuilds = append(fPkgbuilds, pkgbuild)
		if subRepo != nil {
			continue
		}

		if !Contains(fStateFiles, stateFile) {
			fStateFiles = append(fStateFiles, stateFile)
		}
	}

	if len(fPkgbuilds) > 1 {
		log.Infof("%s: multiple PKGBUILD found, try resolving from mirror", p.Pkgbase)
	if len(fStateFiles) > 1 {
		log.Infof("%s: multiple statefiles found, try resolving from mirror", p.Pkgbase)
		dbs, err := h.SyncDBs()
		if err != nil {
			return "", err
@ -501,54 +520,37 @@ func (p *ProtoPackage) SVN2GITVersion(h *alpm.Handle) (string, error) {
		}

	pkgloop:
		for _, pkgbuild := range fPkgbuilds {
			repo := strings.Split(filepath.Base(filepath.Dir(pkgbuild)), "-")[0]
			upstreamA := strings.Split(filepath.Dir(pkgbuild), "/")
			upstream := upstreamA[len(upstreamA)-4]
		for _, stateFile := range fStateFiles {
			repo, _, _, err := stateFileMeta(stateFile)
			if err != nil {
				continue
			}

			switch upstream {
			case "upstream-core-extra":
				if iPackage.DB().Name() == repo && (repo == "extra" || repo == "core") {
					fPkgbuilds = []string{pkgbuild}
					break pkgloop
				}
			case "upstream-community":
				if iPackage.DB().Name() == repo && repo == "community" {
					fPkgbuilds = []string{pkgbuild}
					break pkgloop
				}
			if iPackage.DB().Name() == repo {
				fStateFiles = []string{stateFile}
				break pkgloop
			}
		}

		if len(fPkgbuilds) > 1 {
			return "", MultiplePKGBUILDError{fmt.Errorf("%s: multiple PKGBUILD found: %s", p.Pkgbase, fPkgbuilds)}
		if len(fStateFiles) > 1 {
			return "", MultipleStateFilesError{fmt.Errorf("%s: multiple statefiles found: %s", p.Pkgbase, fStateFiles)}
		}
		log.Infof("%s: resolving successful: MirrorRepo=%s; PKGBUILD chosen: %s", p.Pkgbase, iPackage.DB().Name(), fPkgbuilds[0])
	} else if len(fPkgbuilds) == 0 {
		return "", fmt.Errorf("%s: no matching PKGBUILD found (searched: %s, canidates: %s)", p.Pkgbase,
			filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"), pkgBuilds)
		log.Infof("%s: resolving successful: MirrorRepo=%s; statefile chosen: %s", p.Pkgbase, iPackage.DB().Name(), fStateFiles[0])
	} else if len(fStateFiles) == 0 {
		return "", fmt.Errorf("%s: no matching statefile found (searched: %s, canidates: %s)", p.Pkgbase,
			filepath.Join(conf.Basedir.Work, stateDir, "**/"+p.Pkgbase), stateFiles)
	}

	pPkg := PKGBUILD(fPkgbuilds[0])
	dbPkg, err := db.DbPackage.Query().Where(dbpackage.RepositoryEQ(dbpackage.Repository(pPkg.Repo())),
		dbpackage.March(p.March), dbpackage.Pkgbase(p.Pkgbase)).Only(context.Background())
	if err == nil {
		return dbPkg.Version, nil
	}

	cmd := exec.Command("makepkg", "--printsrcinfo")
	cmd.Dir = filepath.Dir(fPkgbuilds[0])
	res, err := cmd.Output()
	rawState, err := os.ReadFile(fStateFiles[0])
	if err != nil {
		return "", err
		return "", fmt.Errorf("error reading statefile %s: %w", fStateFiles[0], err)
	}

	info, err := srcinfo.Parse(string(res))
	state, err := parseState(string(rawState))
	if err != nil {
		return "", err
		return "", fmt.Errorf("error parsing statefile: %w", err)
	}

	return constructVersion(info.Pkgver, info.Pkgrel, info.Epoch), nil
	return state.PkgVer, nil
}

func (p *ProtoPackage) isPkgFailed() bool {
@ -556,14 +558,6 @@ func (p *ProtoPackage) isPkgFailed() bool {
		return false
	}

	if err := p.genSrcinfo(); err != nil {
		return false
	}

	if p.Version == "" {
		p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
	}

	if alpm.VerCmp(p.DBPackage.Version, p.Version) < 0 {
		return false
	}
@ -575,16 +569,7 @@ func (p *ProtoPackage) genSrcinfo() error {
		return nil
	}

	if p.DBPackage != nil && p.DBPackage.Srcinfo != nil {
		var err error
		p.Srcinfo, err = srcinfo.Parse(*p.DBPackage.Srcinfo)
		if err != nil {
			return err
		}
		return nil
	}

	cmd := exec.Command("makepkg", "--printsrcinfo", "-p", filepath.Base(p.Pkgbuild)) //nolint:gosec
	cmd := exec.Command("makepkg", "--printsrcinfo", "-p", filepath.Base(p.Pkgbuild))
	cmd.Dir = filepath.Dir(p.Pkgbuild)
	res, err := cmd.CombinedOutput()
	if err != nil {
@ -595,11 +580,7 @@ func (p *ProtoPackage) genSrcinfo() error {
	if err != nil {
		return err
	}

	p.Srcinfo = info
	if p.DBPackage != nil {
		p.DBPackage = p.DBPackage.Update().SetSrcinfoHash(p.Hash).SetSrcinfo(string(res)).SaveX(context.Background())
	}

	return nil
}
@ -638,27 +619,32 @@ func (p *ProtoPackage) findPkgFiles() error {
	return nil
}

func (p *ProtoPackage) toDBPackage(create bool) {
func (p *ProtoPackage) toDBPackage(create bool) error {
	if p.DBPackage != nil {
		return
		return nil
	}

	dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March),
		dbpackage.RepositoryEQ(p.Repo))).Only(context.Background())
	if err != nil && create {
		dbPkg = db.DbPackage.Create().
	dbPkg, err := db.DBPackage.Query().Where(
		dbpackage.Pkgbase(p.Pkgbase),
		dbpackage.March(p.March),
		dbpackage.RepositoryEQ(p.Repo),
	).Only(context.Background())
	if err != nil && ent.IsNotFound(err) && create {
		dbPkg = db.DBPackage.Create().
			SetPkgbase(p.Pkgbase).
			SetMarch(p.March).
			SetPackages(packages2slice(p.Srcinfo.Packages)).
			SetRepository(p.Repo).
			SaveX(context.Background())
	} else if err != nil && !ent.IsNotFound(err) {
		return err
	}

	p.DBPackage = dbPkg
	return nil
}

func (p *ProtoPackage) exists() (bool, error) {
	dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March))).Exist(context.Background())
	dbPkg, err := db.DBPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March))).Exist(context.Background())
	if err != nil {
		return false, err
	}
@ -692,7 +678,7 @@ func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg alp
	svn2gitVer, err := (&ProtoPackage{
		Pkgbase: pkg.Base(),
		March: p.March,
	}).SVN2GITVersion(h)
	}).GitVersion(h)
	if err != nil {
		return false, nil, "", err
	} else if svn2gitVer == "" {
95 utils.go
@ -1,8 +1,6 @@
package main

import (
	"context"
	"encoding/hex"
	"fmt"
	"github.com/Jguer/go-alpm/v2"
	paconf "github.com/Morganamilo/go-pacmanconf"
@ -10,9 +8,7 @@ import (
	"github.com/c2h5oh/datasize"
	log "github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
	"io"
	"io/fs"
	"lukechampine.com/blake3"
	"os"
	"os/exec"
	"path/filepath"
@ -30,7 +26,7 @@ const (
	pristineChroot = "root"
	buildDir = "build"
	lastUpdate = "lastupdate"
	upstreamDir = "upstream"
	stateDir = "state"
	chrootDir = "chroot"
	makepkgDir = "makepkg"
	waitingDir = "to_be_moved"
@ -43,8 +39,6 @@ var (
	reVar = regexp.MustCompile(`(?mU)^#?[^\S\r\n]*(\w+)[^\S\r\n]*=[^\S\r\n]*([("])([^)"]*)([)"])[^\S\r\n]*$`)
	reEnvClean = regexp.MustCompile(`(?m) ([\s\\]+) `)
	rePkgRel = regexp.MustCompile(`(?m)^pkgrel\s*=\s*(.+)$`)
	rePkgSource = regexp.MustCompile(`(?msU)^source.*=.*\((.+)\)$`)
	rePkgSum = regexp.MustCompile(`(?msU)^sha256sums.*=.*\((.+)\)$`)
	rePkgFile = regexp.MustCompile(`^(.+)(?:-.+){2}-(?:x86_64|any)\.pkg\.tar\.zst(?:\.sig)*$`)
	reLdError = regexp.MustCompile(`(?mi).*collect2: error: ld returned (\d+) exit status.*`)
	reDownloadError = regexp.MustCompile(`(?m)^error: could not rename .+$`)
@ -56,7 +50,7 @@ var (
type Conf struct {
	Arch string
	Repos, March []string
	Svn2git map[string]string
	StateRepo string `yaml:"state_repo"`
	Basedir struct {
		Repo, Work, Debug string
	}
@ -73,9 +67,8 @@ type Conf struct {
		Level string
	}
	Blacklist struct {
		Packages []string
		Repo []string
		LTO []string `yaml:"lto"`
		Packages, Repo []string
		LTO []string `yaml:"lto"`
	}
	Housekeeping struct {
		Interval string
@ -85,19 +78,24 @@ type Conf struct {
		Skipped, Queued, Latest, Failed, Signing, Building, Unknown string
	}
}
	KernelPatches map[string]string `yaml:"kernel_patches"`
	KernelToPatch []string `yaml:"kernel_to_patch"`
}

type Globs []string

type MultiplePKGBUILDError struct {
type MultipleStateFilesError struct {
	error
}
type UnableToSatisfyError struct {
	error
}

type StateInfo struct {
	Pkgbase string
	PkgVer string
	TagVer string
	TagRev string
}

func updateLastUpdated() error {
	err := os.WriteFile(filepath.Join(conf.Basedir.Repo, lastUpdate), []byte(strconv.FormatInt(time.Now().Unix(), 10)), 0o644) //nolint:gosec
	if err != nil {
@ -125,22 +123,6 @@ func statusID2string(s dbpackage.Status) string {
	}
}

func b3sum(filePath string) (string, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer func(file *os.File) {
		_ = file.Close()
	}(file)

	hash := blake3.New(32, nil) //nolint:gomnd
	if _, err := io.Copy(hash, file); err != nil {
		return "", err
	}
	return hex.EncodeToString(hash.Sum(nil)), nil
}

func containsSubStr(str string, subList []string) bool {
	for _, checkStr := range subList {
		if strings.Contains(str, checkStr) {
@ -187,27 +169,21 @@ func pkgList2MaxMem(pkgList []*ProtoPackage) datasize.ByteSize {
	return datasize.ByteSize(sum) * datasize.KB
}

func genQueue() ([]*ProtoPackage, error) {
	pkgs, err := db.DbPackage.Query().Where(dbpackage.Or(dbpackage.StatusEQ(dbpackage.StatusQueued),
		dbpackage.StatusEQ(dbpackage.StatusBuild), dbpackage.StatusEQ(dbpackage.StatusBuilding))).All(context.Background())
	if err != nil {
		return nil, err
func stateFileMeta(stateFile string) (repo string, subRepo *string, arch string, err error) {
	nameSplit := strings.Split(filepath.Base(filepath.Dir(stateFile)), "-")
	if len(nameSplit) < 2 {
		err = fmt.Errorf("error getting metainfo")
		return
	}

	var pkgbuilds []*ProtoPackage
	for _, pkg := range pkgs {
		pkgbuilds = append(pkgbuilds, &ProtoPackage{
			Pkgbase: pkg.Pkgbase,
			Repo: pkg.Repository,
			March: pkg.March,
			FullRepo: pkg.Repository.String() + "-" + pkg.March,
			Hash: pkg.Hash,
			DBPackage: pkg,
			Pkgbuild: pkg.Pkgbuild,
			Version: pkg.RepoVersion,
		})
	repo = nameSplit[0]
	if len(nameSplit) == 3 {
		subRepo = &nameSplit[1]
		arch = nameSplit[2]
	} else {
		arch = nameSplit[1]
	}
	return pkgbuilds, nil
	return
}

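stateFileMeta derives repo, optional sub-repo and architecture from the name of the state file's parent directory, split on "-". A worked example; the paths are illustrative and assume the state checkout lives under conf.Basedir.Work/state:

	repo, subRepo, arch, err := stateFileMeta("/work/state/extra-any/linux")
	// repo == "extra", subRepo == nil, arch == "any", err == nil

	repo, subRepo, arch, err = stateFileMeta("/work/state/core-testing-x86_64/glibc")
	// repo == "core", *subRepo == "testing", arch == "x86_64", err == nil
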
func movePackagesLive(fullRepo string) error {
@ -238,11 +214,11 @@ func movePackagesLive(fullRepo string) error {
				return fmt.Errorf("unable to create folder for debug-packages: %w", mkErr)
			}
			forPackage := strings.TrimSuffix(pkg.Name(), "-debug")
			log.Debugf("[MOVE] Found debug package for package %s: %s", forPackage, pkg.Name())
			log.Debugf("[MOVE] found debug package for package %s: %s", forPackage, pkg.Name())
			debugPkgs++

			if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march, filepath.Base(file))); err == nil {
				log.Warningf("[MOVE] Overwrite existing debug infos for %s: %s", forPackage,
				log.Warningf("[MOVE] overwrite existing debug infos for %s: %s", forPackage,
					filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
			}

@ -254,7 +230,7 @@ func movePackagesLive(fullRepo string) error {
			continue
		}

		log.Warningf("[MOVE] Deleting package %s: %v", pkg.Name(), err)
		log.Warningf("[MOVE] deleting package %s: %v", pkg.Name(), err)
		_ = os.Remove(file)
		_ = os.Remove(file + ".sig")
		continue
@ -271,7 +247,6 @@ func movePackagesLive(fullRepo string) error {

		toAdd = append(toAdd, &ProtoPackage{
			DBPackage: dbPkg,
			Pkgbase: dbPkg.Pkgbase,
			PkgFiles: []string{filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file))},
			Version: pkg.Version(),
			March: march,
@ -279,7 +254,7 @@ func movePackagesLive(fullRepo string) error {
	}

	if len(toAdd) > 0 {
		log.Infof("[%s] Adding %d (%d with debug) packages", fullRepo, len(toAdd), debugPkgs)
		log.Infof("[%s] adding %d (%d with debug) packages", fullRepo, len(toAdd), debugPkgs)
		buildManager.repoAdd[fullRepo] <- toAdd
	}
	return nil
@ -563,6 +538,20 @@ func setupMakepkg(march string, flags map[string]any) error {
	return nil
}

func parseState(state string) (*StateInfo, error) {
	ss := strings.Split(state, " ")
	if len(ss) != 4 {
		return nil, fmt.Errorf("invalid state file")
	}

	return &StateInfo{
		Pkgbase: ss[0],
		PkgVer: ss[1],
		TagVer: ss[2],
		TagRev: strings.Trim(ss[3], "\n"),
	}, nil
}

func ContainsPkg(pkgs []*ProtoPackage, pkg *ProtoPackage, repoSensitive bool) bool {
	for _, tPkg := range pkgs {
		if tPkg.PkgbaseEquals(pkg, repoSensitive) {
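
parseState expects the one-line state file format "pkgbase pkgver tag-version tag-revision". A worked example with made-up values; the real files come from the configured state_repo:

	state, err := parseState("linux 6.4.7.arch1-1 6.4.7.arch1-1 a1b2c3d4e5f60718293a4b5c6d7e8f9012345678\n")
	// err == nil
	// state.Pkgbase == "linux"
	// state.PkgVer == "6.4.7.arch1-1"
	// state.TagVer == "6.4.7.arch1-1" (used as the git tag to clone in setupBuildDir)
	// state.TagRev == "a1b2c3d4e5f60718293a4b5c6d7e8f9012345678" (trailing newline trimmed)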