added golangci linter; code cleanup

parent e58a6a3ad7
commit ff21894aac

75  .golangci.yaml  (new file)
@@ -0,0 +1,75 @@
linters-settings:
  dupl:
    threshold: 100
  goconst:
    min-len: 3
    min-occurrences: 4
  gocritic:
    enabled-tags:
      - diagnostic
      - experimental
      - opinionated
      - performance
      - style
    disabled-checks:
      - whyNoLint
      - filepathJoin
  gomnd:
    checks:
      - argument
      - case
      - condition
      - return
    ignored-numbers:
      - '0'
      - '1'
      - '2'
      - '3'
      - '4'
      - '5'
    ignored-functions:
      - strings.SplitN
      - os.OpenFile
      - os.MkdirAll
      - os.WriteFile
  govet:
    check-shadowing: false
  lll:
    line-length: 140
  misspell:
    locale: US
  nolintlint:
    allow-unused: false # report any unused nolint directives
    require-explanation: false # don't require an explanation for nolint directives
    require-specific: false # don't require nolint directives to be specific about which linter is being skipped

linters:
  disable-all: true
  enable:
    - bodyclose
    - depguard
    - dogsled
    - dupl
    - errcheck
    - exportloopref
    - gochecknoinits
    - gocritic
    - gofmt
    - gomnd
    - goprintffuncname
    - gosec
    - gosimple
    - govet
    - lll
    - misspell
    - nakedret
    - noctx
    - nolintlint
    - staticcheck
    - stylecheck
    - typecheck
    - unconvert
    - unparam
    - unused
    - whitespace
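With this file at the repository root, golangci-lint picks the configuration up automatically; a typical local invocation (the CI or Makefile wiring is not part of this commit) would be:

    golangci-lint run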
16  go.mod

@@ -9,7 +9,7 @@ require (
github.com/Morganamilo/go-srcinfo v1.0.0
github.com/google/uuid v1.3.0
github.com/jackc/pgx/v4 v4.17.2
github.com/otiai10/copy v1.7.0
github.com/otiai10/copy v1.9.0
github.com/sethvargo/go-retry v0.2.3
github.com/sirupsen/logrus v1.9.0
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3
@@ -19,13 +19,13 @@
)

require (
ariga.io/atlas v0.8.1 // indirect
ariga.io/atlas v0.8.2 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
github.com/go-openapi/inflect v0.19.0 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/hashicorp/hcl/v2 v2.14.1 // indirect
github.com/hashicorp/hcl/v2 v2.15.0 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.13.0 // indirect
github.com/jackc/pgio v1.0.0 // indirect
@@ -33,11 +33,11 @@ require (
github.com/jackc/pgproto3/v2 v2.3.1 // indirect
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
github.com/jackc/pgtype v1.12.0 // indirect
github.com/klauspost/cpuid/v2 v2.1.2 // indirect
github.com/klauspost/cpuid/v2 v2.2.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/zclconf/go-cty v1.12.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/sys v0.1.0 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect
golang.org/x/crypto v0.3.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/text v0.4.0 // indirect
)
37  go.sum

@@ -1,5 +1,5 @@
ariga.io/atlas v0.8.1 h1:eTZS+qyBpqMTkCbatFquYXIqGPAIEzjQzkVXlOwkcEY=
ariga.io/atlas v0.8.1/go.mod h1:ft47uSh5hWGDCmQC9DsztZg6Xk+KagM5Ts/mZYKb9JE=
ariga.io/atlas v0.8.2 h1:uXRegk0Zd7nlCYC60tdx12xUvN2NmeGTc2MB5HnnbkA=
ariga.io/atlas v0.8.2/go.mod h1:ft47uSh5hWGDCmQC9DsztZg6Xk+KagM5Ts/mZYKb9JE=
entgo.io/ent v0.11.4 h1:grwVY0fp31BZ6oEo3YrXenAuv8VJmEw7F/Bi6WqeH3Q=
entgo.io/ent v0.11.4/go.mod h1:fnQIXL36RYnCk/9nvG4aE7YHBFZhCycfh7wMjY5p7SE=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -14,7 +14,6 @@ github.com/Morganamilo/go-srcinfo v1.0.0 h1:Wh4nEF+HJWo+29hnxM18Q2hi+DUf0GejS13+
github.com/Morganamilo/go-srcinfo v1.0.0/go.mod h1:MP6VGY1NNpVUmYIEgoM9acix95KQqIRyqQ0hCLsyYUY=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
@@ -40,8 +39,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34=
github.com/hashicorp/hcl/v2 v2.14.1/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8=
github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
@@ -91,8 +90,8 @@ github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.1.2 h1:XhdX4fqAJUA0yj+kUwMavO0hHrSPAecYdYf1ZmxHvak=
github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/cpuid/v2 v2.2.1 h1:U33DW0aiEj633gHYw3LoDNfkDiYnE5Q8M/TKJn2f2jI=
github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -115,13 +114,13 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE=
github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U=
github.com/otiai10/copy v1.9.0 h1:7KFNiCgZ91Ru4qW4CWPf/7jqtxLagGRmIxWldPP9VY4=
github.com/otiai10/copy v1.9.0/go.mod h1:hsfX19wcn0UWIHUQ3/4fHuehhk2UyArQ9dVFAn3FczI=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.3 h1:7JgpsBaN0uMkyju4tbYHu0mnM55hNKVYLsXmwr15NQI=
github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/otiai10/mint v1.4.0 h1:umwcf7gbpEwf7WFzqmWwSv0CzbeMsae2u9ZvpP8j2q4=
github.com/otiai10/mint v1.4.0/go.mod h1:gifjb2MYOoULtKLqUAEILUG/9KONW6f7YsJ6vQLTlFI=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -155,8 +154,8 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3 h1:shC1HB1UogxN5Ech3Yqaaxj1X/P656PPCB4RbojIJqc=
github.com/wercker/journalhook v0.0.0-20180428041537-5d0a5ae867b3/go.mod h1:XCsSkdKK4gwBMNrOCZWww0pX6AOt+2gYc5Z6jBRrNVg=
github.com/zclconf/go-cty v1.12.0 h1:F5E/vbilcrCtat9sYcEjlwwg1mDqbRTjyXR57nnx5sc=
github.com/zclconf/go-cty v1.12.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA=
github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY=
github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -179,13 +178,13 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -210,8 +209,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
58  main.go

@@ -94,14 +94,14 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
Name: repo,
}

pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx)
pkgs := db.DbPackage.Query().Order(ent.Asc(dbpackage.FieldPkgbase)).
Where(dbpackage.MarchEQ(march), dbpackage.RepositoryEQ(dbpackage.Repository(repo))).AllX(ctx)

for _, pkg := range pkgs {

addPkg := Pkg{
Pkgbase: pkg.Pkgbase,
Status: strings.ToUpper(pkg.Status.String()),
Class: statusId2string(pkg.Status),
Class: statusID2string(pkg.Status),
Skip: pkg.SkipReason,
Version: pkg.RepoVersion,
Svn2GitVersion: pkg.Version,
@@ -181,7 +181,8 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
Count int `json:"count"`
}

db.DbPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2)
db.DbPackage.Query().Where(dbpackage.StatusNEQ(dbpackage.StatusSkipped)).
GroupBy(dbpackage.FieldLto).Aggregate(ent.Count()).ScanX(ctx, &v2)

for _, c := range v2 {
switch c.Status {
@@ -200,7 +201,7 @@ func (b *BuildManager) htmlWorker(ctx context.Context) {
continue
}

f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, "packages.html"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
f, err := os.OpenFile(filepath.Join(conf.Basedir.Repo, "packages.html"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
log.Warningf("[HTML] Erro ropening output file: %v", err)
continue
@@ -235,9 +236,10 @@ func (b *BuildManager) repoWorker(repo string) {
}

for _, pkg := range pkgL {
pkg.toDbPackage(true)
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March, pkg.DbPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil {
pkg.DbPackage = pkg.DbPackage.Update().
pkg.toDBPackage(true)
if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, pkg.March,
pkg.DBPackage.Packages[0]+"-debug-"+pkg.Version+"-"+conf.Arch+".pkg.tar.zst")); err == nil {
pkg.DBPackage = pkg.DBPackage.Update().
SetStatus(dbpackage.StatusLatest).
ClearSkipReason().
SetDebugSymbols(dbpackage.DebugSymbolsAvailable).
@@ -245,7 +247,7 @@ func (b *BuildManager) repoWorker(repo string) {
SetHash(pkg.Hash).
SaveX(context.Background())
} else {
pkg.DbPackage = pkg.DbPackage.Update().
pkg.DBPackage = pkg.DBPackage.Update().
SetStatus(dbpackage.StatusLatest).
ClearSkipReason().
SetDebugSymbols(dbpackage.DebugSymbolsNotAvailable).
@@ -255,7 +257,7 @@ func (b *BuildManager) repoWorker(repo string) {
}
}

cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1")
cmd = exec.Command("paccache", "-rc", filepath.Join(conf.Basedir.Repo, repo, "os", conf.Arch), "-k", "1") //nolint:gosec
res, err = cmd.CombinedOutput()
log.Debug(string(res))
if err != nil {
@@ -301,8 +303,8 @@ func (b *BuildManager) repoWorker(repo string) {
log.Warningf("Error while deleting package %s: %s", pkg.Pkgbase, string(res))
}

if pkg.DbPackage != nil {
_ = pkg.DbPackage.Update().ClearRepoVersion().ClearHash().Exec(context.Background())
if pkg.DBPackage != nil {
_ = pkg.DBPackage.Update().ClearRepoVersion().ClearHash().Exec(context.Background())
}

for _, file := range pkg.PkgFiles {
@@ -320,7 +322,7 @@ func (b *BuildManager) repoWorker(repo string) {
}

func (b *BuildManager) syncWorker(ctx context.Context) error {
err := os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0755)
err := os.MkdirAll(filepath.Join(conf.Basedir.Work, upstreamDir), 0o755)
if err != nil {
log.Fatalf("Error creating upstream dir: %v", err)
}
@@ -392,7 +394,8 @@ func (b *BuildManager) syncWorker(ctx context.Context) error {
log.Fatal(err)
}

alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
if err != nil {
log.Warningf("Error while ALPM-init: %v", err)
}
@@ -509,23 +512,23 @@ func main() {
log.Warningf("Failed to drop priority: %v", err)
}

err = os.MkdirAll(conf.Basedir.Repo, 0755)
err = os.MkdirAll(conf.Basedir.Repo, 0o755)
if err != nil {
log.Fatalf("Error creating repo dir: %v", err)
}

if conf.Db.Driver == "pgx" {
pdb, err := sql.Open("pgx", conf.Db.ConnectTo)
if conf.DB.Driver == "pgx" {
pdb, err := sql.Open("pgx", conf.DB.ConnectTo)
if err != nil {
log.Fatalf("Failed to open database %s: %v", conf.Db.ConnectTo, err)
log.Fatalf("Failed to open database %s: %v", conf.DB.ConnectTo, err)
}

drv := sql.OpenDB(dialect.Postgres, pdb.DB())
db = ent.NewClient(ent.Driver(drv))
} else {
db, err = ent.Open(conf.Db.Driver, conf.Db.ConnectTo)
db, err = ent.Open(conf.DB.Driver, conf.DB.ConnectTo)
if err != nil {
log.Panicf("Failed to open database %s: %v", conf.Db.ConnectTo, err)
log.Panicf("Failed to open database %s: %v", conf.DB.ConnectTo, err)
}
defer func(Client *ent.Client) {
_ = Client.Close()
@@ -544,16 +547,17 @@ func main() {

err = setupChroot()
if err != nil {
log.Fatalf("Unable to setup chroot: %v", err)
log.Panicf("Unable to setup chroot: %v", err)
}
err = syncMarchs()
if err != nil {
log.Fatalf("Error syncing marchs: %v", err)
log.Panicf("Error syncing marchs: %v", err)
}

alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
alpmHandle, err = initALPM(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot),
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot, "/var/lib/pacman"))
if err != nil {
log.Fatalf("Error while ALPM-init: %v", err)
log.Panicf("Error while ALPM-init: %v", err)
}

ctx, cancel := context.WithCancel(context.Background())
@@ -571,17 +575,17 @@ killLoop:
case <-reloadSignals:
confStr, err := os.ReadFile("config.yaml")
if err != nil {
log.Fatalf("Unable to open config: %v", err)
log.Panicf("Unable to open config: %v", err)
}

err = yaml.Unmarshal(confStr, &conf)
if err != nil {
log.Fatalf("Unable to parse config: %v", err)
log.Panicf("Unable to parse config: %v", err)
}

lvl, err := log.ParseLevel(conf.Logging.Level)
if err != nil {
log.Fatalf("Failure setting logging level: %v", err)
log.Panicf("Failure setting logging level: %v", err)
}
log.SetLevel(lvl)
log.Infof("Config reloaded")
@@ -54,13 +54,14 @@ func (pkg Package) Arch() string {

// HasValidSignature returns if package has valid detached signature file
func (pkg Package) HasValidSignature() (bool, error) {
cmd := exec.Command("gpg", "--verify", string(pkg)+".sig")
cmd := exec.Command("gpg", "--verify", string(pkg)+".sig") //nolint:gosec
res, err := cmd.CombinedOutput()
if cmd.ProcessState.ExitCode() == 2 || cmd.ProcessState.ExitCode() == 1 {
switch {
case cmd.ProcessState.ExitCode() == 2 || cmd.ProcessState.ExitCode() == 1:
return false, nil
} else if cmd.ProcessState.ExitCode() == 0 {
case cmd.ProcessState.ExitCode() == 0:
return true, nil
} else if err != nil {
case err != nil:
return false, fmt.Errorf("error checking signature: %w (%s)", err, res)
}

190  proto_package.go

@@ -37,7 +37,7 @@ type ProtoPackage struct {
FullRepo string
Version string
Hash string
DbPackage *ent.DbPackage
DBPackage *ent.DbPackage
}

func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
@@ -51,51 +51,49 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
return false, nil
}

p.toDbPackage(true)
p.toDBPackage(true)
skipping := false
if Contains(p.Srcinfo.Arch, "any") {
switch {
case Contains(p.Srcinfo.Arch, "any"):
log.Debugf("Skipped %s: any-Package", p.Srcinfo.Pkgbase)
p.DbPackage.SkipReason = "arch = any"
p.DbPackage.Status = dbpackage.StatusSkipped
p.DBPackage.SkipReason = "arch = any"
p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if Contains(conf.Blacklist.Packages, p.Srcinfo.Pkgbase) {
case Contains(conf.Blacklist.Packages, p.Srcinfo.Pkgbase):
log.Debugf("Skipped %s: blacklisted package", p.Srcinfo.Pkgbase)
p.DbPackage.SkipReason = "blacklisted"
p.DbPackage.Status = dbpackage.StatusSkipped
p.DBPackage.SkipReason = "blacklisted"
p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc") {
// Skip Haskell packages for now, as we are facing linking problems with them,
// most likely caused by not having a dependency check implemented yet and building at random.
// https://git.harting.dev/anonfunc/ALHP.GO/issues/11
case Contains(p.Srcinfo.MakeDepends, "ghc") || Contains(p.Srcinfo.MakeDepends, "haskell-ghc") ||
Contains(p.Srcinfo.Depends, "ghc") || Contains(p.Srcinfo.Depends, "haskell-ghc"):
log.Debugf("Skipped %s: haskell package", p.Srcinfo.Pkgbase)
p.DbPackage.SkipReason = "blacklisted (haskell)"
p.DbPackage.Status = dbpackage.StatusSkipped
p.DBPackage.SkipReason = "blacklisted (haskell)"
p.DBPackage.Status = dbpackage.StatusSkipped
skipping = true
} else if p.isPkgFailed() {
case p.isPkgFailed():
log.Debugf("Skipped %s: failed build", p.Srcinfo.Pkgbase)
skipping = true
}

if skipping {
p.DbPackage = p.DbPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).
SetPackages(packages2slice(p.Srcinfo.Packages)).SetStatus(p.DbPackage.Status).
SetSkipReason(p.DbPackage.SkipReason).SetHash(p.Hash).SaveX(ctx)
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetVersion(p.Version).
SetPackages(packages2slice(p.Srcinfo.Packages)).SetStatus(p.DBPackage.Status).
SetSkipReason(p.DBPackage.SkipReason).SetHash(p.Hash).SaveX(ctx)
return false, nil
} else {
p.DbPackage = p.DbPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(p.Srcinfo.Packages)).SetVersion(p.Version).SaveX(ctx)
p.DBPackage = p.DBPackage.Update().SetUpdated(time.Now()).SetPackages(packages2slice(p.Srcinfo.Packages)).SetVersion(p.Version).SaveX(ctx)
}

if Contains(conf.Blacklist.LTO, p.Pkgbase) {
p.DbPackage = p.DbPackage.Update().SetLto(dbpackage.LtoDisabled).SaveX(ctx)
p.DBPackage = p.DBPackage.Update().SetLto(dbpackage.LtoDisabled).SaveX(ctx)
}

repoVer, err := p.repoVersion()
if err != nil {
p.DbPackage = p.DbPackage.Update().ClearRepoVersion().SaveX(ctx)
p.DBPackage = p.DBPackage.Update().ClearRepoVersion().SaveX(ctx)
} else if err == nil && alpm.VerCmp(repoVer, p.Version) > 0 {
log.Debugf("Skipped %s: Version in repo higher than in PKGBUILD (%s < %s)", p.Srcinfo.Pkgbase, p.Version, repoVer)
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetHash(p.Hash).SaveX(ctx)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusLatest).ClearSkipReason().SetHash(p.Hash).SaveX(ctx)
return false, nil
}

@@ -106,33 +104,35 @@ func (p *ProtoPackage) isEligible(ctx context.Context) (bool, error) {
return false, fmt.Errorf("error solving deps: %w", err)
case MultiplePKGBUILDError:
log.Infof("Skipped %s: Multiple PKGBUILDs for dependency found: %v", p.Srcinfo.Pkgbase, err)
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("multiple PKGBUILD for dep. found").SaveX(ctx)
return false, err
case UnableToSatisfyError:
log.Infof("Skipped %s: unable to resolve dependencies: %v", p.Srcinfo.Pkgbase, err)
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusSkipped).SetSkipReason("unable to resolve dependencies").SaveX(ctx)
return false, err
}
}

p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusQueued).SaveX(ctx)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).SaveX(ctx)

if !isLatest {
if local != nil {
log.Infof("Delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)", p.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
p.DbPackage.Update().SetStatus(dbpackage.StatusDelayed).SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(ctx)
log.Infof("Delayed %s: not all dependencies are up to date (local: %s==%s, sync: %s==%s)",
p.Srcinfo.Pkgbase, local.Name(), local.Version(), local.Name(), syncVersion)
p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).
SetSkipReason(fmt.Sprintf("waiting for %s==%s", local.Name(), syncVersion)).ExecX(ctx)

// Returning an error here causes the package to be purged.
// Purge delayed packages in case delay is caused by inconsistencies in svn2git.
// Worst case would be clients downloading a package update twice, once from their official mirror,
// and then after build from ALHP. Best case we prevent a not buildable package from staying in the repos
// in an outdated version.
if time.Since(local.BuildDate()).Hours() >= 48 && p.DbPackage.RepoVersion != "" {
if time.Since(local.BuildDate()).Hours() >= 48 && p.DBPackage.RepoVersion != "" {
return false, errors.New("overdue package waiting")
}
} else {
log.Infof("Delayed %s: not all dependencies are up to date or resolvable", p.Srcinfo.Pkgbase)
p.DbPackage.Update().SetStatus(dbpackage.StatusDelayed).SetSkipReason("waiting for mirror").ExecX(ctx)
p.DBPackage.Update().SetStatus(dbpackage.StatusDelayed).SetSkipReason("waiting for mirror").ExecX(ctx)
}
return false, nil
}
@@ -145,10 +145,9 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
// which leads to errors when it's trying to remove the same temporary download file.
// This can be removed as soon as we can pass separate cache locations to makechrootpkg.
rand.Seed(time.Now().UnixNano())
time.Sleep(time.Duration(rand.Float32()*60) * time.Second)
time.Sleep(time.Duration(rand.Float32()*60) * time.Second) //nolint:gosec
start := time.Now().UTC()
workerId := uuid.New()
chroot := "build_" + workerId.String()
chroot := "build_" + uuid.New().String()

if p.Version == "" {
if p.Srcinfo == nil {
@@ -163,8 +162,8 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {

log.Infof("[%s/%s/%s] Build starting", p.FullRepo, p.Pkgbase, p.Version)

p.toDbPackage(true)
p.DbPackage = p.DbPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)
p.toDBPackage(true)
p.DBPackage = p.DBPackage.Update().SetStatus(dbpackage.StatusBuilding).ClearSkipReason().SaveX(ctx)

err := p.importKeys()
if err != nil {
@@ -185,7 +184,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
}()

buildNo := 1
versionSlice := strings.Split(p.DbPackage.LastVersionBuild, ".")
versionSlice := strings.Split(p.DBPackage.LastVersionBuild, ".")
if strings.Join(versionSlice[:len(versionSlice)-1], ".") == p.Version {
buildNo, err = strconv.Atoi(versionSlice[len(versionSlice)-1])
if err != nil {
@@ -202,7 +201,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
if Contains(conf.KernelToPatch, p.Pkgbase) {
err = p.prepareKernelPatches()
if err != nil {
p.DbPackage.Update().SetStatus(dbpackage.StatusFailed).SetSkipReason("failed to apply patch").SetHash(p.Hash).ExecX(ctx)
p.DBPackage.Update().SetStatus(dbpackage.StatusFailed).SetSkipReason("failed to apply patch").SetHash(p.Hash).ExecX(ctx)
return time.Since(start), fmt.Errorf("error modifying PKGBUILD for kernel patch: %w", err)
}
}
@@ -211,12 +210,13 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {

// default to LTO
makepkgFile := makepkg
if p.DbPackage.Lto == dbpackage.LtoDisabled || p.DbPackage.Lto == dbpackage.LtoAutoDisabled {
if p.DBPackage.Lto == dbpackage.LtoDisabled || p.DBPackage.Lto == dbpackage.LtoAutoDisabled {
// use non-lto makepkg.conf if LTO is blacklisted for this package
makepkgFile = makepkgLTO
}
cmd := exec.CommandContext(ctx, "makechrootpkg", "-c", "-D", filepath.Join(conf.Basedir.Work, makepkgDir), "-l", chroot, "-r", filepath.Join(conf.Basedir.Work, chrootDir), "--",
"-m", "--noprogressbar", "--config", filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March)))
cmd := exec.CommandContext(ctx, "makechrootpkg", "-c", "-D", filepath.Join(conf.Basedir.Work, makepkgDir), //nolint:gosec
"-l", chroot, "-r", filepath.Join(conf.Basedir.Work, chrootDir), "--", "-m", "--noprogressbar", "--config",
filepath.Join(conf.Basedir.Work, makepkgDir, fmt.Sprintf(makepkgFile, p.March)))
cmd.Dir = filepath.Dir(p.Pkgbuild)
var out bytes.Buffer
cmd.Stdout = &out
@@ -231,7 +231,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {

Rusage, ok := cmd.ProcessState.SysUsage().(*syscall.Rusage)
if !ok {
log.Fatalf("Rusage is not of type *syscall.Rusage, are we running on unix-like?")
log.Panicf("Rusage is not of type *syscall.Rusage, are we running on unix-like?")
}

if err != nil {
@@ -239,26 +239,27 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
return time.Since(start), ctx.Err()
}

if p.DbPackage.Lto != dbpackage.LtoAutoDisabled && p.DbPackage.Lto != dbpackage.LtoDisabled && (reLdError.MatchString(out.String()) || reRustLTOError.MatchString(out.String())) {
p.DbPackage.Update().SetStatus(dbpackage.StatusQueued).SetSkipReason("non-LTO rebuild").SetLto(dbpackage.LtoAutoDisabled).ExecX(ctx)
if p.DBPackage.Lto != dbpackage.LtoAutoDisabled && p.DBPackage.Lto != dbpackage.LtoDisabled &&
(reLdError.MatchString(out.String()) || reRustLTOError.MatchString(out.String())) {
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).SetSkipReason("non-LTO rebuild").SetLto(dbpackage.LtoAutoDisabled).ExecX(ctx)
return time.Since(start), fmt.Errorf("ld/lto-incomp error detected, LTO disabled")
}

if reDownloadError.MatchString(out.String()) || rePortError.MatchString(out.String()) || reSigError.MatchString(out.String()) {
p.DbPackage.Update().SetStatus(dbpackage.StatusQueued).ExecX(ctx)
p.DBPackage.Update().SetStatus(dbpackage.StatusQueued).ExecX(ctx)
return time.Since(start), fmt.Errorf("known builderror detected")
}

err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, p.March), 0755)
err = os.MkdirAll(filepath.Join(conf.Basedir.Repo, logDir, p.March), 0o755)
if err != nil {
return time.Since(start), fmt.Errorf("error creating logdir: %w", err)
}
err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"), out.Bytes(), 0644)
err = os.WriteFile(filepath.Join(conf.Basedir.Repo, logDir, p.March, p.Pkgbase+".log"), out.Bytes(), 0o644) //nolint:gosec
if err != nil {
return time.Since(start), fmt.Errorf("error warting to logdir: %w", err)
}

p.DbPackage.Update().
p.DBPackage.Update().
SetStatus(dbpackage.StatusFailed).
ClearSkipReason().
SetBuildTimeStart(start).
@@ -296,7 +297,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {

holdingDir := filepath.Join(conf.Basedir.Work, waitingDir, p.FullRepo)
for _, file := range copyFiles {
err = os.MkdirAll(holdingDir, 0755)
err = os.MkdirAll(holdingDir, 0o755)
if err != nil {
return time.Since(start), fmt.Errorf("error creating %s: %w", holdingDir, err)
}
@@ -317,8 +318,8 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
}
}

if p.DbPackage.Lto != dbpackage.LtoDisabled && p.DbPackage.Lto != dbpackage.LtoAutoDisabled {
p.DbPackage.Update().
if p.DBPackage.Lto != dbpackage.LtoDisabled && p.DBPackage.Lto != dbpackage.LtoAutoDisabled {
p.DBPackage.Update().
SetStatus(dbpackage.StatusBuild).
SetLto(dbpackage.LtoEnabled).
SetBuildTimeStart(start).
@@ -331,7 +332,7 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
SetSTime(Rusage.Stime.Sec).
ExecX(ctx)
} else {
p.DbPackage.Update().
p.DBPackage.Update().
SetStatus(dbpackage.StatusBuild).
SetBuildTimeStart(start).
SetLastVersionBuild(p.Version).
@@ -347,14 +348,14 @@ func (p *ProtoPackage) build(ctx context.Context) (time.Duration, error) {
}

func (p *ProtoPackage) Priority() float64 {
if p.DbPackage == nil {
if p.DBPackage == nil {
return 0
}

if p.DbPackage.STime == nil || p.DbPackage.UTime == nil {
if p.DBPackage.STime == nil || p.DBPackage.UTime == nil {
return 0
} else {
return float64(*p.DbPackage.STime + *p.DbPackage.UTime)
return float64(*p.DBPackage.STime + *p.DBPackage.UTime)
}
}

@@ -366,7 +367,7 @@ func (p *ProtoPackage) setupBuildDir() (string, error) {
return "", fmt.Errorf("removing old builddir failed: %w", err)
}

err = os.MkdirAll(buildDir, 0755)
err = os.MkdirAll(buildDir, 0o755)
if err != nil {
return "", err
}
@@ -413,7 +414,7 @@ func (p *ProtoPackage) increasePkgRel(buildNo int) error {
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
}

f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0644)
f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0o644)
if err != nil {
return err
}
@@ -450,7 +451,7 @@ func (p *ProtoPackage) increasePkgRel(buildNo int) error {
}

func (p *ProtoPackage) prepareKernelPatches() error {
f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0644)
f, err := os.OpenFile(p.Pkgbuild, os.O_RDWR, 0o644)
if err != nil {
return err
}
@@ -480,28 +481,21 @@ func (p *ProtoPackage) prepareKernelPatches() error {
}

newPKGBUILD := string(fStr)
if conf.KernelPatches[curVer] == "none" {
switch {
case conf.KernelPatches[curVer] == "none":
return fmt.Errorf("no patch available")
} else if conf.KernelPatches[curVer] == "skip" {
case conf.KernelPatches[curVer] == "skip":
log.Debugf("[KP] skipped patching for %s", p.Pkgbase)
} else {
default:
log.Debugf("[KP] choose patch %s for kernel %s", curVer, p.Srcinfo.Pkgver)

// add patch to source-array
orgSource := rePkgSource.FindStringSubmatch(newPKGBUILD)
if orgSource == nil || len(orgSource) < 1 {
return fmt.Errorf("no source=() found")
}

sources := strings.Split(orgSource[1], "\n")
sources = append(sources, fmt.Sprintf("\"%s\"", conf.KernelPatches[curVer]))

sources = append(sources, fmt.Sprintf("%q", conf.KernelPatches[curVer]))
newPKGBUILD = rePkgSource.ReplaceAllLiteralString(newPKGBUILD, fmt.Sprintf("source=(%s)", strings.Join(sources, "\n")))

// add patch sha256 to sha256sums-array (yes, hardcoded to sha256)
// TODO: support all sums that makepkg also supports
// get sum
resp, err := http.Get(conf.KernelPatches[curVer])
resp, err := http.Get(conf.KernelPatches[curVer]) //nolint:bodyclose,noctx
if err != nil || resp.StatusCode != 200 {
return err
}
@@ -513,15 +507,12 @@ func (p *ProtoPackage) prepareKernelPatches() error {
if err != nil {
return err
}

orgSums := rePkgSum.FindStringSubmatch(newPKGBUILD)
if orgSums == nil || len(orgSums) < 1 {
return fmt.Errorf("no sha256sums=() found")
}

sums := strings.Split(orgSums[1], "\n")
sums = append(sums, fmt.Sprintf("'%s'", hex.EncodeToString(h.Sum(nil))))

newPKGBUILD = rePkgSum.ReplaceAllLiteralString(newPKGBUILD, fmt.Sprintf("sha256sums=(\n%s\n)", strings.Join(sums, "\n")))
}

@@ -583,7 +574,7 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
if p.Srcinfo != nil {
pkg, err = dbs.FindSatisfier(p.Srcinfo.Packages[0].Pkgname)
} else {
pkg, err = dbs.FindSatisfier(p.DbPackage.Packages[0])
pkg, err = dbs.FindSatisfier(p.DBPackage.Packages[0])
}
buildManager.alpmMutex.Unlock()
if err != nil {
@@ -597,7 +588,8 @@ func (p *ProtoPackage) isAvailable(h *alpm.Handle) bool {
}

if p.Srcinfo != nil && (!Contains(p.Srcinfo.Arch, pkg.Architecture()) || p.Srcinfo.Pkgbase != pkg.Base()) {
log.Debugf("%s: arch (%s!=%s) or pkgbase (%s!=%s) does not match", p.Pkgbase, p.Srcinfo.Arch[0], pkg.Architecture(), pkg.Base(), p.Pkgbase)
log.Debugf("%s: arch (%s!=%s) or pkgbase (%s!=%s) does not match", p.Pkgbase, p.Srcinfo.Arch[0],
pkg.Architecture(), pkg.Base(), p.Pkgbase)
return false
}

@@ -662,11 +654,13 @@ func (p *ProtoPackage) SVN2GITVersion(h *alpm.Handle) (string, error) {
}
log.Infof("%s: resolving successful: MirrorRepo=%s; PKGBUILD chosen: %s", p.Pkgbase, iPackage.DB().Name(), fPkgbuilds[0])
} else if len(fPkgbuilds) == 0 {
return "", fmt.Errorf("%s: no matching PKGBUILD found (searched: %s, canidates: %s)", p.Pkgbase, filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"), pkgBuilds)
return "", fmt.Errorf("%s: no matching PKGBUILD found (searched: %s, canidates: %s)", p.Pkgbase,
filepath.Join(conf.Basedir.Work, upstreamDir, "**/"+p.Pkgbase+"/repos/*/PKGBUILD"), pkgBuilds)
}

pPkg := PKGBUILD(fPkgbuilds[0])
dbPkg, err := db.DbPackage.Query().Where(dbpackage.RepositoryEQ(dbpackage.Repository(pPkg.Repo())), dbpackage.March(p.March), dbpackage.Pkgbase(p.Pkgbase)).Only(context.Background())
dbPkg, err := db.DbPackage.Query().Where(dbpackage.RepositoryEQ(dbpackage.Repository(pPkg.Repo())),
dbpackage.March(p.March), dbpackage.Pkgbase(p.Pkgbase)).Only(context.Background())
if err == nil {
return dbPkg.Version, nil
}
@@ -687,7 +681,7 @@ func (p *ProtoPackage) SVN2GITVersion(h *alpm.Handle) (string, error) {
}

func (p *ProtoPackage) isPkgFailed() bool {
if p.DbPackage.Version == "" {
if p.DBPackage.Version == "" {
return false
}

@@ -699,10 +693,10 @@ func (p *ProtoPackage) isPkgFailed() bool {
p.Version = constructVersion(p.Srcinfo.Pkgver, p.Srcinfo.Pkgrel, p.Srcinfo.Epoch)
}

if alpm.VerCmp(p.DbPackage.Version, p.Version) < 0 {
if alpm.VerCmp(p.DBPackage.Version, p.Version) < 0 {
return false
}
return p.DbPackage.Status == dbpackage.StatusFailed
return p.DBPackage.Status == dbpackage.StatusFailed
}

func (p *ProtoPackage) genSrcinfo() error {
@@ -710,16 +704,16 @@ func (p *ProtoPackage) genSrcinfo() error {
return nil
}

if p.DbPackage != nil && p.DbPackage.Srcinfo != nil {
if p.DBPackage != nil && p.DBPackage.Srcinfo != nil {
var err error
p.Srcinfo, err = srcinfo.Parse(*p.DbPackage.Srcinfo)
p.Srcinfo, err = srcinfo.Parse(*p.DBPackage.Srcinfo)
if err != nil {
return err
}
return nil
}

cmd := exec.Command("makepkg", "--printsrcinfo", "-p", filepath.Base(p.Pkgbuild))
cmd := exec.Command("makepkg", "--printsrcinfo", "-p", filepath.Base(p.Pkgbuild)) //nolint:gosec
cmd.Dir = filepath.Dir(p.Pkgbuild)
res, err := cmd.CombinedOutput()
if err != nil {
@@ -732,8 +726,8 @@ func (p *ProtoPackage) genSrcinfo() error {
}

p.Srcinfo = info
if p.DbPackage != nil {
p.DbPackage = p.DbPackage.Update().SetSrcinfoHash(p.Hash).SetSrcinfo(string(res)).SaveX(context.Background())
if p.DBPackage != nil {
p.DBPackage = p.DBPackage.Update().SetSrcinfoHash(p.Hash).SetSrcinfo(string(res)).SaveX(context.Background())
}

return nil
@@ -745,15 +739,13 @@ func (p *ProtoPackage) findPkgFiles() error {
return err
}

if p.DbPackage == nil && p.Srcinfo == nil {
if p.DBPackage == nil && p.Srcinfo == nil {
return fmt.Errorf("unable to find pkgfiles without dbpkg or srcinfo present")
}

var realPkgs []string
if p.DbPackage != nil {
for _, realPkg := range p.DbPackage.Packages {
realPkgs = append(realPkgs, realPkg)
}
if p.DBPackage != nil {
realPkgs = append(realPkgs, p.DBPackage.Packages...)
} else {
for _, realPkg := range p.Srcinfo.Packages {
realPkgs = append(realPkgs, realPkg.Pkgname)
@@ -775,17 +767,23 @@ func (p *ProtoPackage) findPkgFiles() error {
return nil
}

func (p *ProtoPackage) toDbPackage(create bool) {
if p.DbPackage != nil {
func (p *ProtoPackage) toDBPackage(create bool) {
if p.DBPackage != nil {
return
}

dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March), dbpackage.RepositoryEQ(p.Repo))).Only(context.Background())
dbPkg, err := db.DbPackage.Query().Where(dbpackage.And(dbpackage.Pkgbase(p.Pkgbase), dbpackage.March(p.March),
dbpackage.RepositoryEQ(p.Repo))).Only(context.Background())
if err != nil && create {
dbPkg = db.DbPackage.Create().SetPkgbase(p.Pkgbase).SetMarch(p.March).SetPackages(packages2slice(p.Srcinfo.Packages)).SetRepository(p.Repo).SaveX(context.Background())
dbPkg = db.DbPackage.Create().
SetPkgbase(p.Pkgbase).
SetMarch(p.March).
SetPackages(packages2slice(p.Srcinfo.Packages)).
SetRepository(p.Repo).
SaveX(context.Background())
}

p.DbPackage = dbPkg
p.DBPackage = dbPkg
}

func (p *ProtoPackage) exists() (bool, error) {
@@ -797,7 +795,7 @@ func (p *ProtoPackage) exists() (bool, error) {
return dbPkg, nil
}

func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (bool, alpm.IPackage, string, error) {
func (p *ProtoPackage) isMirrorLatest(h *alpm.Handle) (latest bool, foundPkg alpm.IPackage, version string, err error) {
dbs, err := h.SyncDBs()
if err != nil {
return false, nil, "", err
118  utils.go

@@ -72,10 +72,10 @@ type Conf struct {
Basedir struct {
Repo, Work, Debug string
}
Db struct {
DB struct {
Driver string
ConnectTo string `yaml:"connect_to"`
}
} `yaml:"db"`
Build struct {
Worker int
Makej int
@@ -112,7 +112,7 @@ type UnableToSatisfyError struct {
}

func updateLastUpdated() error {
err := os.WriteFile(filepath.Join(conf.Basedir.Repo, lastUpdate), []byte(strconv.FormatInt(time.Now().Unix(), 10)), 0644)
err := os.WriteFile(filepath.Join(conf.Basedir.Repo, lastUpdate), []byte(strconv.FormatInt(time.Now().Unix(), 10)), 0o644) //nolint:gosec
if err != nil {
return err
}
@@ -138,7 +138,8 @@ func (b *BuildManager) refreshSRCINFOs(ctx context.Context, path string) error {
defer wg.Done()
for _, pkgbuild := range pkgBuilds {
mPkgbuild := PKGBUILD(pkgbuild)
if mPkgbuild.FullRepo() == "trunk" || !Contains(conf.Repos, mPkgbuild.Repo()) || containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
if mPkgbuild.FullRepo() == "trunk" || !Contains(conf.Repos, mPkgbuild.Repo()) ||
containsSubStr(mPkgbuild.FullRepo(), conf.Blacklist.Repo) {
continue
}

@@ -179,15 +180,15 @@ func (b *BuildManager) refreshSRCINFOs(ctx context.Context, path string) error {
March: march,
FullRepo: mPkgbuild.Repo() + "-" + march,
Hash: b3s,
DbPackage: dbPkg,
DBPackage: dbPkg,
}

_, err = proto.isEligible(ctx)
if err != nil {
log.Infof("Unable to determine status for package %s: %v", proto.Pkgbase, err)
b.repoPurge[proto.FullRepo] <- []*ProtoPackage{proto}
} else if proto.DbPackage != nil {
proto.DbPackage.Update().SetPkgbuild(proto.Pkgbuild).ExecX(ctx)
} else if proto.DBPackage != nil {
proto.DBPackage.Update().SetPkgbuild(proto.Pkgbuild).ExecX(ctx)
}
}
}
@@ -200,7 +201,7 @@ func (b *BuildManager) refreshSRCINFOs(ctx context.Context, path string) error {
return nil
}

func statusId2string(s dbpackage.Status) string {
func statusID2string(s dbpackage.Status) string {
switch s {
case dbpackage.StatusSkipped:
return conf.Status.Class.Skipped
@@ -228,7 +229,7 @@ func b3sum(filePath string) (string, error) {
_ = file.Close()
}(file)

hash := blake3.New(32, nil)
hash := blake3.New(32, nil) //nolint:gomnd
if _, err := io.Copy(hash, file); err != nil {
return "", err
}
@@ -244,7 +245,7 @@ func containsSubStr(str string, subList []string) bool {
return false
}

func cleanBuildDir(dir string, chrootDir string) error {
func cleanBuildDir(dir, chrootDir string) error {
if stat, err := os.Stat(dir); err == nil && stat.IsDir() {
err = os.RemoveAll(dir)
if err != nil {
@@ -316,7 +317,7 @@ func genQueue() ([]*ProtoPackage, error) {
March: pkg.March,
FullRepo: pkg.Repository.String() + "-" + pkg.March,
Hash: pkg.Hash,
DbPackage: pkg,
DBPackage: pkg,
Pkgbuild: pkg.Pkgbuild,
Version: pkg.RepoVersion,
})
@@ -347,7 +348,7 @@ func movePackagesLive(fullRepo string) error {
dbPkg, err := pkg.DBPackageIsolated(march, dbpackage.Repository(repo), db)
if err != nil {
if strings.HasSuffix(pkg.Name(), "-debug") {
mkErr := os.MkdirAll(filepath.Join(conf.Basedir.Debug, march), 0755)
mkErr := os.MkdirAll(filepath.Join(conf.Basedir.Debug, march), 0o755)
if mkErr != nil {
return fmt.Errorf("unable to create folder for debug-packages: %w", mkErr)
}
@@ -356,7 +357,8 @@ func movePackagesLive(fullRepo string) error {
debugPkgs++

if _, err := os.Stat(filepath.Join(conf.Basedir.Debug, march, filepath.Base(file))); err == nil {
log.Warningf("[MOVE] Existing debug infos for %s, skipping: %s", forPackage, filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
log.Warningf("[MOVE] Existing debug infos for %s, skipping: %s", forPackage,
filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
} else {
err = os.Rename(file, filepath.Join(conf.Basedir.Debug, march, filepath.Base(file)))
if err != nil {
@@ -383,7 +385,7 @@ func movePackagesLive(fullRepo string) error {
}

toAdd = append(toAdd, &ProtoPackage{
DbPackage: dbPkg,
DBPackage: dbPkg,
Pkgbase: dbPkg.Pkgbase,
PkgFiles: []string{filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch, filepath.Base(file))},
Version: pkg.Version(),
@@ -419,14 +421,14 @@ func packages2slice(pkgs interface{}) []string {
}
}

func constructVersion(pkgver string, pkgrel string, epoch string) string {
func constructVersion(pkgver, pkgrel, epoch string) string {
if epoch == "" {
return pkgver + "-" + pkgrel
}
return epoch + ":" + pkgver + "-" + pkgrel
}

func initALPM(root string, dbpath string) (*alpm.Handle, error) {
func initALPM(root, dbpath string) (*alpm.Handle, error) {
h, err := alpm.Initialize(root, dbpath)
if err != nil {
return nil, err
@@ -467,33 +469,35 @@ func initALPM(root string, dbpath string) (*alpm.Handle, error) {
}

func setupChroot() error {
if _, err := os.Stat(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot)); err == nil {
//goland:noinspection SpellCheckingInspection
cmd := exec.Command("arch-nspawn", filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "pacman", "-Syuu", "--noconfirm")
_, err := os.Stat(filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot))
switch {
case err == nil:
cmd := exec.Command("arch-nspawn", filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), //nolint:gosec
"pacman", "-Syuu", "--noconfirm")
res, err := cmd.CombinedOutput()
log.Debug(string(res))
if err != nil {
return fmt.Errorf("Unable to update chroot: %w\n%s", err, string(res))
return fmt.Errorf("error updating chroot: %w\n%s", err, string(res))
}
} else if os.IsNotExist(err) {
err = os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0755)
case os.IsNotExist(err):
err = os.MkdirAll(filepath.Join(conf.Basedir.Work, chrootDir), 0o755)
if err != nil {
return err
}

cmd := exec.Command("mkarchroot", "-C", pacmanConf, filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "base-devel")
cmd := exec.Command("mkarchroot", "-C", pacmanConf, //nolint:gosec
filepath.Join(conf.Basedir.Work, chrootDir, pristineChroot), "base-devel")
res, err := cmd.CombinedOutput()
log.Debug(string(res))
if err != nil {
return fmt.Errorf("Unable to create chroot: %w\n%s", err, string(res))
return fmt.Errorf("error creating chroot: %w\n%s", err, string(res))
}
} else {
default:
return err
}
return nil
}

func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
func housekeeping(repo, march string, wg *sync.WaitGroup) error {
defer wg.Done()
fullRepo := repo + "-" + march
log.Debugf("[%s] Start housekeeping", fullRepo)
@@ -525,20 +529,20 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
Pkgbase: dbPkg.Pkgbase,
Repo: mPackage.Repo(),
FullRepo: mPackage.FullRepo(),
DbPackage: dbPkg,
DBPackage: dbPkg,
March: mPackage.MArch(),
Arch: mPackage.Arch(),
}

var upstream string
switch pkg.DbPackage.Repository {
switch pkg.DBPackage.Repository {
case dbpackage.RepositoryCore, dbpackage.RepositoryExtra:
upstream = "upstream-core-extra"
case dbpackage.RepositoryCommunity:
upstream = "upstream-community"
}
pkg.Pkgbuild = filepath.Join(conf.Basedir.Work, upstreamDir, upstream, dbPkg.Pkgbase, "repos",
pkg.DbPackage.Repository.String()+"-"+conf.Arch, "PKGBUILD")
pkg.DBPackage.Repository.String()+"-"+conf.Arch, "PKGBUILD")

// check if package is still part of repo
dbs, err := alpmHandle.SyncDBs()
@@ -548,20 +552,20 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
buildManager.alpmMutex.Lock()
pkgResolved, err := dbs.FindSatisfier(mPackage.Name())
buildManager.alpmMutex.Unlock()
if err != nil || pkgResolved.DB().Name() != pkg.DbPackage.Repository.String() || pkgResolved.DB().Name() != pkg.Repo.String() ||
if err != nil || pkgResolved.DB().Name() != pkg.DBPackage.Repository.String() || pkgResolved.DB().Name() != pkg.Repo.String() ||
pkgResolved.Architecture() != pkg.Arch || pkgResolved.Name() != mPackage.Name() {
// package not found on mirror/db -> not part of any repo anymore
log.Infof("[HK/%s/%s] not included in repo", pkg.FullRepo, mPackage.Name())
buildManager.repoPurge[pkg.FullRepo] <- []*ProtoPackage{pkg}
err = db.DbPackage.DeleteOne(pkg.DbPackage).Exec(context.Background())
err = db.DbPackage.DeleteOne(pkg.DBPackage).Exec(context.Background())
if err != nil {
return err
}
continue
}

if pkg.DbPackage.LastVerified.Before(pkg.DbPackage.BuildTimeStart) {
err := pkg.DbPackage.Update().SetLastVerified(time.Now().UTC()).Exec(context.Background())
if pkg.DBPackage.LastVerified.Before(pkg.DBPackage.BuildTimeStart) {
err := pkg.DBPackage.Update().SetLastVerified(time.Now().UTC()).Exec(context.Background())
if err != nil {
return err
}
@@ -581,7 +585,7 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
repoVer, err := pkg.repoVersion()
if err == nil && repoVer != dbPkg.RepoVersion {
log.Infof("[HK/%s/%s] update %s->%s in db", pkg.FullRepo, pkg.Pkgbase, dbPkg.RepoVersion, repoVer)
pkg.DbPackage, err = pkg.DbPackage.Update().SetRepoVersion(repoVer).ClearHash().Save(context.Background())
pkg.DBPackage, err = pkg.DBPackage.Update().SetRepoVersion(repoVer).ClearHash().Save(context.Background())
if err != nil {
return err
}
@@ -605,7 +609,7 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
Repo: dbPkg.Repository,
March: dbPkg.March,
FullRepo: dbPkg.Repository.String() + "-" + dbPkg.March,
DbPackage: dbPkg,
DBPackage: dbPkg,
}

if !pkg.isAvailable(alpmHandle) {
@@ -617,24 +621,25 @@ func housekeeping(repo string, march string, wg *sync.WaitGroup) error {
continue
}

if dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion != "" {
switch {
case dbPkg.Status == dbpackage.StatusLatest && dbPkg.RepoVersion != "":
var existingSplits []string
var missingSplits []string
for _, splitPkg := range dbPkg.Packages {
pkgFile := filepath.Join(conf.Basedir.Repo, fullRepo, "os", conf.Arch,
splitPkg+"-"+dbPkg.RepoVersion+"-"+conf.Arch+".pkg.tar.zst")
if _, err := os.Stat(pkgFile); os.IsNotExist(err) {
switch {
case os.IsNotExist(err):
missingSplits = append(missingSplits, splitPkg)
} else if err != nil {
case err != nil:
log.Warningf("[HK] error reading package-file %s: %v", splitPkg, err)
} else {
default:
existingSplits = append(existingSplits, pkgFile)
}
}

if len(missingSplits) > 0 {
log.Infof("[HK/%s] missing split-package(s) %s for pkgbase %s", fullRepo, missingSplits, dbPkg.Pkgbase)
pkg.DbPackage, err = pkg.DbPackage.Update().ClearRepoVersion().ClearHash().SetStatus(dbpackage.StatusQueued).Save(context.Background())