Compare commits

1 commit: 2.7.0 ... jane400/ve

Commit 20ba9e4131
.gitlab-ci.yml (123 changed lines)

@@ -6,126 +6,43 @@ image: alpine:edge
 variables:
   GOFLAGS: "-buildvcs=false"
   PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/mkinitfs-vendor-${CI_COMMIT_TAG}/${CI_COMMIT_TAG}"
-  CI_TRON_TEMPLATE_PROJECT: &ci-tron-template-project postmarketOS/ci-common
-  CI_TRON_JOB_TEMPLATE_PROJECT_URL: $CI_SERVER_URL/$CI_TRON_TEMPLATE_PROJECT
-  CI_TRON_JOB_TEMPLATE_COMMIT: &ci-tron-template-commit 7c95b5f2d53533e8722abf57c73e558168e811f3

-include:
-  - project: *ci-tron-template-project
-    ref: *ci-tron-template-commit
-    file: '/ci-tron/common.yml'
-
 stages:
+  - lint
   - build
-  - hardware tests
   - vendor
   - release

-workflow:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-    - if: $CI_COMMIT_BRANCH == 'master'
-    - if: '$CI_COMMIT_TAG != null'
+# defaults for "only"
+# We need to run the CI jobs in a "merge request specific context", if CI is
+# running in a merge request. Otherwise the environment variable that holds the
+# merge request ID is not available. This means, we must set the "only"
+# variable accordingly - and if we only do it for one job, all other jobs will
+# not get executed. So have the defaults here, and use them in all jobs that
+# should run on both the master branch, and in merge requests.
+# https://docs.gitlab.com/ee/ci/merge_request_pipelines/index.html#excluding-certain-jobs
+.only-default: &only-default
+  only:
+    - master
+    - merge_requests
+    - tags

 build:
   stage: build
-  variables:
-    GOTEST: "gotestsum --junitfile report.xml --format testname -- ./..."
-  parallel:
-    matrix:
-      - TAG: shared
-      - TAG: arm64
-  tags:
-    - $TAG
+  <<: *only-default
   before_script:
-    - apk -q add go gotestsum staticcheck make scdoc
+    - apk -q add go staticcheck make scdoc
   script:
     - make test
     - make
-  after_script:
-    - mkdir -p rootfs/usr/sbin
-    - cp mkinitfs rootfs/usr/sbin
   artifacts:
     expire_in: 1 week
-    reports:
-      junit: report.xml
-    paths:
-      - rootfs
-
-.qemu-common:
-  variables:
-    DEVICE_NAME: qemu-$CPU_ARCH
-    KERNEL_VARIANT: lts
-  rules:
-    - if: '$CI_COMMIT_TAG != null'
-      when: never
-
-.build-ci-tron-qemu:
-  stage: hardware tests
-  extends:
-    - .pmos-ci-tron-build-boot-artifacts
-    - .qemu-common
-  variables:
-    INSTALL_PACKAGES: device-${DEVICE_NAME} device-${DEVICE_NAME}-kernel-${KERNEL_VARIANT} postmarketos-mkinitfs-hook-ci
-
-build-ci-tron-qemu-amd64:
-  extends:
-    - .build-ci-tron-qemu
-  needs:
-    - job: "build"
-      parallel:
-        matrix:
-          - TAG: shared
-  variables:
-    CPU_ARCH: amd64
-
-build-ci-tron-qemu-aarch64:
-  extends:
-    - .build-ci-tron-qemu
-  needs:
-    - job: "build"
-      parallel:
-        matrix:
-          - TAG: arm64
-  variables:
-    CPU_ARCH: aarch64
-
-.test-ci-tron-qemu:
-  stage: hardware tests
-  extends:
-    - .pmos-ci-tron-initramfs-test
-    - .qemu-common
-  dependencies: []
-  variables:
-    CI_TRON_KERNEL__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/vmlinuz-${KERNEL_VARIANT}"
-    CI_TRON_INITRAMFS__INITRAMFS__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/initramfs"
-    CI_TRON_KERNEL_CMDLINE__DEVICEINFO: 'console=tty1 console=ttyS0,115200 PMOS_FORCE_PARTITION_RESIZE'
-
-test-ci-tron-qemu-amd64:
-  extends:
-    - .test-ci-tron-qemu
-    - .pmos-ci-tron-runner-qemu-amd64
-  needs:
-    - job: 'build-ci-tron-qemu-amd64'
-      artifacts: false
-  variables:
-    CPU_ARCH: amd64
-
-test-ci-tron-qemu-aarch64:
-  extends:
-    - .test-ci-tron-qemu
-    - .pmos-ci-tron-runner-qemu-aarch64
-  needs:
-    - job: 'build-ci-tron-qemu-aarch64'
-      artifacts: false
-  variables:
-    CPU_ARCH: aarch64

 vendor:
   stage: vendor
   image: alpine:latest
-  rules:
-    - if: '$CI_COMMIT_TAG != null'
+  only:
+    - tags
   before_script:
     - apk -q add curl go make
   script:
@@ -137,8 +54,8 @@ vendor:
 release:
   stage: release
   image: registry.gitlab.com/gitlab-org/release-cli:latest
-  rules:
-    - if: '$CI_COMMIT_TAG != null'
+  only:
+    - tags
   script:
     - |
       release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
Makefile (12 changed lines)

@@ -12,13 +12,7 @@ GO?=go
 GOFLAGS?=
 LDFLAGS+=-s -w -X main.Version=$(VERSION)
 RM?=rm -f
-GOTESTOPTS?=-count=1 -race
-GOTEST?=go test ./...
-DISABLE_GOGC?=
+GOTEST=go test -count=1 -race

-ifeq ($(DISABLE_GOGC),1)
-    LDFLAGS+=-X main.DisableGC=true
-endif
-
 GOSRC!=find * -name '*.go'
 GOSRC+=go.mod go.sum
@@ -48,10 +42,10 @@ test:
     fi
     @staticcheck ./...

-    $(GOTEST) $(GOTESTOPTS)
+    @$(GOTEST) ./...

 clean:
     $(RM) mkinitfs $(DOCS)
     $(RM) $(VENDORED)*

 install: $(DOCS) mkinitfs
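Note: both sides of the Makefile rely on Go's -X linker flag (LDFLAGS+=-s -w -X main.Version=$(VERSION), plus -X main.DisableGC=true behind DISABLE_GOGC on the 2.7.0 side) to stamp values into package-level string variables at build time. A minimal standalone sketch of that mechanism, not taken from this repository:

// build with: go build -ldflags "-X main.Version=2.7.0 -X main.DisableGC=true"
package main

import (
    "fmt"
    "runtime/debug"
    "strings"
)

// Both variables stay empty unless overridden via -ldflags "-X ...".
var (
    Version   string
    DisableGC string
)

func main() {
    if strings.ToLower(DisableGC) == "true" {
        // Disable the garbage collector entirely, as the removed DISABLE_GOGC path did.
        debug.SetGCPercent(-1)
    }
    fmt.Println("version:", Version)
}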
@@ -9,8 +9,6 @@ import (
     "log"
     "os"
     "path/filepath"
-    "runtime/debug"
-    "strings"
     "time"

     "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
@@ -28,14 +26,8 @@ import (

 // set at build time
 var Version string
-var DisableGC string

 func main() {
-    // To allow working around silly GC-related issues, like https://gitlab.com/qemu-project/qemu/-/issues/2560
-    if strings.ToLower(DisableGC) == "true" {
-        debug.SetGCPercent(-1)
-    }
-
     retCode := 0
     defer func() { os.Exit(retCode) }()

@@ -111,38 +103,15 @@ func main() {
         hookfiles.New("/etc/mkinitfs/files"),
         hookscripts.New("/usr/share/mkinitfs/hooks", "/hooks"),
         hookscripts.New("/etc/mkinitfs/hooks", "/hooks"),
-        hookscripts.New("/usr/share/mkinitfs/hooks-cleanup", "/hooks-cleanup"),
-        hookscripts.New("/etc/mkinitfs/hooks-cleanup", "/hooks-cleanup"),
         modules.New("/usr/share/mkinitfs/modules"),
         modules.New("/etc/mkinitfs/modules"),
     })
-    initfsExtra := initramfs.New([]filelist.FileLister{
-        hookfiles.New("/usr/share/mkinitfs/files-extra"),
-        hookfiles.New("/etc/mkinitfs/files-extra"),
-        hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
-        hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
-        modules.New("/usr/share/mkinitfs/modules-extra"),
-        modules.New("/etc/mkinitfs/modules-extra"),
-    })
-
     if err := initramfsAr.AddItems(initfs); err != nil {
         log.Println(err)
         log.Println("failed to generate: ", "initramfs")
         retCode = 1
         return
     }
-
-    // Include initramfs-extra files in the initramfs if not making a separate
-    // archive
-    if !devinfo.CreateInitfsExtra {
-        if err := initramfsAr.AddItems(initfsExtra); err != nil {
-            log.Println(err)
-            log.Println("failed to generate: ", "initramfs")
-            retCode = 1
-            return
-        }
-    }
-
     if err := initramfsAr.Write(filepath.Join(workDir, "initramfs"), os.FileMode(0644)); err != nil {
         log.Println(err)
         log.Println("failed to generate: ", "initramfs")
@@ -151,31 +120,37 @@ func main() {
     }
     misc.TimeFunc(start, "initramfs")

-    if devinfo.CreateInitfsExtra {
-        //
-        // initramfs-extra
-        //
-        // deviceinfo.InitfsExtraCompression needs a little more post-processing
-        compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
-        log.Printf("== Generating %s ==\n", "initramfs-extra")
-        log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
-
-        start = time.Now()
-        initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
-        if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
-            log.Println(err)
-            log.Println("failed to generate: ", "initramfs-extra")
-            retCode = 1
-            return
-        }
-        if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
-            log.Println(err)
-            log.Println("failed to generate: ", "initramfs-extra")
-            retCode = 1
-            return
-        }
-        misc.TimeFunc(start, "initramfs-extra")
-    }
+    //
+    // initramfs-extra
+    //
+    // deviceinfo.InitfsExtraCompression needs a little more post-processing
+    compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
+    log.Printf("== Generating %s ==\n", "initramfs-extra")
+    log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
+
+    start = time.Now()
+    initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
+    initfsExtra := initramfs.New([]filelist.FileLister{
+        hookfiles.New("/usr/share/mkinitfs/files-extra"),
+        hookfiles.New("/etc/mkinitfs/files-extra"),
+        hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
+        hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
+        modules.New("/usr/share/mkinitfs/modules-extra"),
+        modules.New("/etc/mkinitfs/modules-extra"),
+    })
+    if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
+        log.Println(err)
+        log.Println("failed to generate: ", "initramfs-extra")
+        retCode = 1
+        return
+    }
+    if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
+        log.Println(err)
+        log.Println("failed to generate: ", "initramfs-extra")
+        retCode = 1
+        return
+    }
+    misc.TimeFunc(start, "initramfs-extra")

     // Final processing of initramfs / kernel is done by boot-deploy
     if !disableBootDeploy {
@@ -42,7 +42,6 @@ mkinitfs reads deviceinfo values from */usr/share/deviceinfo/deviceinfo* and
 */etc/deviceinfo*, in that order. The following variables
 are *required* by mkinitfs:

-- deviceinfo_create_initfs_extra
 - deviceinfo_generate_systemd_boot
 - deviceinfo_initfs_compression
 - deviceinfo_initfs_extra_compression
@@ -134,8 +133,7 @@ create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, an
 skipped.

 ## /usr/share/mkinitfs/hooks, /etc/mkinitfs/hooks
-## /usr/share/mkinitfs/hooks-cleanup, /etc/mkinitfs/hooks-cleanup
-## /usr/share/mkinitfs/hooks-extra, /etc/mkinitfs/hooks-extra
+## /usr/share/mkinitfs/hooks-extra*, /etc/mkinitfs/hooks-extra

 Any files listed under these directories are copied as-is into the
 relevant archives. Hooks are generally script files, but how they are
@@ -148,7 +146,7 @@ create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, an
 ## /usr/share/mkinitfs/modules, /etc/mkinitfs/modules
 ## /usr/share/mkinitfs/modules-extra, /etc/mkinitfs/modules-extra

-Files with the *.modules* extension in these directories are lists of
+Files with the *.modules* extention in these directories are lists of
 kernel modules to include in the initramfs. Individual modules and
 directories can be listed in the files here. Globbing is also supported.

go.mod (10 changed lines)

@@ -7,13 +7,5 @@ require (
     github.com/klauspost/compress v1.15.12
     github.com/pierrec/lz4/v4 v4.1.17
     github.com/ulikunitz/xz v0.5.10
-    golang.org/x/sys v0.18.0
-)
-
-require (
-    github.com/mvdan/sh v2.6.4+incompatible // indirect
-    golang.org/x/crypto v0.21.0 // indirect
-    golang.org/x/sync v0.6.0 // indirect
-    golang.org/x/term v0.18.0 // indirect
-    mvdan.cc/sh v2.6.4+incompatible // indirect
+    golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
 )
go.sum (12 changed lines)

@@ -2,21 +2,9 @@ github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RS
 github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
 github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
 github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
-github.com/mvdan/sh v2.6.4+incompatible h1:D4oEWW0J8cL7zeQkrXw76IAYXF0mJfDaBwjgzmKb6zs=
-github.com/mvdan/sh v2.6.4+incompatible/go.mod h1:kipHzrJQZEDCMTNRVRAlMMFjqHEYrthfIlFkJSrmDZE=
 github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
 github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
 github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
-mvdan.cc/sh v2.6.4+incompatible h1:eD6tDeh0pw+/TOTI1BBEryZ02rD2nMcFsgcvde7jffM=
-mvdan.cc/sh v2.6.4+incompatible/go.mod h1:IeeQbZq+x2SUGBensq/jge5lLQbS3XT2ktyp3wrt4x8=
@@ -237,10 +237,7 @@ func (archive *Archive) AddItemsExclude(flister filelist.FileLister, exclude filelist.FileLister) error {

 // Adds the given file or directory at "source" to the archive at "dest"
 func (archive *Archive) AddItem(source string, dest string) error {
-    if osutil.HasMergedUsr() {
-        source = osutil.MergeUsr(source)
-        dest = osutil.MergeUsr(dest)
-    }
     sourceStat, err := os.Lstat(source)
     if err != nil {
         e, ok := err.(*os.PathError)
@@ -251,12 +248,6 @@ func (archive *Archive) AddItem(source string, dest string) error {
         return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
     }

-    // A symlink to a directory doesn't have the os.ModeDir bit set, so we need
-    // to check if it's a symlink first
-    if sourceStat.Mode()&os.ModeSymlink != 0 {
-        return archive.addSymlink(source, dest)
-    }
-
     if sourceStat.Mode()&os.ModeDir != 0 {
         return archive.addDir(dest)
     }
@@ -264,45 +255,6 @@ func (archive *Archive) AddItem(source string, dest string) error {
     return archive.addFile(source, dest)
 }

-func (archive *Archive) addSymlink(source string, dest string) error {
-    target, err := os.Readlink(source)
-    if err != nil {
-        log.Print("addSymlink: failed to get symlink target for: ", source)
-        return err
-    }
-
-    // Make sure we pick up the symlink target too
-    targetAbs := target
-    if filepath.Dir(target) == "." {
-        // relative symlink, make it absolute so we can add the target to the archive
-        targetAbs = filepath.Join(filepath.Dir(source), target)
-    }
-
-    if !filepath.IsAbs(targetAbs) {
-        targetAbs, err = osutil.RelativeSymlinkTargetToDir(targetAbs, filepath.Dir(source))
-        if err != nil {
-            return err
-        }
-    }
-
-    archive.AddItem(targetAbs, targetAbs)
-
-    // Now add the symlink itself
-    destFilename := strings.TrimPrefix(dest, "/")
-
-    archive.items.add(archiveItem{
-        sourcePath: source,
-        header: &cpio.Header{
-            Name: destFilename,
-            Linkname: target,
-            Mode: 0644 | cpio.ModeSymlink,
-            Size: int64(len(target)),
-        },
-    })
-
-    return nil
-}
-
 func (archive *Archive) addFile(source string, dest string) error {
     if err := archive.addDir(filepath.Dir(dest)); err != nil {
         return err
@@ -314,6 +266,42 @@ func (archive *Archive) addFile(source string, dest string) error {
         return err
     }

+    // Symlink: write symlink to archive then set 'file' to link target
+    if sourceStat.Mode()&os.ModeSymlink != 0 {
+        // log.Printf("File %q is a symlink", file)
+        target, err := os.Readlink(source)
+        if err != nil {
+            log.Print("addFile: failed to get symlink target: ", source)
+            return err
+        }
+
+        destFilename := strings.TrimPrefix(dest, "/")
+
+        archive.items.add(archiveItem{
+            sourcePath: source,
+            header: &cpio.Header{
+                Name: destFilename,
+                Linkname: target,
+                Mode: 0644 | cpio.ModeSymlink,
+                Size: int64(len(target)),
+                // Checksum: 1,
+            },
+        })
+
+        if filepath.Dir(target) == "." {
+            target = filepath.Join(filepath.Dir(source), target)
+        }
+        // make sure target is an absolute path
+        if !filepath.IsAbs(target) {
+            target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
+            if err != nil {
+                return err
+            }
+        }
+        err = archive.addFile(target, target)
+        return err
+    }
+
     destFilename := strings.TrimPrefix(dest, "/")

     archive.items.add(archiveItem{
@@ -416,12 +404,6 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err error) {
 }

 func (archive *Archive) writeCpio() error {
-    // Just in case
-    if osutil.HasMergedUsr() {
-        archive.addSymlink("/bin", "/bin")
-        archive.addSymlink("/sbin", "/sbin")
-        archive.addSymlink("/lib", "/lib")
-    }
     // having a transient function for actually adding files to the archive
     // allows the deferred fd.close to run after every copy and prevent having
     // tons of open file handles until the copying is all done
@@ -436,19 +418,19 @@ func (archive *Archive) writeCpio() error {
         if header.Mode.IsRegular() {
             fd, err := os.Open(source)
             if err != nil {
-                return fmt.Errorf("archive.writeCpio: Unable to open file %q, %w", source, err)
+                return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
             }
             defer fd.Close()
             if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
-                return fmt.Errorf("archive.writeCpio: Couldn't process %q: %w", source, err)
+                return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
             }
         } else if header.Linkname != "" {
             // the contents of a symlink is just need the link name
             if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
-                return fmt.Errorf("archive.writeCpio: unable to write out symlink: %q -> %q: %w", source, header.Linkname, err)
+                return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
             }
         } else {
-            return fmt.Errorf("archive.writeCpio: unknown type for file: %q: %d", source, header.Mode)
+            return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
         }
     }

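Note: both versions of the archive code resolve a relative symlink target against the directory containing the link before recursing on the target. A minimal stdlib-only sketch of that step (not the repository's osutil helper; the example path is hypothetical):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// resolveSymlinkTarget returns the symlink's target as an absolute, cleaned path.
func resolveSymlinkTarget(link string) (string, error) {
    target, err := os.Readlink(link)
    if err != nil {
        return "", err
    }
    if !filepath.IsAbs(target) {
        // e.g. "../../lib/libfoo.so" is interpreted relative to the link's directory
        target = filepath.Join(filepath.Dir(link), target)
    }
    return filepath.Clean(target), nil
}

func main() {
    resolved, err := resolveSymlinkTarget("/usr/bin/awk") // hypothetical example path
    fmt.Println(resolved, err)
}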
@@ -78,17 +78,12 @@ func (b *BootDeploy) Run() error {
     }

     // boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
-    args := []string{
+    cmd := exec.Command("boot-deploy",
         "-i", "initramfs",
         "-k", kernFilename,
         "-d", b.inDir,
         "-o", b.outDir,
-    }
-
-    if b.devinfo.CreateInitfsExtra {
-        args = append(args, "initramfs-extra")
-    }
-    cmd := exec.Command("boot-deploy", args...)
+        "initramfs-extra")

     cmd.Stdout = os.Stdout
     cmd.Stderr = os.Stderr
@@ -44,7 +44,7 @@ func (h *HookDirs) List() (*filelist.FileList, error) {

     s := bufio.NewScanner(f)
     for s.Scan() {
-        dir := strings.TrimSpace(s.Text())
+        dir := s.Text()
         if len(dir) == 0 || strings.HasPrefix(dir, "#") {
             continue
         }
@@ -11,7 +11,6 @@ import (

     "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
     "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
-    "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
 )

 type HookFiles struct {
@@ -59,15 +58,12 @@ func slurpFiles(fd io.Reader) (*filelist.FileList, error) {

     s := bufio.NewScanner(fd)
     for s.Scan() {
-        line := strings.TrimSpace(s.Text())
+        line := s.Text()
         if len(line) == 0 || strings.HasPrefix(line, "#") {
             continue
         }

         src, dest, has_dest := strings.Cut(line, ":")
-        if osutil.HasMergedUsr() {
-            src = osutil.MergeUsr(src)
-        }

         fFiles, err := misc.GetFiles([]string{src}, true)
         if err != nil {
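Note: the "src:dest" convention that slurpFiles parses with strings.Cut is easy to miss in the diff noise. A minimal sketch of just that split, not taken from the repository:

package main

import (
    "fmt"
    "strings"
)

// splitHookFileLine returns the source path and the in-archive destination.
func splitHookFileLine(line string) (src, dest string) {
    src, dest, hasDest := strings.Cut(line, ":")
    if !hasDest {
        dest = src // no explicit destination: keep the same path inside the archive
    }
    return src, dest
}

func main() {
    fmt.Println(splitHookFileLine("/usr/share/foo"))          // same path on both sides
    fmt.Println(splitHookFileLine("/usr/share/foo:/etc/foo")) // explicit destination
}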
@@ -33,14 +33,8 @@ func (m *Modules) List() (*filelist.FileList, error) {
     }

     files := filelist.NewFileList()
-    libDir := "/usr/lib/modules"
-    if exists, err := misc.Exists(libDir); !exists {
-        libDir = "/lib/modules"
-    } else if err != nil {
-        return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", libDir, err)
-    }

-    modDir := filepath.Join(libDir, kernVer)
+    modDir := filepath.Join("/lib/modules", kernVer)
     if exists, err := misc.Exists(modDir); !exists {
         // dir /lib/modules/<kernel> if kernel built without module support, so just print a message
         log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
@@ -83,7 +77,7 @@ func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
     files := filelist.NewFileList()
     s := bufio.NewScanner(fd)
     for s.Scan() {
-        line := strings.TrimSpace(s.Text())
+        line := s.Text()
         if len(line) == 0 || strings.HasPrefix(line, "#") {
             continue
         }
@@ -103,8 +97,8 @@ func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
             }
         } else if dir == "" {
             // item is a module name
-            if modFilelist, err := getModule(line, modDir); err != nil {
-                return nil, fmt.Errorf("unable to get module file %q: %w", line, err)
+            if modFilelist, err := getModule(s.Text(), modDir); err != nil {
+                return nil, fmt.Errorf("unable to get module file %q: %w", s.Text(), err)
             } else {
                 for _, file := range modFilelist {
                     files.Add(file, file)
@@ -188,7 +182,7 @@ func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {

     s := bufio.NewScanner(modulesDep)
     for s.Scan() {
-        line := strings.TrimSpace(s.Text())
+        line := s.Text()
         if len(line) == 0 || strings.HasPrefix(line, "#") {
             continue
         }
@@ -18,7 +18,6 @@ func TestStripExts(t *testing.T) {
         {"another_file", "another_file"},
         {"a.b.c.d.e.f.g.h.i", "a"},
         {"virtio_blk.ko", "virtio_blk"},
-        {"virtio_blk.ko ", "virtio_blk"},
     }
     for _, table := range tables {
         out := stripExts(table.in)
@@ -3,9 +3,10 @@ package misc
 import (
     "debug/elf"
     "fmt"
-    "io/fs"
     "os"
     "path/filepath"
+    "strings"
+    "log"

     "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
 )
@@ -23,57 +24,51 @@ func GetFiles(list []string, required bool) (files []string, err error) {
     return
 }

-func getFile(file string, required bool) (files []string, err error) {
-    // Expand glob expression
-    expanded, err := filepath.Glob(file)
-    if err != nil {
-        return
-    }
-    if len(expanded) > 0 && expanded[0] != file {
-        for _, path := range expanded {
-            if globFiles, err := getFile(path, required); err != nil {
-                return files, err
-            } else {
-                files = append(files, globFiles...)
-            }
-        }
-        return RemoveDuplicates(files), nil
-    }
-
-    // If the file is a symlink we need to do this to prevent an infinite recursion
-    // loop:
-    // Symlinks need special handling to prevent infinite recursion:
-    // 1) add the symlink to the list of files
-    // 2) set file to dereferenced target
-    // 4) continue this function to either walk it if the target is a dir or add the
-    // target to the list of files
-    if s, err := os.Lstat(file); err == nil {
-        if s.Mode()&fs.ModeSymlink != 0 {
-            files = append(files, file)
-            if target, err := filepath.EvalSymlinks(file); err != nil {
-                return files, err
-            } else {
-                file = target
-            }
-        }
-    }
-
-    fileInfo, err := os.Stat(file)
-    if err != nil {
-        // Check if there is a Zstd-compressed version of the file
-        fileZstd := file + ".zst" // .zst is the extension used by linux-firmware
-        fileInfoZstd, errZstd := os.Stat(fileZstd)
-
-        if errZstd == nil {
-            file = fileZstd
-            fileInfo = fileInfoZstd
-            // Unset nil so we don't retain the error from the os.Stat call for the uncompressed version.
-            err = nil
-        } else {
-            if required {
-                return files, fmt.Errorf("getFile: failed to stat file %q: %w (also tried %q: %w)", file, err, fileZstd, errZstd)
-            }
+// This function doesn't handle globs, use getFile() instead.
+func getFileNormalized(file string, required bool) (files []string, err error) {
+    fileInfo, err := os.Stat(file)
+    // Trying some fallbacks...
+    if err != nil {
+        type triedResult struct {
+            file string
+            err  error
+        }
+
+        triedFiles := make([]triedResult, 0, 1)
+
+        // Temporary fallback until alpine/pmOS usr-merge happened
+        // If a path starts with /bin or /sbin, also try /usr equivalent before giving up
+        if strings.HasPrefix(file, "/bin/") || strings.HasPrefix(file, "/sbin/") {
+            fileUsr := filepath.Join("/usr", file)
+            _, err := os.Stat(fileUsr);
+            if err == nil {
+                log.Printf("getFile: failed to find %q, but found it in %q. Please adjust the path.", file, fileUsr)
+                return getFileNormalized(fileUsr, required)
+            } else {
+                triedFiles = append(triedFiles, triedResult{fileUsr, err})
+            }
+        }
+
+        {
+            // Check if there is a Zstd-compressed version of the file
+            fileZstd := file + ".zst" // .zst is the extension used by linux-firmware
+            _, err := os.Stat(fileZstd);
+            if err == nil {
+                return getFileNormalized(fileZstd, required)
+            } else {
+                triedFiles = append(triedFiles, triedResult{fileZstd, err})
+            }
+        }
+
+        // Failed to find anything
+        if required {
+            failStrings := make([]string, 0, 2)
+            for _, result := range triedFiles {
+                failStrings = append(failStrings, fmt.Sprintf("\n - also tried %q: %v", result.file, result.err))
+            }
+            return files, fmt.Errorf("getFile: failed to stat file %q: %v%q", file, err, strings.Join(failStrings, ""))
+        } else {
             return files, nil
         }
     }
@@ -114,6 +109,26 @@ func getFile(file string, required bool) (files []string, err error) {
     return
 }

+func getFile(file string, required bool) (files []string, err error) {
+    // Expand glob expression
+    expanded, err := filepath.Glob(file)
+    if err != nil {
+        return
+    }
+    if len(expanded) > 0 && expanded[0] != file {
+        for _, path := range expanded {
+            if globFiles, err := getFile(path, required); err != nil {
+                return files, err
+            } else {
+                files = append(files, globFiles...)
+            }
+        }
+        return RemoveDuplicates(files), nil
+    }
+
+    return getFileNormalized(file, required)
+}
+
 func getDeps(file string, parents map[string]struct{}) (files []string, err error) {

     if _, found := parents[file]; found {
@@ -139,7 +154,6 @@ func getDeps(file string, parents map[string]struct{}) (files []string, err error) {
         "/usr/lib",
         "/lib",
         "/usr/lib/expect*",
-        "/usr/lib/systemd",
     }

     for _, lib := range libs {
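Note: the lookup order being reshuffled here is: expand globs, stat the literal path, then fall back to a ".zst" sibling the way linux-firmware ships compressed files. A minimal stdlib-only sketch of that order, not taken from the repository (the example path is hypothetical):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// findFile returns candidate paths for a pattern, trying glob, literal, then ".zst".
func findFile(pattern string) ([]string, error) {
    matches, err := filepath.Glob(pattern)
    if err != nil {
        return nil, err
    }
    if len(matches) > 0 && matches[0] != pattern {
        return matches, nil // the glob expanded to one or more concrete paths
    }
    if _, err := os.Stat(pattern); err == nil {
        return []string{pattern}, nil
    }
    if _, err := os.Stat(pattern + ".zst"); err == nil {
        return []string{pattern + ".zst"}, nil // compressed fallback
    }
    return nil, fmt.Errorf("findFile: %q not found", pattern)
}

func main() {
    fmt.Println(findFile("/lib/firmware/example.bin"))
}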
@@ -1,167 +0,0 @@
-// Copyright 2025 Clayton Craft <clayton@craftyguy.net>
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package misc
-
-import (
-    "os"
-    "path/filepath"
-    "reflect"
-    "sort"
-    "testing"
-    "time"
-)
-
-func TestGetFile(t *testing.T) {
-    subtests := []struct {
-        name     string
-        setup    func(tmpDir string) (inputPath string, expectedFiles []string, err error)
-        required bool
-    }{
-        {
-            name: "symlink to directory - no infinite recursion",
-            setup: func(tmpDir string) (string, []string, error) {
-                // Create target directory with files
-                targetDir := filepath.Join(tmpDir, "target")
-                if err := os.MkdirAll(targetDir, 0755); err != nil {
-                    return "", nil, err
-                }
-
-                testFile1 := filepath.Join(targetDir, "file1.txt")
-                testFile2 := filepath.Join(targetDir, "file2.txt")
-                if err := os.WriteFile(testFile1, []byte("content1"), 0644); err != nil {
-                    return "", nil, err
-                }
-                if err := os.WriteFile(testFile2, []byte("content2"), 0644); err != nil {
-                    return "", nil, err
-                }
-
-                // Create symlink pointing to target directory
-                symlinkPath := filepath.Join(tmpDir, "symlink")
-                if err := os.Symlink(targetDir, symlinkPath); err != nil {
-                    return "", nil, err
-                }
-
-                expected := []string{symlinkPath, testFile1, testFile2}
-                return symlinkPath, expected, nil
-            },
-            required: true,
-        },
-        {
-            name: "symlink to file - returns both symlink and target",
-            setup: func(tmpDir string) (string, []string, error) {
-                // Create target file
-                targetFile := filepath.Join(tmpDir, "target.txt")
-                if err := os.WriteFile(targetFile, []byte("content"), 0644); err != nil {
-                    return "", nil, err
-                }
-
-                // Create symlink pointing to target file
-                symlinkPath := filepath.Join(tmpDir, "symlink.txt")
-                if err := os.Symlink(targetFile, symlinkPath); err != nil {
-                    return "", nil, err
-                }
-
-                expected := []string{symlinkPath, targetFile}
-                return symlinkPath, expected, nil
-            },
-            required: true,
-        },
-        {
-            name: "regular file",
-            setup: func(tmpDir string) (string, []string, error) {
-                regularFile := filepath.Join(tmpDir, "regular.txt")
-                if err := os.WriteFile(regularFile, []byte("content"), 0644); err != nil {
-                    return "", nil, err
-                }
-
-                expected := []string{regularFile}
-                return regularFile, expected, nil
-            },
-            required: true,
-        },
-        {
-            name: "regular directory",
-            setup: func(tmpDir string) (string, []string, error) {
-                // Create directory with files
-                dirPath := filepath.Join(tmpDir, "testdir")
-                if err := os.MkdirAll(dirPath, 0755); err != nil {
-                    return "", nil, err
-                }
-
-                file1 := filepath.Join(dirPath, "file1.txt")
-                file2 := filepath.Join(dirPath, "subdir", "file2.txt")
-
-                if err := os.WriteFile(file1, []byte("content1"), 0644); err != nil {
-                    return "", nil, err
-                }
-                if err := os.MkdirAll(filepath.Dir(file2), 0755); err != nil {
-                    return "", nil, err
-                }
-                if err := os.WriteFile(file2, []byte("content2"), 0644); err != nil {
-                    return "", nil, err
-                }
-
-                expected := []string{file1, file2}
-                return dirPath, expected, nil
-            },
-            required: true,
-        },
-        {
-            name: "zst compressed file fallback",
-            setup: func(tmpDir string) (string, []string, error) {
-                // Create a .zst file but NOT the original file
-                zstFile := filepath.Join(tmpDir, "firmware.bin.zst")
-                if err := os.WriteFile(zstFile, []byte("compressed content"), 0644); err != nil {
-                    return "", nil, err
-                }
-
-                // Request the original file (without .zst extension)
-                originalFile := filepath.Join(tmpDir, "firmware.bin")
-
-                // Expected: should find and return the .zst version
-                expected := []string{zstFile}
-                return originalFile, expected, nil
-            },
-            required: true,
-        },
-    }
-
-    for _, st := range subtests {
-        t.Run(st.name, func(t *testing.T) {
-            tmpDir := t.TempDir()
-
-            inputPath, expectedFiles, err := st.setup(tmpDir)
-            if err != nil {
-                t.Fatalf("setup failed: %v", err)
-            }
-
-            // Add timeout protection for infinite recursion test
-            done := make(chan struct{})
-            var files []string
-            var getFileErr error
-
-            go func() {
-                defer close(done)
-                files, getFileErr = getFile(inputPath, st.required)
-            }()
-
-            select {
-            case <-done:
-                if getFileErr != nil {
-                    t.Fatalf("getFile failed: %v", getFileErr)
-                }
-            case <-time.After(5 * time.Second):
-                t.Fatal("getFile appears to be in infinite recursion (timeout)")
-            }
-
-            // Sort for comparison
-            sort.Strings(expectedFiles)
-            sort.Strings(files)
-
-            if !reflect.DeepEqual(expectedFiles, files) {
-                t.Fatalf("expected: %q, got: %q", expectedFiles, files)
-            }
-        })
-    }
-}
@@ -10,39 +10,6 @@ import (
     "golang.org/x/sys/unix"
 )

-// Try to guess whether the system has merged dirs under /usr
-func HasMergedUsr() bool {
-    for _, dir := range []string{"/bin", "/lib"} {
-        stat, err := os.Lstat(dir)
-        if err != nil {
-            // TODO: probably because the dir doesn't exist... so
-            // should we assume that it's because the system has some weird
-            // implementation of "merge /usr"?
-            return true
-        } else if stat.Mode()&os.ModeSymlink == 0 {
-            // Not a symlink, so must not be merged /usr
-            return false
-        }
-    }
-    return true
-}
-
-// Converts given path to one supported by a merged /usr config.
-// E.g., /bin/foo becomes /usr/bin/foo, /lib/bar becomes /usr/lib/bar
-// See: https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge
-func MergeUsr(file string) string {
-
-    // Prepend /usr to supported paths
-    for _, prefix := range []string{"/bin", "/sbin", "/lib", "/lib64"} {
-        if strings.HasPrefix(file, prefix) {
-            file = filepath.Join("/usr", file)
-            break
-        }
-    }
-
-    return file
-}
-
 // Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
 // absolute path
 func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
@@ -1,49 +0,0 @@
-// Copyright 2024 Clayton Craft <clayton@craftyguy.net>
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package osutil
-
-import (
-    "testing"
-)
-
-func TestMergeUsr(t *testing.T) {
-    subtests := []struct {
-        in       string
-        expected string
-    }{
-        {
-            in:       "/bin/foo",
-            expected: "/usr/bin/foo",
-        },
-        {
-            in:       "/sbin/foo",
-            expected: "/usr/sbin/foo",
-        },
-        {
-            in:       "/usr/sbin/foo",
-            expected: "/usr/sbin/foo",
-        },
-        {
-            in:       "/usr/bin/foo",
-            expected: "/usr/bin/foo",
-        },
-        {
-            in:       "/lib/foo.so",
-            expected: "/usr/lib/foo.so",
-        },
-        {
-            in:       "/lib64/foo.so",
-            expected: "/usr/lib64/foo.so",
-        },
-    }
-
-    for _, st := range subtests {
-        t.Run(st.in, func(t *testing.T) {
-            out := MergeUsr(st.in)
-            if out != st.expected {
-                t.Fatalf("expected: %q, got: %q\n", st.expected, out)
-            }
-        })
-    }
-}
@@ -4,14 +4,14 @@
 package deviceinfo

 import (
-    "context"
+    "bufio"
     "fmt"
+    "io"
+    "log"
+    "os"
     "reflect"
-    "strconv"
     "strings"
-    "time"

-    "github.com/mvdan/sh/shell"
     "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
 )

@@ -20,8 +20,6 @@ type DeviceInfo struct {
     InitfsExtraCompression string
     UbootBoardname string
     GenerateSystemdBoot string
-    FormatVersion string
-    CreateInitfsExtra bool
 }

 // Reads the relevant entries from "file" into DeviceInfo struct
@@ -34,7 +32,13 @@ func (d *DeviceInfo) ReadDeviceinfo(file string) error {
         return fmt.Errorf("unexpected error getting status for %q: %s", file, err)
     }

-    if err := d.unmarshal(file); err != nil {
+    fd, err := os.Open(file)
+    if err != nil {
+        return err
+    }
+    defer fd.Close()
+
+    if err := d.unmarshal(fd); err != nil {
         return err
     }

@@ -42,44 +46,53 @@
 }

 // Unmarshals a deviceinfo into a DeviceInfo struct
-func (d *DeviceInfo) unmarshal(file string) error {
-    ctx, cancelCtx := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
-    defer cancelCtx()
-    vars, err := shell.SourceFile(ctx, file)
-    if err != nil {
-        return fmt.Errorf("parsing deviceinfo %q failed: %w", file, err)
-    }
-
-    for k, v := range vars {
-        fieldName := nameToField(k)
+func (d *DeviceInfo) unmarshal(r io.Reader) error {
+    s := bufio.NewScanner(r)
+    for s.Scan() {
+        line := s.Text()
+        if strings.HasPrefix(line, "#") {
+            continue
+        }
+
+        // line isn't setting anything, so just ignore it
+        if !strings.Contains(line, "=") {
+            continue
+        }
+
+        // sometimes line has a comment at the end after setting an option
+        line = strings.SplitN(line, "#", 2)[0]
+        line = strings.TrimSpace(line)
+
+        // must support having '=' in the value (e.g. kernel cmdline)
+        parts := strings.SplitN(line, "=", 2)
+        if len(parts) != 2 {
+            return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
+        }
+
+        name, val := parts[0], parts[1]
+        val = strings.ReplaceAll(val, "\"", "")
+
+        if name == "deviceinfo_format_version" && val != "0" {
+            return fmt.Errorf("deviceinfo format version %q is not supported", val)
+        }
+
+        fieldName := nameToField(name)
+
+        if fieldName == "" {
+            return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
+        }
+
         field := reflect.ValueOf(d).Elem().FieldByName(fieldName)
         if !field.IsValid() {
             // an option that meets the deviceinfo "specification", but isn't
             // one we care about in this module
             continue
         }
-        switch field.Interface().(type) {
-        case string:
-            field.SetString(v.String())
-        case bool:
-            if v, err := strconv.ParseBool(v.String()); err != nil {
-                return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'bool'", file, k)
-            } else {
-                field.SetBool(v)
-            }
-        case int:
-            if v, err := strconv.ParseInt(v.String(), 10, 32); err != nil {
-                return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'int'", file, k)
-            } else {
-                field.SetInt(v)
-            }
-        default:
-            return fmt.Errorf("deviceinfo %q has unsupported type for field %q", file, k)
-        }
+        field.SetString(val)
     }
-
-    if d.FormatVersion != "0" {
-        return fmt.Errorf("deviceinfo %q has an unsupported format version %q", file, d.FormatVersion)
+    if err := s.Err(); err != nil {
+        log.Print("unable to parse deviceinfo: ", err)
+        return err
     }

     return nil
@@ -103,25 +116,3 @@ func nameToField(name string) string {

     return field
 }
-
-func (d DeviceInfo) String() string {
-    return fmt.Sprintf(`{
-%s: %v
-%s: %v
-%s: %v
-%s: %v
-%s: %v
-%s: %v
-%s: %v
-%s: %v
-}`,
-        "deviceinfo_format_version", d.FormatVersion,
-        "deviceinfo_", d.FormatVersion,
-        "deviceinfo_initfs_compression", d.InitfsCompression,
-        "deviceinfo_initfs_extra_compression", d.InitfsCompression,
-        "deviceinfo_ubootBoardname", d.UbootBoardname,
-        "deviceinfo_generateSystemdBoot", d.GenerateSystemdBoot,
-        "deviceinfo_formatVersion", d.FormatVersion,
-        "deviceinfo_createInitfsExtra", d.CreateInitfsExtra,
-    )
-}
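Note: a minimal usage sketch of the scanner-based parsing style used on the jane400/ve side (assumed behaviour, not the repository's code): read "key=value" lines, drop comments, strip quotes, and allow '=' inside the value.

package main

import (
    "bufio"
    "fmt"
    "io"
    "strings"
)

// parseDeviceinfo collects key/value pairs from a deviceinfo-style stream.
func parseDeviceinfo(r io.Reader) map[string]string {
    out := make(map[string]string)
    s := bufio.NewScanner(r)
    for s.Scan() {
        // drop trailing comments, then surrounding whitespace
        line := strings.TrimSpace(strings.SplitN(s.Text(), "#", 2)[0])
        if line == "" || !strings.Contains(line, "=") {
            continue
        }
        parts := strings.SplitN(line, "=", 2) // the value may itself contain '='
        out[parts[0]] = strings.ReplaceAll(parts[1], "\"", "")
    }
    return out
}

func main() {
    input := "deviceinfo_format_version=\"0\"\n# comment\ndeviceinfo_initfs_compression=\"zstd:--foo=1 -T0\"\n"
    fmt.Println(parseDeviceinfo(strings.NewReader(input)))
}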
@@ -4,6 +4,8 @@
 package deviceinfo

 import (
+    "fmt"
+    "reflect"
     "strings"
     "testing"
 )
@@ -42,7 +44,6 @@ func TestNameToField(t *testing.T) {
         {"modules_initfs", "ModulesInitfs"},
         {"deviceinfo_initfs_compression___", "InitfsCompression"},
         {"deviceinfo_initfs_extra_compression", "InitfsExtraCompression"},
-        {"deviceinfo_create_initfs_extra", "CreateInitfsExtra"},
     }

     for _, table := range tables {
@@ -58,25 +59,37 @@ func TestUnmarshal(t *testing.T) {
     tables := []struct {
         // field is just used for reflection within the test, so it must be a
         // valid DeviceInfo field
-        file string
-        expected DeviceInfo
+        field string
+        in string
+        expected string
     }{
-        {"./test_resources/deviceinfo-unmarshal-1", DeviceInfo{
-            FormatVersion: "0",
-            UbootBoardname: "foobar-bazz",
-            InitfsCompression: "zstd:--foo=1 -T0 --bar=bazz",
-            InitfsExtraCompression: "",
-            CreateInitfsExtra: true,
-        },
-        },
+        {"InitfsCompression", "deviceinfo_initfs_compression=\"gzip:-9\"\n", "gzip:-9"},
+        // line with multiple '='
+        {"InitfsCompression", "deviceinfo_initfs_compression=zstd:--foo=1 -T0 --bar=bazz", "zstd:--foo=1 -T0 --bar=bazz"},
+        // empty option
+        {"InitfsCompression", "deviceinfo_initfs_compression=\"\"\n", ""},
+        // line with comment at the end
+        {"", "# this is a comment!\n", ""},
+        // empty lines are fine
+        {"", "", ""},
+        // line with whitepace characters only
+        {"", " \t \n\r", ""},
     }
     var d DeviceInfo
     for _, table := range tables {
-        if err := d.unmarshal(table.file); err != nil {
-            t.Error(err)
+        testName := fmt.Sprintf("unmarshal::'%s':", strings.ReplaceAll(table.in, "\n", "\\n"))
+        if err := d.unmarshal(strings.NewReader(table.in)); err != nil {
+            t.Errorf("%s received an unexpected err: ", err)
         }
-        if d != table.expected {
-            t.Errorf("expected: %s, got: %s", table.expected, d)
+
+        // Check against expected value
+        field := reflect.ValueOf(&d).Elem().FieldByName(table.field)
+        out := ""
+        if table.field != "" {
+            out = field.String()
+        }
+        if out != table.expected {
+            t.Errorf("%s expected: %q, got: %q", testName, table.expected, out)
         }
     }

@@ -1,3 +1,2 @@
-deviceinfo_format_version="0"
 deviceinfo_initfs_compression="gz -9"
 deviceinfo_mesa_driver="panfrost"

@@ -1,2 +1 @@
-deviceinfo_format_version="0"
 deviceinfo_mesa_driver="msm"

@@ -1,7 +0,0 @@
-deviceinfo_format_version="0"
-deviceinfo_uboot_boardname="foobar-bazz"
-# line with multiple =
-deviceinfo_initfs_compression="zstd:--foo=1 -T0 --bar=bazz"
-# empty option
-deviceinfo_initfs_extra_compression=""
-deviceinfo_create_initfs_extra="true" # in-line comment that should be ignored